本文整理汇总了C语言（Linux内核）中cpumask_set_cpu函数的典型用法代码示例。如果您正苦于以下问题:C语言 cpumask_set_cpu函数的具体用法?C语言 cpumask_set_cpu怎么用?C语言 cpumask_set_cpu使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cpumask_set_cpu函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C语言代码示例。
示例1: db8500_cpufreq_cooling_probe
/*
 * db8500_cpufreq_cooling_probe - register CPU 0 as a cpufreq cooling device
 * @pdev: the platform device being probed
 *
 * Defers probing until the cpufreq driver has populated its frequency
 * table, then registers a cpufreq-based cooling device covering CPU 0
 * with the thermal framework.
 *
 * Returns 0 on success, -EPROBE_DEFER if cpufreq is not ready yet, or a
 * negative error code from cpufreq_cooling_register().
 */
static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
{
	struct thermal_cooling_device *cdev;
	struct cpumask mask_val;

	/* make sure cpufreq driver has been initialized */
	if (!cpufreq_frequency_get_table(0))
		return -EPROBE_DEFER;

	/*
	 * BUG FIX: mask_val lives on the stack and was previously handed to
	 * cpumask_set_cpu() uninitialized, leaving every bit other than
	 * CPU 0 as stack garbage.  Clear the whole mask first.
	 */
	cpumask_clear(&mask_val);
	cpumask_set_cpu(0, &mask_val);

	cdev = cpufreq_cooling_register(&mask_val);
	if (IS_ERR(cdev)) {
		dev_err(&pdev->dev, "Failed to register cooling device\n");
		return PTR_ERR(cdev);
	}

	platform_set_drvdata(pdev, cdev);
	dev_info(&pdev->dev, "Cooling device registered: %s\n", cdev->type);
	return 0;
}
开发者ID:3null,项目名称:linux,代码行数:23,代码来源:db8500_cpufreq_cooling.c
示例2: cpudl_find
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable), or -1 when no
 * suitable CPU exists.
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
struct cpumask *later_mask)
{
int best_cpu = -1;
const struct sched_dl_entity *dl_se = &p->dl;

/*
 * First choice: any CPU that is simultaneously free (no deadline task
 * queued), allowed by the task's affinity mask, and active.  Both
 * cpumask_and() calls return nonzero only if the intersection is
 * non-empty, so later_mask ends up holding the candidate set.
 */
if (later_mask && cpumask_and(later_mask, cp->free_cpus,
&p->cpus_allowed) && cpumask_and(later_mask,
later_mask, cpu_active_mask)) {
best_cpu = cpumask_any(later_mask);
goto out;
} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
/*
 * Second choice: the CPU currently running the latest deadline
 * (the heap maximum), provided the task may run there and its
 * own deadline is earlier, i.e. preempting would be correct.
 */
best_cpu = cpudl_maximum(cp);
if (later_mask)
cpumask_set_cpu(best_cpu, later_mask);
}
out:
/* best_cpu must be a present CPU or the "not found" sentinel -1 */
WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
return best_cpu;
}
开发者ID:aragua,项目名称:linux,代码行数:31,代码来源:cpudeadline.c
示例3: amlogic_register_thermal
/*
 * amlogic_register_thermal - register with the in-kernel thermal management
 * @pdata: platform data holding the cooling-device and thermal-zone handles
 *
 * Registers a cpufreq cooling device covering CPU 0, a cpucore cooling
 * device, and the thermal zone itself.  On any failure everything
 * registered so far is torn down via amlogic_unregister_thermal().
 *
 * Returns 0 on success or -EINVAL on any registration failure.
 */
static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata)
{
	int ret = 0;
	struct cpumask mask_val;

	/* cooling mask: CPU 0 only (cleared first since it is on the stack) */
	memset(&mask_val, 0, sizeof(struct cpumask));
	cpumask_set_cpu(0, &mask_val);

	pdata->cpu_cool_dev = cpufreq_cooling_register(&mask_val);
	if (IS_ERR(pdata->cpu_cool_dev)) {
		pr_err("Failed to register cpufreq cooling device\n");
		ret = -EINVAL;
		goto err_unregister;
	}

	pdata->cpucore_cool_dev = cpucore_cooling_register();
	if (IS_ERR(pdata->cpucore_cool_dev)) {
		/* BUG FIX: this path used to repeat the cpufreq message */
		pr_err("Failed to register cpucore cooling device\n");
		ret = -EINVAL;
		goto err_unregister;
	}

	pdata->therm_dev = thermal_zone_device_register(pdata->name,
			pdata->temp_trip_count, 7, pdata, &amlogic_dev_ops, NULL, 0,
			pdata->idle_interval);
	if (IS_ERR(pdata->therm_dev)) {
		pr_err("Failed to register thermal zone device\n");
		ret = -EINVAL;
		goto err_unregister;
	}

	pr_info("amlogic: Kernel Thermal management registered\n");
	return 0;

err_unregister:
	amlogic_unregister_thermal(pdata);
	return ret;
}
开发者ID:codesnake,项目名称:kernel_amlogic_meson-common,代码行数:38,代码来源:amlogic_thermal.c
示例4: disable_nonboot_cpus
/*
 * disable_nonboot_cpus - take every CPU except the boot CPU offline
 *
 * Used on the suspend/hibernate path.  Each successfully-offlined CPU is
 * recorded in frozen_cpus so the matching enable path can bring the same
 * set back up on resume.  Stops at the first _cpu_down() failure.
 *
 * Returns 0 on success, or the error from the first failed _cpu_down().
 */
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	/* serialize against concurrent CPU-map updates */
	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			/* remember this CPU for re-onlining at resume */
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
开发者ID:tsj123,项目名称:androidx86_remix,代码行数:37,代码来源:cpu.c
示例5: prom_init
/*
 * prom_init - early firmware/bootloader handoff (Netlogic XLR boards)
 *
 * Reads the 32-bit firmware arguments, snapshots the firmware info block,
 * installs the CPU init code at the reset vector, performs early serial
 * and command-line/memory setup, and (under CONFIG_SMP) wakes the
 * secondary CPUs listed in the firmware's online_cpu_map bitmap.
 */
void __init prom_init(void)
{
	int *argv, *envp; /* passed as 32 bit ptrs */
	struct psb_info *prom_infop;
	void *reset_vec;
#ifdef CONFIG_SMP
	int i;
#endif

	/* truncate to 32 bit and sign extend all args */
	argv = (int *)(long)(int)fw_arg1;
	envp = (int *)(long)(int)fw_arg2;
	prom_infop = (struct psb_info *)(long)(int)fw_arg3;

	/* keep a local copy before the firmware's memory may be reused */
	nlm_prom_info = *prom_infop;
	nlm_init_node();

	/* Update reset entry point with CPU init code */
	reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS);
	memset(reset_vec, 0, RESET_VEC_SIZE);
	memcpy(reset_vec, (void *)nlm_reset_entry,
			(nlm_reset_entry_end - nlm_reset_entry));

	nlm_early_serial_setup();
	build_arcs_cmdline(argv);
	prom_add_memory();

#ifdef CONFIG_SMP
	/* firmware reports online CPUs as a 32-bit bitmap, one bit per CPU */
	for (i = 0; i < 32; i++)
		if (nlm_prom_info.online_cpu_map & (1 << i))
			cpumask_set_cpu(i, &nlm_cpumask);
	nlm_wakeup_secondary_cpus();
	register_smp_ops(&nlm_smp_ops);
#endif
	xlr_board_info_setup();
	xlr_percpu_fmn_init();
}
开发者ID:AlexanderGraham,项目名称:linux,代码行数:37,代码来源:setup.c
示例6: percpu_ida_free
/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	unsigned nr_free;

	BUG_ON(tag >= pool->nr_tags);

	tags = raw_cpu_ptr(pool->tag_cpu);

	spin_lock_irqsave(&tags->lock, flags);
	tags->freelist[tags->nr_free++] = tag;

	nr_free = tags->nr_free;
	if (nr_free == 1) {
		/*
		 * This CPU's freelist just went from empty to non-empty:
		 * advertise it as a steal candidate and wake any allocator
		 * waiting on the pool.
		 */
		cpumask_set_cpu(smp_processor_id(),
				&pool->cpus_have_tags);
		wake_up(&pool->wait);
	}
	spin_unlock_irqrestore(&tags->lock, flags);

	if (nr_free == pool->percpu_max_size) {
		/*
		 * Per-CPU freelist is full: return a batch to the global
		 * pool.  Lock order is pool->lock then tags->lock; nr_free
		 * is re-checked under both locks because it may have changed
		 * after tags->lock was dropped above.
		 */
		spin_lock_irqsave(&pool->lock, flags);
		spin_lock(&tags->lock);
		if (tags->nr_free == pool->percpu_max_size) {
			move_tags(pool->freelist, &pool->nr_free,
				tags->freelist, &tags->nr_free,
				pool->percpu_batch_size);
			wake_up(&pool->wait);
		}
		spin_unlock(&tags->lock);
		spin_unlock_irqrestore(&pool->lock, flags);
	}
}
开发者ID:krzk,项目名称:linux,代码行数:44,代码来源:percpu_ida.c
示例7: crash_ipi_callback
/*
 * crash_ipi_callback - per-CPU handler for the crash/kexec IPI (powerpc)
 * @regs: register state captured when the IPI was taken
 *
 * Saves this CPU's register state at most once, then parks the CPU until
 * the crashing CPU signals (via time_to_dump) that the kdump kernel is
 * about to boot.  Does not return.
 */
void crash_ipi_callback(struct pt_regs *regs)
{
	/* which CPUs have already saved their state (save exactly once) */
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	/* let the crashing CPU know this CPU has arrived */
	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic_inc();

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 */
	while (!time_to_dump)
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;); /* FIXME */
#endif
	/* NOTREACHED */
}
开发者ID:03199618,项目名称:linux,代码行数:36,代码来源:crash.c
示例8: rq_attach_root
/*
 * rq_attach_root - attach a runqueue to a new root domain
 * @rq: the runqueue to move
 * @rd: the root domain to attach it to
 *
 * Detaches @rq from its current root domain (if any), dropping that
 * domain's refcount — the old domain is freed via RCU once the last
 * reference goes away — then adds @rq to @rd's span and marks the rq
 * online if its CPU is active.  Runs under rq->lock with IRQs disabled.
 */
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		/* take the rq offline before removing it from the old span */
		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we dont want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	/* free the old domain after all readers are done (RCU-sched) */
	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
开发者ID:the-snowwhite,项目名称:linux-socfpga,代码行数:36,代码来源:topology.c
示例9: kzalloc
/*
 * Take a map of online CPUs and the number of available interrupt vectors
 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
 * so that they are distributed as good as possible around the CPUs. If
 * more vectors than CPUs are available we'll map one to each CPU,
 * otherwise we map one to the first sibling of each socket.
 *
 * If there are more vectors than CPUs we will still only have one bit
 * set per CPU, but interrupt code will keep on assigning the vectors from
 * the start of the bitmap until we run out of vectors.
 *
 * Returns the allocated mask (caller frees), or NULL when only one vector
 * remains — on allocation failure *nr_vecs is forced to 1 as well.
 */
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
	struct cpumask *affinity_mask;
	unsigned int max_vecs = *nr_vecs;

	/* a single vector needs no spreading; NULL means "no affinity" */
	if (max_vecs == 1)
		return NULL;

	affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!affinity_mask) {
		/* degrade gracefully to one unpinned vector */
		*nr_vecs = 1;
		return NULL;
	}

	get_online_cpus();	/* keep the online mask stable while scanning */
	if (max_vecs >= num_online_cpus()) {
		/* plenty of vectors: one bit per online CPU */
		cpumask_copy(affinity_mask, cpu_online_mask);
		*nr_vecs = num_online_cpus();
	} else {
		unsigned int vecs = 0, cpu;

		/* fewer vectors than CPUs: one per first sibling of a socket */
		for_each_online_cpu(cpu) {
			if (cpu == get_first_sibling(cpu)) {
				cpumask_set_cpu(cpu, affinity_mask);
				vecs++;
			}
			if (--max_vecs == 0)
				break;
		}
		*nr_vecs = vecs;
	}
	put_online_cpus();
	return affinity_mask;
}
开发者ID:gxt,项目名称:linux,代码行数:47,代码来源:affinity.c
示例10: bench_outstanding_parallel_cpus
/*
 * bench_outstanding_parallel_cpus - run the page-alloc benchmark on a
 *	restricted set of CPUs concurrently
 * @loops: number of iterations each CPU runs
 * @nr_cpus: run on CPUs 0..nr_cpus-1
 * @outstanding_pages: pages kept outstanding per benchmark step
 */
void noinline bench_outstanding_parallel_cpus(uint32_t loops, int nr_cpus,
					      int outstanding_pages)
{
	const char *desc = "parallel_cpus";
	struct time_bench_sync sync;
	struct time_bench_cpu *cpu_tasks;
	struct cpumask my_cpumask;
	int i;

	/*
	 * Allocate records for CPUs.  BUG FIX: use kcalloc() (overflow
	 * checked) and bail out on failure instead of handing a NULL
	 * array to the benchmark core.
	 */
	cpu_tasks = kcalloc(nr_cpus, sizeof(*cpu_tasks), GFP_KERNEL);
	if (!cpu_tasks)
		return;

	/* Reduce number of CPUs to run on */
	cpumask_clear(&my_cpumask);
	for (i = 0; i < nr_cpus; i++)
		cpumask_set_cpu(i, &my_cpumask);
	pr_info("Limit to %d parallel CPUs\n", nr_cpus);

	time_bench_run_concurrent(loops, outstanding_pages, NULL,
				  &my_cpumask, &sync, cpu_tasks,
				  time_alloc_pages_outstanding);
	time_bench_print_stats_cpumask(desc, cpu_tasks, &my_cpumask);
	kfree(cpu_tasks);
}
开发者ID:netoptimizer,项目名称:prototype-kernel,代码行数:24,代码来源:page_bench02.c
示例11: hyperv_prepare_irq_remapping
/*
 * hyperv_prepare_irq_remapping - set up the Hyper-V IO-APIC IRQ domain
 *
 * Creates the "HYPERV-IR" hierarchy domain for IO-APIC interrupts and
 * computes ioapic_max_cpumask: the set of CPUs whose APIC ID fits in
 * 8 bits, since Hyper-V's IO-APIC accepts only 8-bit APIC IDs.
 *
 * Returns 0 on success, -ENODEV when not on Hyper-V with x2APIC, or
 * -ENOMEM if the fwnode cannot be allocated.
 */
static int __init hyperv_prepare_irq_remapping(void)
{
	struct fwnode_handle *fn;
	int i;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV) ||
	    !x2apic_supported())
		return -ENODEV;

	fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
	if (!fn)
		return -ENOMEM;

	ioapic_ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
				0, IOAPIC_REMAPPING_ENTRY, fn,
				&hyperv_ir_domain_ops, NULL);

	/* the fwnode is only needed for domain creation; drop it here */
	irq_domain_free_fwnode(fn);

	/*
	 * Hyper-V doesn't provide irq remapping function for
	 * IO-APIC and so IO-APIC only accepts 8-bit APIC ID.
	 * Cpu's APIC ID is read from ACPI MADT table and APIC IDs
	 * in the MADT table on Hyper-v are sorted monotonic increasingly.
	 * APIC ID reflects cpu topology. There maybe some APIC ID
	 * gaps when cpu number in a socket is not power of two. Prepare
	 * max cpu affinity for IOAPIC irqs. Scan cpu 0-255 and set cpu
	 * into ioapic_max_cpumask if its APIC ID is less than 256.
	 */
	for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
		if (cpu_physical_id(i) < 256)
			cpumask_set_cpu(i, &ioapic_max_cpumask);

	return 0;
}
开发者ID:Anjali05,项目名称:linux,代码行数:36,代码来源:hyperv-iommu.c
示例12: bl_idle_driver_init
/*
 * bl_idle_driver_init - bind a big.LITTLE cpuidle driver to one CPU type
 * @drv: the cpuidle driver to populate
 * @cpu_id: CPU part number (masked against 0xFFF0) selecting the cluster
 *
 * Allocates a cpumask containing every possible CPU whose part number
 * matches @cpu_id and installs it as the driver's cpumask.
 *
 * Returns 0 on success or -ENOMEM if the mask cannot be allocated.
 */
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
{
	struct cpumask *mask;
	int cpu;

	mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *info = &per_cpu(cpu_data, cpu);
		unsigned long id;

		/* on UP the per-cpu id is not populated; read the register */
		id = is_smp() ? info->cpuid : read_cpuid_id();

		/* keep only CPUs whose part-number field matches cpu_id */
		if ((id & 0xFFF0) == cpu_id)
			cpumask_set_cpu(cpu, mask);
	}

	drv->cpumask = mask;

	return 0;
}
开发者ID:03199618,项目名称:linux,代码行数:24,代码来源:cpuidle-big_little.c
示例13: homecache_mask
/* Return a mask of the cpus whose caches currently own these pages. */
static void homecache_mask(struct page *page, int pages,
			   struct cpumask *home_mask)
{
	int i;

	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);

		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			/*
			 * Immutable/incoherent pages may be cached on any
			 * CPU; the answer degenerates to "all possible
			 * CPUs", so stop scanning early.
			 */
			cpumask_copy(home_mask, cpu_possible_mask);
			return;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		/* hash-for-home pages are owned by the hashing CPU set */
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		/* uncached pages live in no CPU's cache */
		if (home == PAGE_HOME_UNCACHED)
			continue;
		/* anything else must be a valid single-CPU home */
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
}
开发者ID:ANFS,项目名称:ANFS-kernel,代码行数:25,代码来源:homecache.c
示例14: update_siblings_masks
/*
 * update_siblings_masks - propagate core/thread sibling masks for a CPU
 * @cpuid: the CPU whose topology entry was just populated
 *
 * Cross-links @cpuid's sibling masks with every other possible CPU in the
 * same cluster (core siblings) and the same core (thread siblings).  If
 * the device tree provided no topology, falls back to a single-core
 * default where the CPU is its own only sibling.
 */
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	struct cpu_topology *other;
	int cpu;

	if (cpuid_topo->cluster_id == -1) {
		/*
		 * DT does not contain topology information for this cpu
		 * reset it to default behaviour
		 */
		pr_debug("CPU%u: No topology information configured\n", cpuid);
		cpuid_topo->core_id = 0;
		cpumask_set_cpu(cpuid, &cpuid_topo->core_sibling);
		cpumask_set_cpu(cpuid, &cpuid_topo->thread_sibling);
		return;
	}

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		other = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != other->cluster_id)
			continue;

		/* same cluster: mark each other as core siblings */
		cpumask_set_cpu(cpuid, &other->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != other->core_id)
			continue;

		/* same core too: also thread siblings */
		cpumask_set_cpu(cpuid, &other->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}
开发者ID:7799,项目名称:linux,代码行数:36,代码来源:topology.c
示例15: tegra_cpuidle_register
/*
 * tegra_cpuidle_register - build and register the cpuidle driver for @cpu
 * @cpu: CPU number this per-CPU driver instance covers
 *
 * Populates the per-CPU cpuidle driver with up to three idle states:
 * CPU clock gating (always), CPU power gating (CONFIG_PM_SLEEP), and —
 * on CPU 0 only — an MC-clock-stop state that adds the DRAM self-refresh
 * exit latency, registered disabled by default.
 *
 * Returns 0 on success or -EIO if cpuidle_register() fails.
 */
static int tegra_cpuidle_register(unsigned int cpu)
{
	struct cpuidle_driver *drv;
	struct cpuidle_state *state;

	drv = &per_cpu(cpuidle_drv, cpu);
	drv->name = driver_name;
	drv->owner = owner;
	drv->cpumask = &per_cpu(idle_mask, cpu);
	cpumask_set_cpu(cpu, drv->cpumask);
	drv->state_count = 0;

	/* State: CPU clock gating — shallowest, always available */
	state = &drv->states[CPUIDLE_STATE_CLKGATING];
	snprintf(state->name, CPUIDLE_NAME_LEN, "clock-gated");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU clock gated");
	state->exit_latency = 10;
	state->target_residency = 10;
	state->power_usage = 600;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_clock_gating;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	drv->safe_state_index = 0;
#endif
	drv->state_count++;

#ifdef CONFIG_PM_SLEEP
	/* State: CPU power gating — latency comes from the power-good time */
	state = &drv->states[CPUIDLE_STATE_POWERGATING];
	snprintf(state->name, CPUIDLE_NAME_LEN, "powered-down");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU power gated");
	state->exit_latency = tegra_cpu_power_good_time();
	state->target_residency = tegra_cpu_power_off_time() +
		tegra_cpu_power_good_time();
	if (state->target_residency < tegra_pd_min_residency)
		state->target_residency = tegra_pd_min_residency;
	state->power_usage = 100;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_pd;
	drv->state_count++;

	if (cpu == 0) {
		/*
		 * State (CPU 0 only): stop the memory-controller clock with
		 * DRAM in self-refresh; starts disabled until explicitly
		 * enabled from userspace/platform code.
		 */
		state = &drv->states[CPUIDLE_STATE_MC_CLK_STOP];
		snprintf(state->name, CPUIDLE_NAME_LEN, "mc-clock");
		snprintf(state->desc, CPUIDLE_DESC_LEN, "MC clock stop");
		state->exit_latency = tegra_cpu_power_good_time() +
			DRAM_SELF_REFRESH_EXIT_LATENCY;
		state->target_residency = tegra_cpu_power_off_time() +
			tegra_cpu_power_good_time() + DRAM_SELF_REFRESH_EXIT_LATENCY;
		if (state->target_residency < tegra_mc_clk_stop_min_residency())
			state->target_residency =
				tegra_mc_clk_stop_min_residency();
		state->power_usage = 0;
		state->flags = CPUIDLE_FLAG_TIME_VALID;
		state->enter = tegra_idle_enter_pd;
		state->disabled = true;
		drv->state_count++;
	}
#endif

	if (cpuidle_register(drv, NULL)) {
		pr_err("CPU%u: failed to register driver\n", cpu);
		return -EIO;
	}

	/* switch these CPUs to the broadcast timer while idling deeply */
	on_each_cpu_mask(drv->cpumask, tegra_cpuidle_setup_bctimer,
			(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);

	return 0;
}
开发者ID:1ee7,项目名称:linux_l4t_tx1,代码行数:68,代码来源:cpuidle.c
示例16: ixgbe_alloc_q_vector
/**
* ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
* @v_count: q_vectors allocated on adapter, used for ring interleaving
* @v_idx: index of vector in adapter struct
* @txr_count: total number of Tx rings to allocate
* @txr_idx: index of first Tx ring to allocate
* @rxr_count: total number of Rx rings to allocate
* @rxr_idx: index of first Rx ring to allocate
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int v_count, int v_idx,
int txr_count, int txr_idx,
int rxr_count, int rxr_idx)
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
int node = -1;
int cpu = -1;
int ring_count, size;
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
if (cpu_online(v_idx)) {
cpu = v_idx;
node = cpu_to_node(cpu);
}
}
/* allocate q_vector and rings */
q_vector = kzalloc_node(size, GFP_KERNEL, node);
if (!q_vector)
q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
q_vector->numa_node = node;
#ifdef CONFIG_IXGBE_DCA
/* initialize CPU for DCA */
q_vector->cpu = -1;
#endif
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
q_vector->v_idx = v_idx;
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
/* initialize pointer to rings */
ring = q_vector->ring;
/* intialize ITR */
if (txr_count && !rxr_count) {
/* tx only vector */
if (adapter->tx_itr_setting == 1)
q_vector->itr = IXGBE_10K_ITR;
else
q_vector->itr = adapter->tx_itr_setting;
} else {
/* rx or rx/tx vector */
if (adapter->rx_itr_setting == 1)
q_vector->itr = IXGBE_20K_ITR;
else
q_vector->itr = adapter->rx_itr_setting;
}
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Tx values */
ixgbe_add_ring(ring, &q_vector->tx);
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
//.........这里部分代码省略.........
开发者ID:3null,项目名称:fastsocket,代码行数:101,代码来源:ixgbe_lib.c
示例17: cpufreq_interactivex_timer
/*
 * cpufreq_interactivex_timer - per-CPU sampling timer for the interactiveX
 * cpufreq governor
 * @data: CPU number this timer instance monitors
 *
 * Compares idle time accumulated since the last idle exit against wall
 * time: zero idle time triggers an immediate jump to the max frequency,
 * while sustained activity at the current frequency for at least
 * min_sample_time allows a drop to the min frequency.  Actual frequency
 * changes are deferred to the up/down workqueues via work_cpumask.
 */
static void cpufreq_interactivex_timer(unsigned long data)
{
	u64 delta_idle;
	u64 update_time;
	u64 *cpu_time_in_idle;
	u64 *cpu_idle_exit_time;
	struct timer_list *t;

	u64 now_idle = get_cpu_idle_time_us(data,
						&update_time);

	cpu_time_in_idle = &per_cpu(time_in_idle, data);
	cpu_idle_exit_time = &per_cpu(idle_exit_time, data);

	/* no time has passed since the last idle exit; nothing to sample */
	if (update_time == *cpu_idle_exit_time)
		return;

	delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);

	/* Scale up if there were no idle cycles since coming out of idle */
	if (delta_idle == 0) {
		if (policy->cur == policy->max)
			return;

		if (nr_running() < 1)
			return;

		target_freq = policy->max;
		cpumask_set_cpu(data, &work_cpumask);
		queue_work(up_wq, &freq_scale_work);
		return;
	}

	/*
	 * There is a window where if the cpu utlization can go from low to high
	 * between the timer expiring, delta_idle will be > 0 and the cpu will
	 * be 100% busy, preventing idle from running, and this timer from
	 * firing. So setup another timer to fire to check cpu utlization.
	 * Do not setup the timer if there is no scheduled work.
	 */
	t = &per_cpu(cpu_timer, data);
	if (!timer_pending(t) && nr_running() > 0) {
		*cpu_time_in_idle = get_cpu_idle_time_us(
				data, cpu_idle_exit_time);
		mod_timer(t, jiffies + 2);
	}

	if (policy->cur == policy->min)
		return;

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
		return;

	target_freq = policy->min;
	cpumask_set_cpu(data, &work_cpumask);
	queue_work(down_wq, &freq_scale_work);
}
开发者ID:bedwa,项目名称:Infusion-GB,代码行数:63,代码来源:cpufreq_interactivex.c
示例18: tick_nohz_stop_sched_tick
//.........这里部分代码省略.........
* calculate the expiry time for the next timer wheel
* timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
* that there is no timer pending or at least extremely
* far into the future (12 days for HZ=1000). In this
* case we set the expiry to the end of time.
*/
if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
/*
* Calculate the time delta for the next timer event.
* If the time delta exceeds the maximum time delta
* permitted by the current clocksource then adjust
* the time delta accordingly to ensure the
* clocksource does not wrap.
*/
time_delta = min_t(u64, time_delta,
tick_period.tv64 * delta_jiffies);
expires = ktime_add_ns(last_update, time_delta);
} else {
expires.tv64 = KTIME_MAX;
}
/*
* If this cpu is the one which updates jiffies, then
* give up the assignment and let it be taken by the
* cpu which runs the tick timer next, which might be
* this cpu as well. If we don't drop this here the
* jiffies might be stale and do_timer() never
* invoked.
*/
if (cpu == tick_do_timer_cpu)
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
if (delta_jiffies > 1)
cpumask_set_cpu(cpu, nohz_cpu_mask);
/* Skip reprogram of event if its not changed */
if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
goto out;
/*
* nohz_stop_sched_tick can be called several times before
* the nohz_restart_sched_tick is called. This happens when
* interrupts arrive which do not cause a reschedule. In the
* first call we save the current tick time, so we can restart
* the scheduler tick in nohz_restart_sched_tick.
*/
if (!ts->tick_stopped) {
if (select_nohz_load_balancer(1)) {
/*
* sched tick not stopped!
*/
cpumask_clear_cpu(cpu, nohz_cpu_mask);
goto out;
}
ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
ts->idle_jiffies = last_jiffies;
rcu_enter_nohz();
}
ts->idle_sleeps++;
/* Mark expires */
ts->idle_expires = expires;
开发者ID:marcero,项目名称:ab73kernel-Hannspad-2632,代码行数:66,代码来源:tick-sched.c
示例19: acpi_cpufreq_cpu_init
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
unsigned int valid_states = 0;
unsigned int cpu = policy->cpu;
struct acpi_cpufreq_data *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
pr_debug("acpi_cpufreq_cpu_init\n");
#ifdef CONFIG_SMP
if (blacklisted)
return blacklisted;
blacklisted = acpi_cpufreq_blacklist(c);
if (blacklisted)
return blacklisted;
#endif
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
result = -ENOMEM;
goto err_free;
}
perf = per_cpu_ptr(acpi_perf_data, cpu);
data->acpi_perf_cpu = cpu;
policy->driver_data = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
result = acpi_processor_register_performance(perf, cpu);
if (result)
goto err_free_mask;
policy->shared_type = perf->shared_type;
/*
* Will let policy->cpus know about dependency only when software
* coordination is required.
*/
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
cpumask_copy(policy->cpus, perf->shared_cpu_map);
}
cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
}
if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus,
topology_sibling_cpumask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr_info_once(PFX "overriding BIOS provided _PSD data\n");
}
#endif
/* capability check */
if (perf->state_count <= 1) {
pr_debug("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if (perf->control_register.space_id != perf->status_register.space_id) {
result = -ENODEV;
goto err_unreg;
}
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 == 0xf) {
pr_debug("AMD K8 systems must use native drivers.\n");
result = -ENODEV;
goto err_unreg;
}
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
pr_debug("HARDWARE addr space\n");
if (check_est_cpu(cpu)) {
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
break;
//.........这里部分代码省略.........
开发者ID:343829084,项目名称:linux-study,代码行数:101,代码来源:acpi-cpufreq.c
示例20: smp_callin
//.........这里部分代码省略.........
{
int cpuid, phys_id;
unsigned long timeout;
/*
* If waken up by an INIT in an 82489DX configuration
* we may get here before an INIT-deassert IPI reaches
* our local APIC. We have to wait for the IPI or we'll
* lock up on an APIC access.
*
* Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI.
*/
cpuid = smp_processor_id();
if (apic->wait_for_init_deassert && cpuid != 0)
apic->wait_for_init_deassert(&init_deasserted);
/*
* (This works even if the APIC is not enabled.)
*/
phys_id = read_apic_id();
if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
phys_id, cpuid);
}
pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
/*
* STARTUP IPIs are fragile beasts as they might sometimes
* trigger some glue motherboard logic. Complete APIC bus
* silence for 1 second, this overestimates the time the
* boot CPU is spending to send the up to 2 STARTUP IPIs
* by a factor of two. This should be enough.
*/
/*
* Waiting 2s total for startup (udelay is not yet working)
*/
timeout = jiffies + 2*HZ;
while (time_before(jiffies, timeout)) {
/*
* Has the boot CPU finished it's STARTUP sequence?
*/
if (cpumask_test_cpu(cpuid, cpu_callout_mask))
break;
cpu_relax();
}
if (!time_before(jiffies, timeout)) {
panic("%s: CPU%d started up but did not get a callout!\n",
__func__, cpuid);
}
/*
* the boot CPU has finished the init stage and is spinning
* on callin_map until we finish. We are free to set up this
* CPU, first the APIC. (this is probably redundant on most
* boards)
*/
pr_debug("CALLIN, before setup_local_APIC()\n");
if (apic->smp_callin_clear_local_apic)
apic->smp_callin_clear_local_apic();
setup_local_APIC();
end_local_APIC_setup();
/*
* Need to setup vector mappings before we enable interrupts.
*/
setup_vector_irq(smp_processor_id());
/*
* Save our processor parameters. Note: this information
* is needed for clock calibration.
*/
smp_store_cpu_info(cpuid);
/*
* Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to
* smp_store_cpu_info() stored a value that is close but not as
* accurate as the value just calculated.
*/
calibrate_delay();
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid);
/*
* This must be done before setting cpu_online_mask
* or calling notify_cpu_starting.
*/
set_cpu_sibling_map(raw_smp_processor_id());
wmb();
notify_cpu_starting(cpuid);
/*
* Allow the master to continue.
*/
cpumask_set_cpu(cpuid, cpu_callin_mask);
}
开发者ID:sunchentong,项目名称:Adam-Kernel-GalaxyS6-G920F,代码行数:101,代码来源:smpboot.c
注:本文中的cpumask_set_cpu函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论