/* ktap mainthread initialization, main entry for ktap */
ktap_state *kp_newstate(struct ktap_parm *parm, struct dentry *dir, char **argv)
{
ktap_state *ks;
pid_t pid;
int cpu;
ks = kzalloc(sizeof(ktap_state) + sizeof(ktap_global_state),
GFP_KERNEL);
if (!ks)
return NULL;
ks->stack = kp_malloc(ks, KTAP_STACK_SIZE);
G(ks) = (ktap_global_state *)(ks + 1);
G(ks)->mainthread = ks;
G(ks)->seed = 201236; /* todo: make more random in future */
G(ks)->task = current;
G(ks)->verbose = parm->verbose; /* for debug use */
G(ks)->print_timestamp = parm->print_timestamp;
G(ks)->workload = parm->workload;
INIT_LIST_HEAD(&(G(ks)->timers));
INIT_LIST_HEAD(&(G(ks)->probe_events_head));
G(ks)->exit = 0;
if (kp_transport_init(ks, dir))
goto out;
pid = (pid_t)parm->trace_pid;
if (pid != -1) {
struct task_struct *task;
rcu_read_lock();
task = pid_task(find_vpid(pid), PIDTYPE_PID);
if (!task) {
kp_error(ks, "cannot find pid %d\n", pid);
rcu_read_unlock();
goto out;
}
G(ks)->trace_task = task;
get_task_struct(task);
rcu_read_unlock();
}
if (!alloc_cpumask_var(&G(ks)->cpumask, GFP_KERNEL))
goto out;
cpumask_copy(G(ks)->cpumask, cpu_online_mask);
cpu = parm->trace_cpu;
if (cpu != -1) {
if (!cpu_online(cpu)) {
printk(KERN_INFO "ktap: cpu %d is not online\n", cpu);
goto out;
}
cpumask_clear(G(ks)->cpumask);
cpumask_set_cpu(cpu, G(ks)->cpumask);
}
if (cfunction_cache_init(ks))
goto out;
kp_tstring_resize(ks, 512); /* set initial string hashtable size */
ktap_init_state(ks);
ktap_init_registry(ks);
ktap_init_arguments(ks, parm->argc, argv);
/* init library */
kp_init_baselib(ks);
kp_init_kdebuglib(ks);
kp_init_timerlib(ks);
kp_init_ansilib(ks);
if (alloc_kp_percpu_data())
goto out;
if (kp_probe_init(ks))
goto out;
return ks;
out:
G(ks)->exit = 1;
kp_final_exit(ks);
return NULL;
}
Developer ID: joelagnel, Project: ktap, Lines of code: 87, Source file: vm.c
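The cpumask handling in kp_newstate() (alloc_cpumask_var, the cpu_online() check, cpumask_clear/cpumask_set_cpu) is a common pattern for restricting tracing to a single CPU. A minimal sketch of that step on its own; build_trace_cpumask() is a hypothetical helper name, while the cpumask calls are the standard kernel API already used above:
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>
/* hypothetical helper; mirrors the trace_cpu handling in kp_newstate() */
static int build_trace_cpumask(cpumask_var_t *mask, int trace_cpu)
{
	if (!alloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;
	/* default: trace on every CPU that is currently online */
	cpumask_copy(*mask, cpu_online_mask);
	if (trace_cpu != -1) {
		/* restrict tracing to one CPU, but only if it is online */
		if (!cpu_online(trace_cpu)) {
			free_cpumask_var(*mask);
			return -EINVAL;
		}
		cpumask_clear(*mask);
		cpumask_set_cpu(trace_cpu, *mask);
	}
	return 0;
}
/* usage sketch: cpumask_var_t cpus; build_trace_cpumask(&cpus, parm->trace_cpu); */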
Example 2: can_use_console
/*
* Can we actually use the console at this time on this cpu?
*
* Console drivers may assume that per-cpu resources have
* been allocated. So unless they're explicitly marked as
* being able to cope (CON_ANYTIME) don't call them until
* this CPU is officially up.
*/
static inline int can_use_console(unsigned int cpu)
{
return cpu_online(cpu) || have_callable_console();
}
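A hedged sketch of how a printk-style caller might use a guard like can_use_console() before touching the console drivers. It assumes the caller runs with preemption disabled, as the console-unlock path does; emit_to_consoles() is a hypothetical stand-in, not a real kernel function:
#include <linux/smp.h>
#include <linux/types.h>
static void emit_to_consoles(const char *buf, size_t len);	/* hypothetical */
static void try_emit(const char *buf, size_t len)
{
	/*
	 * A CPU that is still coming up may not have its per-cpu data
	 * allocated yet, so only call into the console drivers when this
	 * CPU is online or some console declared CON_ANYTIME (which is
	 * what have_callable_console() reports).
	 */
	if (!can_use_console(smp_processor_id()))
		return;
	emit_to_consoles(buf, len);
}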
static void hotplug_decision_work_fn(struct work_struct *work)
{
unsigned int running, disable_load, sampling_rate, avg_running = 0;
unsigned int online_cpus, available_cpus, i, j;
bool hotplug_flag_on = false;
bool hotplug_flag_off = false;
#if DEBUG
unsigned int k;
#endif
if (!isEnabled)
return;
online_cpus = num_online_cpus();
available_cpus = CPUS_AVAILABLE;
disable_load = DISABLE_LOAD_THRESHOLD; // * online_cpus;
//enable_load = ENABLE_LOAD_THRESHOLD; // * online_cpus;
/*
* Multiply nr_running() by 100 so we don't have to
* use fp division to get the average.
*/
running = nr_running() * 100;
history[index] = running;
#if DEBUG
pr_info("online_cpus is: %d\n", online_cpus);
//pr_info("enable_load is: %d\n", enable_load);
pr_info("disable_load is: %d\n", disable_load);
pr_info("index is: %d\n", index);
pr_info("running is: %d\n", running);
#endif
/*
* Use a circular buffer to calculate the average load
* over the sampling periods.
* This will absorb load spikes of short duration where
* we don't want additional cores to be onlined because
* the cpufreq driver should take care of those load spikes.
*/
for (i = 0, j = index; i < SAMPLING_PERIODS; i++, j--) {
avg_running += history[j];
if (unlikely(j == 0))
j = INDEX_MAX_VALUE;
}
/*
* If we are at the end of the buffer, return to the beginning.
*/
if (unlikely(index++ == INDEX_MAX_VALUE))
index = 0;
#if DEBUG
pr_info("array contents: ");
for (k = 0; k < SAMPLING_PERIODS; k++) {
pr_info("%d: %d\t",k, history[k]);
}
pr_info("\n");
pr_info("avg_running before division: %d\n", avg_running);
#endif
avg_running = avg_running / SAMPLING_PERIODS;
#if DEBUG
pr_info("average_running is: %d\n", avg_running);
#endif
if (likely(!(flags & HOTPLUG_DISABLED))) {
int cpu;
for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++)
{
if (avg_running >= enable_load[cpu] && (!cpu_online(cpu)))
{
hotplug_cpu_single_on[cpu] = 1;
hotplug_flag_on = true;
}
else if (avg_running < enable_load[cpu] && (cpu_online(cpu)))
{
hotplug_cpu_single_off[cpu] = 1;
hotplug_flag_off = true;
}
}
if (unlikely((avg_running >= ENABLE_ALL_LOAD_THRESHOLD) && (online_cpus < available_cpus))) {
pr_info("auto_hotplug: Onlining all CPUs, avg running: %d\n", avg_running);
/*
* Flush any delayed offlining work from the workqueue.
* No point in having expensive unnecessary hotplug transitions.
* We still online after flushing, because load is high enough to
* warrant it.
* We set the paused flag so the sampling can continue but no more
* hotplug events will occur.
*/
flags |= HOTPLUG_PAUSED;
if (delayed_work_pending(&aphotplug_offline_work))
cancel_delayed_work(&aphotplug_offline_work);
hotplug_flag_on = false;
schedule_work_on(0, &hotplug_online_all_work);
return;
} else if (flags & HOTPLUG_PAUSED) {
schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
//......... part of the code omitted here .........
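The core of this example is the circular-buffer load average. A self-contained sketch of that technique, under the same assumptions the example makes (SAMPLING_PERIODS slots, an index that wraps at INDEX_MAX_VALUE, samples pre-multiplied by 100 to avoid fractional division); the wrap is written here so each slot is summed exactly once per pass:
#define SAMPLING_PERIODS	10	/* assumed value; the driver defines its own */
#define INDEX_MAX_VALUE		(SAMPLING_PERIODS - 1)
static unsigned int history[SAMPLING_PERIODS];
static unsigned int index;
/* record one sample (already scaled by 100) and return the running average */
static unsigned int record_and_average(unsigned int running_x100)
{
	unsigned int avg = 0;
	unsigned int i, j;
	history[index] = running_x100;
	/* walk backwards from the newest sample, wrapping at slot 0 */
	for (i = 0, j = index; i < SAMPLING_PERIODS; i++, j--) {
		avg += history[j];
		if (j == 0)
			j = SAMPLING_PERIODS;	/* the j-- in the for() then lands on the last slot */
	}
	/* advance the write position for the next sample */
	if (index++ == INDEX_MAX_VALUE)
		index = 0;
	return avg / SAMPLING_PERIODS;
}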
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
if (num_online_cpus() == 1)
return -EBUSY;
if (!cpu_online(cpu))
return -EINVAL;
cpu_hotplug_begin();
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
nr_calls--;
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
pr_warn("%s: attempt to take down CPU %u failed\n",
__func__, cpu);
goto out_release;
}
/*
* By now we've cleared cpu_active_mask, wait for all preempt-disabled
* and RCU users of this state to go away such that all new such users
* will observe it.
*
* For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
* not imply sync_sched(), so explicitly call both.
*
* Do sync before park smpboot threads to take care the rcu boost case.
*/
#ifdef CONFIG_PREEMPT
synchronize_sched();
#endif
synchronize_rcu();
smpboot_park_threads(cpu);
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
smpboot_unpark_threads(cpu);
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
goto out_release;
}
BUG_ON(cpu_online(cpu));
/*
* The migration_call() CPU_DYING callback will have removed all
* runnable tasks from the cpu, there's only the idle task left now
* that the migration thread is done doing the stop_machine thing.
*
* Wait for the stop thread to go away.
*/
while (!idle_cpu(cpu))
cpu_relax();
/* This actually kills the CPU. */
__cpu_die(cpu);
/* CPU is completely dead: tell everyone. Too late to complain. */
cpu_notify_nofail(CPU_DEAD | mod, hcpu);
check_for_tasks(cpu);
out_release:
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
return err;
}
Developer ID: 383530895, Project: linux, Lines of code: 82, Source file: cpu.c
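_cpu_down() above drives interested subsystems through CPU_DOWN_PREPARE, CPU_DEAD and CPU_POST_DEAD notifications. A sketch of the receiving side using the legacy (pre cpuhp state machine) notifier API that matches this kernel generation; the callback body is illustrative, while register_cpu_notifier(), the CPU_* actions and CPU_TASKS_FROZEN are real symbols from that era:
#include <linux/cpu.h>
#include <linux/notifier.h>
static int my_hotplug_cb(struct notifier_block *nb,
			 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/* quiesce per-cpu work before take_cpu_down() runs on @cpu */
		break;
	case CPU_DOWN_FAILED:
		/* the CPU stayed online; undo the CPU_DOWN_PREPARE work */
		break;
	case CPU_DEAD:
		/* the CPU is gone; safe to free its per-cpu resources */
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block my_hotplug_nb = {
	.notifier_call = my_hotplug_cb,
};
/* typically called once from module init: register_cpu_notifier(&my_hotplug_nb); */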
Example 9: switch_L2
int switch_L2(enum options option)
{
int i, cpu;
int err = 0;
int retry=0;
u64 t1;
u64 t2;
unsigned long mask = (1<<0);
if(option >= BORROW_NONE) {
pr_err("wrong option %d\n", option);
return -1;
}
t1 = sched_clock();
/* bind this process to main cpu */
while(sched_setaffinity(0, (struct cpumask*) &mask) < 0)
{
pr_err("Could not set cpu 0 affinity for current process(%d).\n", retry);
retry++;
if(retry > 100)
{
return -1;
}
}
/*disable hot-plug*/
hps_set_enabled(0);
is_l2_borrowed = 0;
for(i=1; i<NR_CPUS; i++)
{
if(cpu_online(i))
{
err = cpu_down(i);
if(err < 0)
{
pr_err("[L2$ sharing] disable cpu %d failed!\n", i);
hps_set_enabled(1);
return -1;
}
}
}
/* disable preemption */
cpu = get_cpu();
/* enable other clusters' power */
enable_secondary_clusters_pwr();
config_L2_size(option);
if(option == BORROW_L2)
{
is_l2_borrowed = 1;
}
else // if(option == RETURN_L2)
{
is_l2_borrowed = 0;
/* Disable other clusters' power */
disable_secondary_clusters_pwr();
}
/*enable hot-plug*/
hps_set_enabled(1);
put_cpu();
t2 = sched_clock();
if(option == BORROW_L2)
{
pr_notice("[%s]: borrow L2$ cost %llu ns\n", __func__, t2 - t1);
}
else
{
pr_notice("[%s]: return L2$ cost %llu ns\n", __func__, t2 - t1);
}
return err;
}
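The (struct cpumask *)&mask cast in switch_L2() is only safe while the possible CPUs fit in a single unsigned long. A sketch of the same "pin the caller to CPU 0" step using the cpumask helpers instead (the in-kernel sched_setaffinity() treats pid 0 as the current task); the helper name and retry limit are illustrative:
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/sched.h>
static int pin_current_to_cpu0(void)
{
	int retry;
	for (retry = 0; retry < 100; retry++) {
		/* pid 0 means "the current task" for the in-kernel call */
		if (sched_setaffinity(0, cpumask_of(0)) == 0)
			return 0;
		pr_err("could not bind current task to cpu 0 (retry %d)\n", retry);
	}
	return -EBUSY;
}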
static int __ref __cpu_hotplug(bool out_flag, enum hotplug_cmd cmd)
{
int i = 0;
int ret = 0;
if (exynos_dm_hotplug_disabled())
return 0;
#if defined(CONFIG_SCHED_HMP)
if (out_flag) {
if (do_disable_hotplug)
goto blk_out;
if (cmd == CMD_BIG_OUT && !in_low_power_mode) {
for (i = setup_max_cpus - 1; i >= NR_CA7; i--) {
if (cpu_online(i)) {
ret = cpu_down(i);
if (ret)
goto blk_out;
}
}
} else {
for (i = setup_max_cpus - 1; i > 0; i--) {
if (cpu_online(i)) {
ret = cpu_down(i);
if (ret)
goto blk_out;
}
}
}
} else {
if (in_suspend_prepared)
goto blk_out;
if (cmd == CMD_BIG_IN) {
if (in_low_power_mode)
goto blk_out;
for (i = NR_CA7; i < setup_max_cpus; i++) {
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
} else {
if ((big_hotpluged && !do_disable_hotplug) ||
(cmd == CMD_LITTLE_IN)) {
for (i = 1; i < NR_CA7; i++) {
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
} else {
if (lcd_is_on) {
for (i = NR_CA7; i < setup_max_cpus; i++) {
if (!cpu_online(i)) {
if (i == NR_CA7)
set_hmp_boostpulse(100000);
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
for (i = 1; i < NR_CA7; i++) {
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
} else {
for (i = 1; i < setup_max_cpus; i++) {
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
}
}
}
}
#else
if (out_flag) {
if (do_disable_hotplug)
goto blk_out;
for (i = setup_max_cpus - 1; i > 0; i--) {
if (cpu_online(i)) {
ret = cpu_down(i);
if (ret)
goto blk_out;
}
}
} else {
//......... part of the code omitted here .........
static void __devinit
smp_callin (void)
{
#ifdef XEN
/* work around for spinlock irq assert. */
unsigned long flags;
#endif
int cpuid, phys_id;
extern void ia64_init_itm(void);
#ifdef CONFIG_PERFMON
extern void pfm_init_percpu(void);
#endif
cpuid = smp_processor_id();
phys_id = hard_smp_processor_id();
if (cpu_online(cpuid)) {
printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
phys_id, cpuid);
BUG();
}
fix_b0_for_bsp();
#ifdef XEN
notify_cpu_starting(cpuid);
lock_ipi_calllock(&flags);
#else
lock_ipi_calllock();
#endif
cpu_set(cpuid, cpu_online_map);
#ifdef XEN
unlock_ipi_calllock(flags);
#else
unlock_ipi_calllock();
#endif
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
smp_setup_percpu_timer();
ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
#ifdef CONFIG_PERFMON
pfm_init_percpu();
#endif
local_irq_enable();
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
/*
* Synchronize the ITC with the BP. Need to do this after irqs are
* enabled because ia64_sync_itc() calls smp_call_function_single(), which
* calls spin_unlock_bh(), which calls
* local_bh_enable(), which bugs out if irqs are not enabled...
*/
Dprintk("Going to syncup ITC with BP.\n");
ia64_sync_itc(0);
}
/*
* Get our bogomips.
*/
ia64_init_itm();
#ifndef XEN
calibrate_delay();
#endif
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init();
#endif
/*
* Allow the master to continue.
*/
cpu_set(cpuid, cpu_callin_map);
Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
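The cpu_set(cpuid, cpu_callin_map) at the end of smp_callin() is the AP's half of a handshake with the boot CPU. A hedged sketch of what the boot-CPU side of that wait typically looks like, reusing the legacy cpu_isset()/cpu_callin_map names from this example; the timeout and helper name are illustrative:
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
extern cpumask_t cpu_callin_map;	/* provided by the arch smpboot code */
static int wait_for_ap_callin(int cpu)
{
	int timeout;
	for (timeout = 0; timeout < 50000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			return 0;	/* the AP reached smp_callin() */
		udelay(100);
	}
	printk(KERN_ERR "CPU%d never checked in\n", cpu);
	return -ENODEV;
}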
//......... part of the code omitted here .........
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
p->ioprio = current->ioprio;
/*
* The task hasn't been attached yet, so its cpus_allowed mask will
* not be changed, nor will its assigned CPU.
*
* The cpus_allowed mask of the parent may have changed after it was
* copied first time - so re-copy it here, then check the child's CPU
* to ensure it is on a valid CPU (and if not, just force it back to
* parent's CPU). This avoids a lot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
p->real_parent = current->real_parent;
else
p->real_parent = current;
p->parent = p->real_parent;
spin_lock(&current->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* its process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_cleanup_namespaces;
}
if (clone_flags & CLONE_THREAD) {
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
if (!cputime_eq(current->signal->it_virt_expires,
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
if (num_online_cpus() == 1)
return -EBUSY;
if (!cpu_online(cpu))
return -EINVAL;
cpu_hotplug_begin();
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
nr_calls--;
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n",
__func__, cpu);
goto out_release;
}
smpboot_park_threads(cpu);
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
smpboot_unpark_threads(cpu);
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
goto out_release;
}
BUG_ON(cpu_online(cpu));
/*
* The migration_call() CPU_DYING callback will have removed all
* runnable tasks from the cpu, there's only the idle task left now
* that the migration thread is done doing the stop_machine thing.
*
* Wait for the stop thread to go away.
*/
while (!idle_cpu(cpu))
cpu_relax();
/* This actually kills the CPU. */
__cpu_die(cpu);
/* CPU is completely dead: tell everyone. Too late to complain. */
cpu_notify_nofail(CPU_DEAD | mod, hcpu);
check_for_tasks(cpu);
out_release:
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
return err;
}