本文整理汇总了C语言中smp_call_function函数的典型用法代码示例。如果您正苦于以下问题:C语言 smp_call_function函数的具体用法?C语言 smp_call_function怎么用?C语言 smp_call_function使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了smp_call_function函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C语言代码示例。
示例1: twd_rate_change
/*
 * Clock-rate-change notifier callback for the twd local timer.
 *
 * On a POST_RATE_CHANGE event, cross-call every other CPU so each
 * per-cpu twd clock event device is reprogrammed for the new rate.
 * Always returns NOTIFY_OK.
 */
static int twd_rate_change(struct notifier_block *nb,
			   unsigned long flags, void *data)
{
	struct clk_notifier_data *cnd = data;

	/*
	 * The twd clock events must be reprogrammed to account for the
	 * new frequency.  The timer is local to a cpu, so cross-call to
	 * the changing cpus.
	 */
	if (flags != POST_RATE_CHANGE)
		return NOTIFY_OK;

	smp_call_function(twd_update_frequency,
			  (void *)&cnd->new_rate, 1);

	return NOTIFY_OK;
}
开发者ID:SynerconTechnologies,项目名称:FLA-Kernel,代码行数:16,代码来源:smp_twd.c
示例2: do_cpuid
/*
 * Execute CPUID leaf @reg on @cpu, storing EAX..EDX into @data[0..3].
 *
 * Runs the instruction directly when @cpu is the current CPU,
 * otherwise packages the request and sends it over via IPI.
 * Preemption is disabled across the check so the "current CPU"
 * answer cannot change under us.
 */
static inline void do_cpuid(int cpu, u32 reg, u32 *data)
{
	struct cpuid_command cmd;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Remote target: marshal the request and IPI it over. */
		cmd.cpu = cpu;
		cmd.reg = reg;
		cmd.data = data;
		smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
	} else {
		cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
	}
	preempt_enable();
}
开发者ID:robacklin,项目名称:celinux,代码行数:16,代码来源:cpuid.c
示例3: smp_send_stop
/*
 * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a
 * clean IRQ state.
 */
void smp_send_stop(void)
{
	int timeout = 10;

	/* Ask every other CPU to run stop_this_cpu(); do not wait for it. */
	smp_call_function(stop_this_cpu, NULL, 0);

	/* Wait 10ms for all other CPUs to go offline. */
	while ( (num_online_cpus() > 1) && (timeout-- > 0) )
		mdelay(1);

	/* Now take this CPU down too, with interrupts masked while the
	 * APICs are being shut off. */
	local_irq_disable();
	__stop_this_cpu();
	disable_IO_APIC();
	hpet_disable();
	local_irq_enable();
}
开发者ID:CrazyXen,项目名称:XEN_CODE,代码行数:20,代码来源:smp.c
示例4: flush_tlb_mm
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
		int cpu;

		/*
		 * mm is private to this task: just zero the remote
		 * context numbers so a fresh context is allocated if the
		 * mm ever migrates.
		 */
		for_each_online_cpu(cpu)
			if (cpu != smp_processor_id())
				cpu_context(cpu, mm) = 0;
	} else {
		/* mm may be live elsewhere: broadcast the flush. */
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	}

	local_flush_tlb_mm(mm);
	preempt_enable();
}
开发者ID:bsingharora,项目名称:linux,代码行数:28,代码来源:smp.c
示例5: flush_tlb_page
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd;
fd.vma = vma;
fd.addr1 = page;
smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
} else {
int i;
for (i = 0; i < smp_num_cpus; i++)
if (smp_processor_id() != i)
CPU_CONTEXT(i, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
}
开发者ID:huangyukun2012,项目名称:linux-2.4.21,代码行数:16,代码来源:smp.c
示例6: read_pat_on_cpus
/*
 * Read the original PAT value on the boot CPU, then run
 * read_pat_on_cpu() on every other CPU (waiting for completion),
 * collecting per-slot failure flags in fail[].
 *
 * Returns 0 on success, the read_pat() error code, -EIO when the
 * first failure slot is set, or -EFAULT when the second is set.
 */
static int read_pat_on_cpus(void)
{
	int fail[2] = {0, 0};
	int rc;

	rc = read_pat(&compat_pat_wc.original_pat);
	if (rc)
		return rc;

	/* Broadcast to the other cpus and wait until they are done. */
	smp_call_function((void(*)(void*))read_pat_on_cpu, &fail, 1, 1);

	if (fail[0])
		return -EIO;
	return fail[1] ? -EFAULT : 0;
}
开发者ID:ido,项目名称:openonload,代码行数:16,代码来源:compat_pat_wc.c
示例7: check_nmi_watchdog
/*
 * Boot-time sanity check of the NMI watchdog (x86-64): keep every CPU
 * busy for ~10 watchdog ticks and verify each CPU's NMI count advanced.
 * Returns 0 when the watchdog ticks everywhere, -1 on allocation
 * failure or when any CPU appears stuck (watchdog is then disabled).
 */
int __init check_nmi_watchdog (void)
{
	/* endflag is polled by nmi_cpu_busy() on the other CPUs; setting it
	 * releases them from their busy loop.  volatile: written here, read
	 * concurrently elsewhere. */
	volatile int endflag = 0;
	int *counts;
	int cpu;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	/* Spin the other CPUs (no wait) so their LAPIC counters keep
	 * running during the measurement window. */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	/* Snapshot per-cpu NMI counts before the delay. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		/* Expect ~10 NMIs in the window; <=5 means it is stuck. */
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			endflag = 1;	/* release the busy-looping CPUs */
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu,
				counts[cpu],
				cpu_pda(cpu)->__nmi_count);
			/* Disable the watchdog machinery entirely. */
			nmi_active = 0;
			lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
			nmi_perfctr_msr = 0;
			kfree(counts);
			return -1;
		}
	}
	endflag = 1;	/* release the busy-looping CPUs */
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}
开发者ID:FatSunHYS,项目名称:OSCourseDesign,代码行数:47,代码来源:nmi.c
示例8: vfp_init
/*
 * VFP support code initialisation.
 *
 * Probes for a VFP unit by reading FPSID through the temporary
 * "testing" exception vector, then — if a usable unit is found —
 * enables VFP on all CPUs, installs the real support vector, and
 * advertises HWCAP_VFP to userspace.  Always returns 0.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();	/* testing vector must be visible before the FPSID access */
	vfpsid = fmrx(FPSID);
	barrier();	/* FPSID access must complete before swapping the vector back */
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		/* NOTE(review): VFP_arch non-zero here presumably means the
		 * FPSID access trapped into vfp_testing_entry — confirm. */
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		/* Enable VFP on the secondary CPUs too, waiting for each. */
		smp_call_function(vfp_enable, NULL, 1, 1);
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
		vfp_vector = vfp_support_entry;
		thread_register_notifier(&vfp_notifier_block);

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
	}
	return 0;
}
开发者ID:274914765,项目名称:C,代码行数:50,代码来源:vfpmodule.c
示例9: kexec_prepare_cpus
/*
 * Quiesce all secondary CPUs ahead of kexec: IPI them into
 * kexec_smp_down(), then spin until each one's paca hw_cpu_id reads -1
 * (the marker that the cpu has gone down).  Finally run the platform's
 * kexec_cpu_down hook and mask local interrupts.
 */
static void kexec_prepare_cpus(void)
{
	int my_cpu, i, notified=-1;

	/* wait=0: the target CPUs never "complete" the call — they go down. */
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	my_cpu = get_cpu();

	/* check the others cpus are now down (via paca hw cpu id == -1) */
	for (i=0; i < NR_CPUS; i++) {
		if (i == my_cpu)
			continue;

		while (paca[i].hw_cpu_id != -1) {
			barrier();	/* force hw_cpu_id to be re-read each pass */

			if (!cpu_possible(i)) {
				printk("kexec: cpu %d hw_cpu_id %d is not"
					" possible, ignoring\n",
					i, paca[i].hw_cpu_id);
				break;
			}

			if (!cpu_online(i)) {
				/* Fixme: this can be spinning in
				 * pSeries_secondary_wait with a paca
				 * waiting for it to go online.
				 */
				printk("kexec: cpu %d hw_cpu_id %d is not"
					" online, ignoring\n",
					i, paca[i].hw_cpu_id);
				break;
			}
			if (i != notified) {
				/* Log each cpu we are waiting on only once. */
				printk( "kexec: waiting for cpu %d (physical"
					" %d) to go down\n",
					i, paca[i].hw_cpu_id);
				notified = i;
			}
		}
	}

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	put_cpu();

	local_irq_disable();
}
开发者ID:HappyASR,项目名称:LinuxKernel2.6.27,代码行数:47,代码来源:machine_kexec_64.c
示例10: pause_other_cpus
/*
 * Run @fn(@arg) on the current CPU while every other online CPU is held
 * in blocker().  (NOTE(review): blocker() is not visible here —
 * presumably it spins until blocker_count is set to BLOCKER_INVALID;
 * confirm against its implementation.)
 *
 * Returns @fn's result, 0 when @fn is NULL or skipped, or -1 when the
 * other CPUs fail to check in within MAX_BLOCKER_WAIT_MSEC.
 */
int pause_other_cpus(pause_fn_t fn, void *arg)
{
	int ret = 0;
	unsigned long flags;
	int online_cpus_cnt;
	unsigned long deadline;
	bool synced = true;

	preempt_disable();

	BLOCKER_DEBUG("Running pause on cpu %d\n", smp_processor_id());

	atomic_set(&blocker_count, 0);
	online_cpus_cnt = num_online_cpus();

	if (online_cpus_cnt > 1) {
		deadline = jiffies + HZ * MAX_BLOCKER_WAIT_MSEC / 1000 + 1;

		/* Park every other online CPU in blocker(); don't wait here. */
		smp_call_function(blocker, NULL, false);

		/*
		 * Wait for all other CPUs to check in.  Track success with an
		 * explicit flag: the old code re-tested time_before() after
		 * the loop, so a check-in that landed right at the deadline
		 * could be misreported as a timeout.
		 */
		synced = false;
		while (time_before(jiffies, deadline)) {
			if (atomic_read(&blocker_count) + 1 == online_cpus_cnt) {
				synced = true;
				break;
			}
		}

		if (!synced) {
			pr_err("BLOCKER: Failed %s, online:%d, count:%d\n",
			       __func__, online_cpus_cnt,
			       (int)atomic_read(&blocker_count));
			/* Release any CPUs that did arrive. */
			atomic_set(&blocker_count, BLOCKER_INVALID);
			ret = -1;
			goto error;
		}
	}

	local_irq_save(flags);

	/* Count ourselves in before checking for full attendance. */
	atomic_inc(&blocker_count);
	BLOCKER_DEBUG("In critical section on cpu %d\n", smp_processor_id());

	if (fn && atomic_read(&blocker_count) == online_cpus_cnt)
		ret = fn(arg);
	else
		pr_debug("Skip calling fn in blocker! fn: 0x%08X, rsp: %d\n",
			 (unsigned int)fn, atomic_read(&blocker_count));

	/* Release other CPUs */
	atomic_set(&blocker_count, BLOCKER_INVALID);
	local_irq_restore(flags);

error:
	BLOCKER_DEBUG("Finishing pause on cpu %d\n", smp_processor_id());
	preempt_enable();
	return ret;
}
开发者ID:CVlaspoel,项目名称:VSMC-i9105p,代码行数:46,代码来源:blocker.c
示例11: RTDECL
/*
 * RTMpOnSpecific - run @pfnWorker(@pvUser1, @pvUser2) on the CPU given
 * by @idCpu (older IPRT/Linux variant using raw preempt_disable()).
 *
 * Returns VINF_SUCCESS when the worker ran on the target CPU,
 * VERR_CPU_NOT_FOUND when @idCpu is not a possible CPU, or
 * VERR_CPU_OFFLINE when it is (or went) offline.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
	int rc;
	RTMPARGS Args;

	Args.pfnWorker = pfnWorker;
	Args.pvUser1 = pvUser1;
	Args.pvUser2 = pvUser2;
	Args.idCpu = idCpu;
	Args.cHits = 0;

	if (!RTMpIsCpuPossible(idCpu))
		return VERR_CPU_NOT_FOUND;

# ifdef preempt_disable
	/* Pin ourselves so RTMpCpuId() stays valid across the call. */
	preempt_disable();
# endif
	if (idCpu != RTMpCpuId())
	{
		if (RTMpIsCpuOnline(idCpu))
		{
/* smp_call_function(_single) lost its 'retry' argument in 2.6.27;
 * _single itself only exists from 2.6.19 on. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
			rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#else /* older kernels */
			rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
			Assert(rc == 0);
			/* cHits tells whether the wrapper really ran on the target cpu. */
			rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
		}
		else
			rc = VERR_CPU_OFFLINE;
	}
	else
	{
		/* Caller is already on the target CPU: run the worker inline. */
		rtmpLinuxWrapper(&Args);
		rc = VINF_SUCCESS;
	}
# ifdef preempt_enable
	preempt_enable();
# endif

	NOREF(rc);
	return rc;
}
开发者ID:lskakaxi,项目名称:virtualbox-drv,代码行数:46,代码来源:mp-r0drv-linux.c
示例12: flush_tlb_range
/*
 * Flush the TLB entries covering [start, end) in @mm on all CPUs
 * (2.4-era SMP API: explicit smp_num_cpus loop, 4-argument
 * smp_call_function).
 */
void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
		int cpu;

		/* mm is private to us: lazily invalidate remote contexts. */
		for (cpu = 0; cpu < smp_num_cpus; cpu++)
			if (cpu != smp_processor_id())
				cpu_context(cpu, mm) = 0;
	} else {
		struct flush_tlb_data fd;

		/* mm may be active elsewhere: IPI the range flush over. */
		fd.mm = mm;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	}

	local_flush_tlb_range(mm, start, end);
}
开发者ID:SimonKagstrom,项目名称:mci500h-linux-2.4.27,代码行数:17,代码来源:smp.c
示例13: cacheop_on_each_cpu
/*
 * Run @func(@info) on every online CPU — remote CPUs via IPI
 * (honouring @wait), then the local CPU directly — with preemption
 * disabled for the duration.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
				       int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() != 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}
开发者ID:24hours,项目名称:linux,代码行数:17,代码来源:cache.c
示例14: check_nmi_watchdog
/*
 * Boot-time check that the NMI watchdog ticks on every CPU (Xen):
 * busy-wait all CPUs for ~10 ticks and compare per-cpu NMI counts
 * before and after.  Always returns 0; stuck CPUs are only reported
 * and the watchdog rate is left alone... the LAPIC path still has its
 * frequency reduced afterwards.
 */
int __init check_nmi_watchdog (void)
{
	static unsigned int __initdata prev_nmi_count[NR_CPUS];
	int cpu;
	bool_t ok = 1;

	if ( !nmi_watchdog )
		return 0;

	printk("Testing NMI watchdog on all CPUs:");

	/* Snapshot each CPU's NMI count before the busy-wait window. */
	for_each_online_cpu ( cpu )
		prev_nmi_count[cpu] = nmi_count(cpu);

	/* Wait for 10 ticks.  Busy-wait on all CPUs: the LAPIC counter that
	 * the NMI watchdog uses only runs while the core's not halted */
	if ( nmi_watchdog == NMI_LOCAL_APIC )
		smp_call_function(wait_for_nmis, NULL, 0);
	wait_for_nmis(NULL);

	for_each_online_cpu ( cpu )
	{
		/* Expect ~10 NMIs in the window; <=5 means stuck. */
		if ( nmi_count(cpu) - prev_nmi_count[cpu] <= 5 )
		{
			printk(" %d", cpu);
			ok = 0;
		}
	}

	printk(" %s\n", ok ? "ok" : "stuck");

	/*
	 * Now that we know it works we can reduce NMI frequency to
	 * something more reasonable; makes a difference in some configs.
	 * There's a limit to how slow we can go because writing the perfctr
	 * MSRs only sets the low 32 bits, with the top 8 bits sign-extended
	 * from those, so it's not possible to set up a delay larger than
	 * 2^31 cycles and smaller than (2^40 - 2^31) cycles.
	 * (Intel SDM, section 18.22.2)
	 */
	if ( nmi_watchdog == NMI_LOCAL_APIC )
		nmi_hz = max(1ul, cpu_khz >> 20);

	return 0;
}
开发者ID:caobosco,项目名称:libxlPVUSB,代码行数:45,代码来源:nmi.c
示例15: pal_cache_flush
/*
 * Emulate PAL_CACHE_FLUSH for a KVM vcpu (ia64): fetch the guest's PAL
 * call arguments, run the flush on all other CPUs via IPI, then invoke
 * the host PAL flush locally with interrupts masked.  Returns the PAL
 * status/values in an ia64_pal_retval.
 */
static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
{
	u64 gr28, gr29, gr30, gr31;
	struct ia64_pal_retval result = {0, 0, 0, 0};
	struct cache_flush_args args = {0, 0, 0, 0};
	long psr;

	gr28 = gr29 = gr30 = gr31 = 0;
	/* gr28..gr31 = guest's PAL call arguments (gr29: cache type,
	 * gr30: operation, gr31 must be 0). */
	kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);

	if (gr31 != 0)
		printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);

	/* Always call Host Pal in int=1 */
	gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;

	args.cache_type = gr29;
	args.operation = gr30;
	/* Flush on every other CPU and wait for them to finish. */
	smp_call_function(remote_pal_cache_flush,
				(void *)&args, 1);
	if (args.status != 0)
		printk(KERN_ERR"pal_cache_flush error!,"
				"status:0x%lx\n", args.status);
	/*
	 * Call Host PAL cache flush
	 * Clear psr.ic when call PAL_CACHE_FLUSH
	 */
	local_irq_save(psr);
	result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
						&result.v0);
	local_irq_restore(psr);
	if (result.status != 0)
		printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
				"in1:%lx,in2:%lx\n",
				vcpu, result.status, gr29, gr30);

#if 0
	/* Disabled: cache-coherency map bookkeeping, intentionally kept
	 * out of the build. */
	if (gr29 == PAL_CACHE_TYPE_COHERENT) {
		cpus_setall(vcpu->arch.cache_coherent_map);
		cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
		cpus_setall(cpu_cache_coherent_map);
		cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
	}
#endif
	return result;
}
开发者ID:AjayMashi,项目名称:nitro-kvm,代码行数:45,代码来源:kvm_fw.c
示例16: RTDECL
/*
 * RTMpOnSpecific - run @pfnWorker(@pvUser1, @pvUser2) on the CPU given
 * by @idCpu (IPRT preempt-state variant, EFLAGS.AC preserving).
 *
 * Returns VINF_SUCCESS when the worker ran on the target CPU,
 * VERR_CPU_NOT_FOUND when @idCpu is not a possible CPU, or
 * VERR_CPU_OFFLINE when it is (or went) offline.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
	IPRT_LINUX_SAVE_EFL_AC();
	int rc;
	RTMPARGS Args;
	RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

	Args.pfnWorker = pfnWorker;
	Args.pvUser1 = pvUser1;
	Args.pvUser2 = pvUser2;
	Args.idCpu = idCpu;
	Args.cHits = 0;

	if (!RTMpIsCpuPossible(idCpu))
		return VERR_CPU_NOT_FOUND;

	/* Pin ourselves so RTMpCpuId() stays valid across the call. */
	RTThreadPreemptDisable(&PreemptState);
	if (idCpu != RTMpCpuId())
	{
		if (RTMpIsCpuOnline(idCpu))
		{
/* smp_call_function(_single) lost its 'retry' argument in 2.6.27;
 * _single itself only exists from 2.6.19 on. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
			rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#else /* older kernels */
			rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
			Assert(rc == 0);
			/* cHits tells whether the wrapper really ran on the target cpu. */
			rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
		}
		else
			rc = VERR_CPU_OFFLINE;
	}
	else
	{
		/* Caller is already on the target CPU: run the worker inline. */
		rtmpLinuxWrapper(&Args);
		rc = VINF_SUCCESS;
	}
	RTThreadPreemptRestore(&PreemptState);
	NOREF(rc);
	IPRT_LINUX_RESTORE_EFL_AC();
	return rc;
}
开发者ID:jeppeter,项目名称:vbox,代码行数:45,代码来源:mp-r0drv-linux.c
示例17: sys_perfmonctl
asmlinkage int
sys_perfmonctl (int cmd, void *data)
{
struct perfmon_struct *pdata;
int err;
printk("sys_perfmonctl: cmd = 0x%x\n", cmd);
pdata = kmalloc(sizeof(struct perfmon_struct), GFP_USER);
err = __copy_from_user(pdata, data, sizeof(struct perfmon_struct));
switch(cmd) {
case PMC_CMD_BUFFER:
perfmon_buffer_ctl(data);
break;
case PMC_CMD_DUMP:
perfmon_dump_ctl(data);
break;
case PMC_CMD_DECR_PROFILE: /* NIA time sampling */
decr_profile(data);
break;
case PMC_CMD_PROFILE:
perfmon_profile_ctl(pdata);
break;
case PMC_CMD_TRACE:
perfmon_trace_ctl(pdata);
break;
case PMC_CMD_TIMESLICE:
perfmon_timeslice_ctl(pdata);
break;
#if 0
case PMC_OP_TIMESLICE:
pmc_enable_timeslice(pdata);
break;
case PMC_OP_DUMP_TIMESLICE:
pmc_dump_timeslice(pdata);
smp_call_function(pmc_dump_timeslice, (void *)pdata, 0, 1);
break;
#endif
default:
printk("Perfmon: Unknown command\n");
break;
}
kfree(pdata);
return 0;
}
开发者ID:Picture-Elements,项目名称:linux-2.4-peijse,代码行数:45,代码来源:perfmon.c
示例18: fiq_glue_register_handler
/*
 * Register a FIQ handler with the trusty FIQ glue.
 *
 * The handler is pushed onto the fiq_handlers singly-linked list under
 * fiq_glue_lock; if it is the first handler, the low-level FIQ vector
 * is installed as well.  Returns 0 on success, -EINVAL for a bad
 * handler, -ENODEV when no trusty device is bound, or the
 * fiq_glue_set_handler() error (with the list change rolled back).
 */
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
{
	int ret;

	if (!handler || !handler->fiq) {
		ret = -EINVAL;
		goto err_bad_arg;
	}

	mutex_lock(&fiq_glue_lock);
	if (!trusty_dev) {
		ret = -ENODEV;
		goto err_no_trusty;
	}

	handler->next = fiq_handlers;
	/*
	 * Write barrier paired with smp_read_barrier_depends in
	 * trusty_fiq_handler. Make sure next pointer is updated before
	 * fiq_handlers so trusty_fiq_handler does not see an uninitialized
	 * value and terminate early or crash.
	 */
	smp_wmb();
	fiq_handlers = handler;
	/* NOTE(review): no-op cross-call with wait=true — presumably forces
	 * every CPU through an IPI so all observe the new list head before
	 * we proceed; confirm against smp_nop_call. */
	smp_call_function(smp_nop_call, NULL, true);
	if (!handler->next) {
		/* First handler ever: install the actual FIQ vector. */
		ret = fiq_glue_set_handler();
		if (ret)
			goto err_set_fiq_handler;
	}
	mutex_unlock(&fiq_glue_lock);
	return 0;

err_set_fiq_handler:
	/* Roll the list back to its previous head. */
	fiq_handlers = handler->next;
err_no_trusty:
	mutex_unlock(&fiq_glue_lock);
err_bad_arg:
	pr_err("%s: failed, %d\n", __func__, ret);
	return ret;
}
开发者ID:FrozenCow,项目名称:FIRE-ICE,代码行数:45,代码来源:trusty-fiq-arm64.c
示例19: flush_tlb_page
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd;
fd.vma = vma;
fd.addr1 = page;
smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
if (smp_processor_id() != i)
cpu_context(i, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
preempt_enable();
}
开发者ID:1x23,项目名称:unifi-gpl,代码行数:18,代码来源:smp.c
示例20: setup_APIC_clocks
/*
 * Switch the system over to local APIC timer interrupts: calibrate the
 * APIC timer with interrupts off, program it on this CPU, then push the
 * same calibration to all other CPUs via cross-call.
 */
void __init setup_APIC_clocks (void)
{
	printk("Using local APIC timer interrupts.\n");
	using_apic_timer = 1;

	__cli();	/* calibrate and program with interrupts masked */

	calibration_result = calibrate_APIC_clock();
	/*
	 * Now set up the timer for real.
	 */
	setup_APIC_timer((void *)calibration_result);

	__sti();

	/* and update all other cpus */
	smp_call_function(setup_APIC_timer, (void *)calibration_result, 1, 1);
}
开发者ID:liexusong,项目名称:Linux-2.4.16,代码行数:18,代码来源:apic.c
注:本文中的smp_call_function函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论