/*
* Create a new thread based on an existing one.
* The new thread has name NAME, and starts executing in function FUNC.
* DATA1 and DATA2 are passed to FUNC.
*/
int
thread_fork(const char *name,
void *data1, unsigned long data2,
void (*func)(void *, unsigned long),
struct thread **ret)
{
struct thread *newguy;
int s, result;
/* Allocate a thread */
newguy = thread_create(name);
if (newguy==NULL) {
return ENOMEM;
}
/* Allocate a stack */
newguy->t_stack = kmalloc(STACK_SIZE);
if (newguy->t_stack==NULL) {
kfree(newguy->t_name);
kfree(newguy);
return ENOMEM;
}
/* stick a magic number on the bottom end of the stack */
newguy->t_stack[0] = 0xae;
newguy->t_stack[1] = 0x11;
newguy->t_stack[2] = 0xda;
newguy->t_stack[3] = 0x33;
/* Inherit the current directory */
if (curthread->t_cwd != NULL) {
VOP_INCREF(curthread->t_cwd);
newguy->t_cwd = curthread->t_cwd;
}
/* Set up the pcb (this arranges for func to be called) */
md_initpcb(&newguy->t_pcb, newguy->t_stack, data1, data2, func);
/* Interrupts off for atomicity */
s = splhigh();
/*
* Make sure our data structures have enough space, so we won't
* run out later at an inconvenient time.
*/
result = array_preallocate(sleepers, numthreads+1);
if (result) {
goto fail;
}
result = array_preallocate(zombies, numthreads+1);
if (result) {
goto fail;
}
/* Do the same for the scheduler. */
result = scheduler_preallocate(numthreads+1);
if (result) {
goto fail;
}
/* Make the new thread runnable */
result = make_runnable(newguy);
if (result != 0) {
goto fail;
}
/*
* Increment the thread counter. This must be done atomically
* with the preallocate calls; otherwise the count can be
* temporarily too low, which would obviate its reason for
* existence.
*/
numthreads++;
/* Done with stuff that needs to be atomic */
splx(s);
/*
* Return new thread structure if it's wanted. Note that
* using the thread structure from the parent thread should be
* done only with caution, because in general the child thread
* might exit at any time.
*/
if (ret != NULL) {
*ret = newguy;
}
return 0;
fail:
splx(s);
if (newguy->t_cwd != NULL) {
VOP_DECREF(newguy->t_cwd);
}
kfree(newguy->t_stack);
//......... rest of this function omitted in this listing .........
/* Source: project os161 (developer: cozos), 101 lines, from thread.c */
/* Example 5: trap */
/*ARGSUSED*/
void
trap(struct frame *fp, int type, u_int code, u_int v)
{
extern char fubail[], subail[];
struct lwp *l;
struct proc *p;
struct pcb *pcb;
void *onfault;
ksiginfo_t ksi;
int s;
int rv;
u_quad_t sticks;
curcpu()->ci_data.cpu_ntrap++;
l = curlwp;
p = l->l_proc;
pcb = lwp_getpcb(l);
KSI_INIT_TRAP(&ksi);
ksi.ksi_trap = type & ~T_USER;
if (USERMODE(fp->f_sr)) {
type |= T_USER;
sticks = p->p_sticks;
l->l_md.md_regs = fp->f_regs;
LWP_CACHE_CREDS(l, p);
} else
sticks = 0;
switch (type) {
default:
dopanic:
printf("trap type %d, code = 0x%x, v = 0x%x\n", type, code, v);
printf("%s program counter = 0x%x\n",
(type & T_USER) ? "user" : "kernel", fp->f_pc);
/*
* Let the kernel debugger see the trap frame that
* caused us to panic. This is a convenience so
* one can see registers at the point of failure.
*/
s = splhigh();
#ifdef KGDB
/* If connected, step or cont returns 1 */
if (kgdb_trap(type, (db_regs_t *)fp))
goto kgdb_cont;
#endif
#ifdef DDB
(void)kdb_trap(type, (db_regs_t *)fp);
#endif
#ifdef KGDB
kgdb_cont:
#endif
splx(s);
if (panicstr) {
printf("trap during panic!\n");
#ifdef DEBUG
/* XXX should be a machine-dependent hook */
printf("(press a key)\n"); (void)cngetc();
#endif
}
regdump((struct trapframe *)fp, 128);
type &= ~T_USER;
if ((u_int)type < trap_types)
panic(trap_type[type]);
panic("trap");
case T_BUSERR: /* Kernel bus error */
onfault = pcb->pcb_onfault;
if (onfault == NULL)
goto dopanic;
rv = EFAULT;
/*
* If we have arranged to catch this fault in any of the
* copy to/from user space routines, set PC to return to
* indicated location and set flag informing buserror code
* that it may need to clean up stack frame.
*/
copyfault:
fp->f_stackadj = exframesize[fp->f_format];
fp->f_format = fp->f_vector = 0;
fp->f_pc = (int)onfault;
fp->f_regs[D0] = rv;
return;
case T_BUSERR|T_USER: /* Bus error */
case T_ADDRERR|T_USER: /* Address error */
ksi.ksi_addr = (void *)v;
ksi.ksi_signo = SIGBUS;
ksi.ksi_code = (type == (T_BUSERR|T_USER)) ?
BUS_OBJERR : BUS_ADRERR;
break;
case T_ILLINST|T_USER: /* Illegal instruction fault */
case T_PRIVINST|T_USER: /* Privileged instruction fault */
ksi.ksi_addr = (void *)(int)fp->f_format;
/* XXX was ILL_PRIVIN_FAULT */
ksi.ksi_signo = SIGILL;
ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ?
ILL_PRVOPC : ILL_ILLOPC;
//......... rest of this function omitted in this listing .........
/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 *
 * howto   - RB_* flag bits (RB_NOSYNC, RB_DUMP, RB_HALT) selecting
 *           how much cleanup to do before the reset.
 * bootstr - boot command string; not consulted by this implementation.
 *
 * Does not return: either the hardware reset takes effect or we spin
 * forever at the bottom.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast
	 */
	if (cold) {
		/* Too early in boot for sync/dump; run hooks and reset. */
		doshutdownhooks();
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		goto reset;
	}

	/* Disable console buffering */
	/* NOTE(review): the comment above has no matching code — confirm
	 * whether the buffering call was removed intentionally. */

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the
	 * unmount. It looks like syslogd is getting woken up only to find
	 * that it cannot page part of the binary in as the filesystem has
	 * been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	/* (Only when RB_DUMP is set and RB_HALT is not.) */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks */
	doshutdownhooks();

	/* Make sure IRQ's are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		/* Halt requested: wait for a keypress before resetting. */
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n\r");

reset:
	/*
	 * Make really really sure that all interrupts are disabled,
	 * and poke the Internal Bus and Peripheral Bus reset lines.
	 */
	(void) disable_interrupts(I32_bit|F32_bit);
	*(volatile uint32_t *)(IQ80321_80321_VBASE + VERDE_ATU_BASE +
	    ATU_PCSR) = PCSR_RIB | PCSR_RPB;

	/* ...and if that didn't work, just croak. */
	printf("RESET FAILED!\n");
	for (;;);
}
int
waittest(int nargs, char **args)
{
int i, spl, status, err;
pid_t kid;
pid_t kids2[NTHREADS];
int kids2_head = 0, kids2_tail = 0;
(void)nargs;
(void)args;
init_sem();
kprintf("Starting wait test...\n");
/*
* This first set should (hopefully) still be running when
* wait is called (helped by the splhigh).
*/
kprintf("\n");
kprintf("Set 1 (wait should generally succeed)\n");
kprintf("-------------------------------------\n");
spl = splhigh();
for (i = 0; i < NTHREADS; i++) {
err = thread_fork("wait test thread", waitfirstthread, NULL, i,
&kid);
if (err) {
panic("waittest: thread_fork failed (%d)\n", err);
}
kprintf("Spawned pid %d\n", kid);
kids2[kids2_tail] = kid;
kids2_tail = (kids2_tail+1) % NTHREADS;
}
splx(spl);
for (i = 0; i < NTHREADS; i++) {
kid = kids2[kids2_head];
kids2_head = (kids2_head+1) % NTHREADS;
kprintf("Waiting on pid %d...\n", kid);
err = pid_join(kid, &status, 0);
if (err) {
kprintf("Pid %d waitpid error %d!\n", kid, err);
}
else {
kprintf("Pid %d exit status: %d\n", kid, status);
}
}
/*
* This second set has to V their semaphore before the exit,
* so when wait is called, they will have already exited, but
* their parent is still alive.
*/
kprintf("\n");
kprintf("Set 2 (wait should always succeed)\n");
kprintf("----------------------------------\n");
for (i = 0; i < NTHREADS; i++) {
err = thread_fork("wait test thread", exitfirstthread, NULL, i,
&kid);
if (err) {
panic("waittest: thread_fork failed (%d)\n", err);
}
kprintf("Spawned pid %d\n", kid);
kids2[kids2_tail] = kid;
kids2_tail = (kids2_tail+1) % NTHREADS;
if (err) {
panic("waittest: q_addtail failed (%d)\n", err);
}
}
for (i = 0; i < NTHREADS; i++) {
kid = kids2[kids2_head];
kids2_head = (kids2_head+1) % NTHREADS;
kprintf("Waiting for pid %d to V()...\n", kid);
P(exitsems[i]);
kprintf("Appears that pid %d P()'d\n", kid);
kprintf("Waiting on pid %d...\n", kid);
err = pid_join(kid, &status, 0);
if (err) {
kprintf("Pid %d waitpid error %d!\n", kid, err);
}
else {
kprintf("Pid %d exit status: %d\n", kid, status);
}
}
/*
* This third set has to V their semaphore before the exit, so
* when wait is called, they will have already exited, and
* since we've gone through and disowned them all, their exit
* statuses should have been disposed of already and our waits
* should all fail.
*/
kprintf("\n");
//......... rest of this function omitted in this listing .........
/*
 * Implement device not available (DNA) exception
 *
 * If we were the last lwp to use the FPU, we can simply return.
 * Otherwise, we save the previous state, if necessary, and restore
 * our last saved state.
 *
 * ci - cpu_info of the CPU that took the trap.
 */
void
fpudna(struct cpu_info *ci)
{
	uint16_t cw;		/* x87 control word loaded on first FPU use */
	uint32_t mxcsr;		/* SSE control/status loaded on first FPU use */
	struct lwp *l, *fl;	/* current lwp / lwp that owns this CPU's FPU */
	struct pcb *pcb;
	int s;

	if (ci->ci_fpsaving) {
		/* Recursive trap. */
		x86_enable_intr();
		return;
	}

	/* Lock out IPIs and disable preemption. */
	s = splhigh();
	x86_enable_intr();

	/* Save state on current CPU. */
	l = ci->ci_curlwp;
	pcb = lwp_getpcb(l);
	fl = ci->ci_fpcurlwp;
	if (fl != NULL) {
		/*
		 * It seems we can get here on Xen even if we didn't
		 * switch lwp. In this case do nothing
		 */
		if (fl == l) {
			KASSERT(pcb->pcb_fpcpu == ci);
			/* We already own the FPU; just stop it trapping. */
			clts();
			splx(s);
			return;
		}
		KASSERT(fl != l);
		/* Another lwp's state occupies this FPU; flush it out. */
		fpusave_cpu(true);
		KASSERT(ci->ci_fpcurlwp == NULL);
	}

	/* Save our state if on a remote CPU. */
	if (pcb->pcb_fpcpu != NULL) {
		/* Explicitly disable preemption before dropping spl. */
		KPREEMPT_DISABLE(l);
		splx(s);
		/* Our FPU state lives on another CPU; pull it back here. */
		fpusave_lwp(l, true);
		KASSERT(pcb->pcb_fpcpu == NULL);
		s = splhigh();
		KPREEMPT_ENABLE(l);
	}

	/*
	 * Restore state on this CPU, or initialize. Ensure that
	 * the entire update is atomic with respect to FPU-sync IPIs.
	 */
	clts();
	ci->ci_fpcurlwp = l;
	pcb->pcb_fpcpu = ci;

	if ((l->l_md.md_flags & MDL_USEDFPU) == 0) {
		/* First FPU use by this lwp: init, then load saved
		 * control words rather than a full fxrstor. */
		fninit();
		cw = pcb->pcb_savefpu.fp_fxsave.fx_fcw;
		fldcw(&cw);
		mxcsr = pcb->pcb_savefpu.fp_fxsave.fx_mxcsr;
		x86_ldmxcsr(&mxcsr);
		l->l_md.md_flags |= MDL_USEDFPU;
	} else {
		/*
		 * AMD FPU's do not restore FIP, FDP, and FOP on fxrstor,
		 * leaking other process's execution history. Clear them
		 * manually.
		 */
		static const double zero = 0.0;
		int status;
		/*
		 * Clear the ES bit in the x87 status word if it is currently
		 * set, in order to avoid causing a fault in the upcoming load.
		 */
		fnstsw(&status);
		if (status & 0x80)
			fnclex();
		/*
		 * Load the dummy variable into the x87 stack. This mangles
		 * the x87 stack, but we don't care since we're about to call
		 * fxrstor() anyway.
		 */
		fldummy(&zero);
		fxrstor(&pcb->pcb_savefpu);
	}

	KASSERT(ci == curcpu());
	splx(s);
}
//......... beginning of this function omitted in this listing .........
#endif
leds_off(LEDS_GREEN);
#if TIMESYNCH_CONF_ENABLED
timesynch_init();
timesynch_set_authority_level((linkaddr_node_addr.u8[0] << 4) + 16);
#endif /* TIMESYNCH_CONF_ENABLED */
#if WITH_UIP
process_start(&tcpip_process, NULL);
process_start(&uip_fw_process, NULL); /* Start IP output */
process_start(&slip_process, NULL);
slip_set_input_callback(set_gateway);
{
uip_ipaddr_t hostaddr, netmask;
uip_init();
uip_ipaddr(&hostaddr, 172,16,
linkaddr_node_addr.u8[0],linkaddr_node_addr.u8[1]);
uip_ipaddr(&netmask, 255,255,0,0);
uip_ipaddr_copy(&meshif.ipaddr, &hostaddr);
uip_sethostaddr(&hostaddr);
uip_setnetmask(&netmask);
uip_over_mesh_set_net(&hostaddr, &netmask);
/* uip_fw_register(&slipif);*/
uip_over_mesh_set_gateway_netif(&slipif);
uip_fw_default(&meshif);
uip_over_mesh_init(UIP_OVER_MESH_CHANNEL);
printf("uIP started with IP address %d.%d.%d.%d\n",
uip_ipaddr_to_quad(&hostaddr));
}
#endif /* WITH_UIP */
energest_init();
ENERGEST_ON(ENERGEST_TYPE_CPU);
watchdog_start();
/* Stop the watchdog */
watchdog_stop();
#if !PROCESS_CONF_NO_PROCESS_NAMES
print_processes(autostart_processes);
#else /* !PROCESS_CONF_NO_PROCESS_NAMES */
putchar('\n'); /* include putchar() */
#endif /* !PROCESS_CONF_NO_PROCESS_NAMES */
autostart_start(autostart_processes);
/*
* This is the scheduler loop.
*/
while(1) {
int r;
do {
/* Reset watchdog. */
watchdog_periodic();
r = process_run();
} while(r > 0);
/*
* Idle processing.
*/
int s = splhigh(); /* Disable interrupts. */
/* uart1_active is for avoiding LPM3 when still sending or receiving */
if(process_nevents() != 0 || uart1_active()) {
splx(s); /* Re-enable interrupts. */
} else {
static unsigned long irq_energest = 0;
/* Re-enable interrupts and go to sleep atomically. */
ENERGEST_OFF(ENERGEST_TYPE_CPU);
ENERGEST_ON(ENERGEST_TYPE_LPM);
/* We only want to measure the processing done in IRQs when we
are asleep, so we discard the processing time done when we
were awake. */
energest_type_set(ENERGEST_TYPE_IRQ, irq_energest);
watchdog_stop();
_BIS_SR(GIE | SCG0 | SCG1 | CPUOFF); /* LPM3 sleep. This
statement will block
until the CPU is
woken up by an
interrupt that sets
the wake up flag. */
/* We get the current processing time for interrupts that was
done during the LPM and store it for next time around. */
dint();
irq_energest = energest_type_time(ENERGEST_TYPE_IRQ);
eint();
watchdog_start();
ENERGEST_OFF(ENERGEST_TYPE_LPM);
ENERGEST_ON(ENERGEST_TYPE_CPU);
}
}
}
static int
pci_mem_find(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t type,
bus_addr_t *basep, bus_size_t *sizep, int *flagsp)
{
pcireg_t address, mask, address1 = 0, mask1 = 0xffffffff;
u_int64_t waddress, wmask;
int s, is64bit, isrom;
is64bit = (PCI_MAPREG_MEM_TYPE(type) == PCI_MAPREG_MEM_TYPE_64BIT);
isrom = (reg == PCI_MAPREG_ROM);
if ((!isrom) && (reg < PCI_MAPREG_START ||
#if 0
/*
* Can't do this check; some devices have mapping registers
* way out in left field.
*/
reg >= PCI_MAPREG_END ||
#endif
(reg & 3)))
panic("pci_mem_find: bad request");
if (is64bit && (reg + 4) >= PCI_MAPREG_END)
panic("pci_mem_find: bad 64-bit request");
/*
* Section 6.2.5.1, `Address Maps', tells us that:
*
* 1) The builtin software should have already mapped the device in a
* reasonable way.
*
* 2) A device which wants 2^n bytes of memory will hardwire the bottom
* n bits of the address to 0. As recommended, we write all 1s and see
* what we get back.
*/
s = splhigh();
address = pci_conf_read(pc, tag, reg);
pci_conf_write(pc, tag, reg, 0xffffffff);
mask = pci_conf_read(pc, tag, reg);
pci_conf_write(pc, tag, reg, address);
if (is64bit) {
address1 = pci_conf_read(pc, tag, reg + 4);
pci_conf_write(pc, tag, reg + 4, 0xffffffff);
mask1 = pci_conf_read(pc, tag, reg + 4);
pci_conf_write(pc, tag, reg + 4, address1);
}
splx(s);
if (!isrom) {
/*
* roms should have an enable bit instead of a memory
* type decoder bit. For normal BARs, make sure that
* the address decoder type matches what we asked for.
*/
if (PCI_MAPREG_TYPE(address) != PCI_MAPREG_TYPE_MEM) {
printf("pci_mem_find: expected type mem, found i/o\n");
return (1);
}
/* XXX Allow 64bit bars for 32bit requests.*/
if (PCI_MAPREG_MEM_TYPE(address) !=
PCI_MAPREG_MEM_TYPE(type) &&
PCI_MAPREG_MEM_TYPE(address) !=
PCI_MAPREG_MEM_TYPE_64BIT) {
printf("pci_mem_find: "
"expected mem type %08x, found %08x\n",
PCI_MAPREG_MEM_TYPE(type),
PCI_MAPREG_MEM_TYPE(address));
return (1);
}
}
waddress = (u_int64_t)address1 << 32UL | address;
wmask = (u_int64_t)mask1 << 32UL | mask;
if ((is64bit && PCI_MAPREG_MEM64_SIZE(wmask) == 0) ||
(!is64bit && PCI_MAPREG_MEM_SIZE(mask) == 0)) {
aprint_debug("pci_mem_find: void region\n");
return (1);
}
switch (PCI_MAPREG_MEM_TYPE(address)) {
case PCI_MAPREG_MEM_TYPE_32BIT:
case PCI_MAPREG_MEM_TYPE_32BIT_1M:
break;
case PCI_MAPREG_MEM_TYPE_64BIT:
/*
* Handle the case of a 64-bit memory register on a
* platform with 32-bit addressing. Make sure that
* the address assigned and the device's memory size
* fit in 32 bits. We implicitly assume that if
* bus_addr_t is 64-bit, then so is bus_size_t.
*/
if (sizeof(u_int64_t) > sizeof(bus_addr_t) &&
(address1 != 0 || mask1 != 0xffffffff)) {
printf("pci_mem_find: 64-bit memory map which is "
"inaccessible on a 32-bit platform\n");
return (1);
}
break;
default:
//......... rest of this function omitted in this listing .........
static int
init_zs_linemon(
register queue_t *q,
register queue_t *my_q
)
{
register struct zscom *zs;
register struct savedzsops *szs;
register parsestream_t *parsestream = (parsestream_t *)(void *)my_q->q_ptr;
/*
* we expect the zsaline pointer in the q_data pointer
* from there on we insert our on EXTERNAL/STATUS ISR routine
* into the interrupt path, before the standard handler
*/
zs = ((struct zsaline *)(void *)q->q_ptr)->za_common;
if (!zs)
{
/*
* well - not found on startup - just say no (shouldn't happen though)
*/
return 0;
}
else
{
unsigned long s;
/*
* we do a direct replacement, in case others fiddle also
* if somebody else grabs our hook and we disconnect
* we are in DEEP trouble - panic is likely to be next, sorry
*/
szs = (struct savedzsops *)(void *)kmem_alloc(sizeof(struct savedzsops));
if (szs == (struct savedzsops *)0)
{
parseprintf(DD_INSTALL, ("init_zs_linemon: CD monitor NOT installed - no memory\n"));
return 0;
}
else
{
parsestream->parse_data = (void *)szs;
s = splhigh();
parsestream->parse_dqueue = q; /* remember driver */
szs->zsops = *zs->zs_ops;
szs->zsops.zsop_xsint = zs_xsisr; /* place our bastard */
szs->oldzsops = zs->zs_ops;
emergencyzs = zs->zs_ops;
zsopinit(zs, &szs->zsops); /* hook it up */
(void) splx(s);
parseprintf(DD_INSTALL, ("init_zs_linemon: CD monitor installed\n"));
return 1;
}
}
}
//......... beginning of this function omitted in this listing .........
if(!UIP_CONF_IPV6_RPL) {
uip_ipaddr_t ipaddr;
int i;
uip_ip6addr(&ipaddr, 0xaaaa, 0, 0, 0, 0, 0, 0, 0);
uip_ds6_set_addr_iid(&ipaddr, &uip_lladdr);
uip_ds6_addr_add(&ipaddr, 0, ADDR_TENTATIVE);
printf("Tentative global IPv6 address ");
for(i = 0; i < 7; ++i) {
printf("%02x%02x:",
ipaddr.u8[i * 2], ipaddr.u8[i * 2 + 1]);
}
printf("%02x%02x\n",
ipaddr.u8[7 * 2], ipaddr.u8[7 * 2 + 1]);
}
#else /* WITH_UIP6 */
NETSTACK_RDC.init();
NETSTACK_MAC.init();
NETSTACK_NETWORK.init();
printf("%s %lu %u\n",
NETSTACK_RDC.name,
CLOCK_SECOND / (NETSTACK_RDC.channel_check_interval() == 0? 1:
NETSTACK_RDC.channel_check_interval()),
RF_CHANNEL);
#endif /* WITH_UIP6 */
#if !WITH_UIP6
uart1_set_input(serial_line_input_byte);
serial_line_init();
#endif
#if TIMESYNCH_CONF_ENABLED
timesynch_init();
timesynch_set_authority_level(rimeaddr_node_addr.u8[0]);
#endif /* TIMESYNCH_CONF_ENABLED */
/* process_start(&sensors_process, NULL);
SENSORS_ACTIVATE(button_sensor);*/
energest_init();
ENERGEST_ON(ENERGEST_TYPE_CPU);
print_processes(autostart_processes);
autostart_start(autostart_processes);
duty_cycle_scroller_start(CLOCK_SECOND * 2);
/*
* This is the scheduler loop.
*/
watchdog_start();
watchdog_stop(); /* Stop the wdt... */
while(1) {
int r;
do {
/* Reset watchdog. */
watchdog_periodic();
r = process_run();
} while(r > 0);
/*
* Idle processing.
*/
int s = splhigh(); /* Disable interrupts. */
/* uart1_active is for avoiding LPM3 when still sending or receiving */
if(process_nevents() != 0 || uart1_active()) {
splx(s); /* Re-enable interrupts. */
} else {
static unsigned long irq_energest = 0;
/* Re-enable interrupts and go to sleep atomically. */
ENERGEST_OFF(ENERGEST_TYPE_CPU);
ENERGEST_ON(ENERGEST_TYPE_LPM);
/* We only want to measure the processing done in IRQs when we
are asleep, so we discard the processing time done when we
were awake. */
energest_type_set(ENERGEST_TYPE_IRQ, irq_energest);
watchdog_stop();
_BIS_SR(GIE | SCG0 | SCG1 | CPUOFF); /* LPM3 sleep. This
statement will block
until the CPU is
woken up by an
interrupt that sets
the wake up flag. */
/* We get the current processing time for interrupts that was
done during the LPM and store it for next time around. */
dint();
irq_energest = energest_type_time(ENERGEST_TYPE_IRQ);
eint();
watchdog_start();
ENERGEST_OFF(ENERGEST_TYPE_LPM);
ENERGEST_ON(ENERGEST_TYPE_CPU);
}
}
}
/* (aggregator page footer: "please leave a comment") */