Example 1: do_page_fault
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
unsigned long flags = 0;
siginfo_t info;
int fault;
info.si_code = SEGV_MAPERR;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto vmalloc_fault;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto vmalloc_fault;
#endif
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
} else {
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
}
survive:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
//......... (rest of the code omitted) .........
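The core of the excerpt above is the find_vma() decision chain: locate the first region ending above the faulting address, then classify the access as a direct hit, a legal grow-down stack extension, or a bad area. Below is a minimal userspace sketch of that logic; the toy_vma type, the region table, and the classify() helper are invented stand-ins for the kernel structures, and expand_stack() is simply assumed to succeed.

/* Userspace sketch of the find_vma()/good_area decision chain.
 * Toy types and flags; not the kernel API. */
#include <stdio.h>

#define VM_READ      0x1
#define VM_WRITE     0x2
#define VM_EXEC      0x4
#define VM_GROWSDOWN 0x8

struct toy_vma { unsigned long start, end, flags; };

/* First region whose end lies above addr, like find_vma(). */
static struct toy_vma *toy_find_vma(struct toy_vma *v, int n, unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (addr < v[i].end)
			return &v[i];
	return NULL;
}

static const char *classify(struct toy_vma *v, int n, unsigned long addr, int write)
{
	struct toy_vma *vma = toy_find_vma(v, n, addr);

	if (!vma)
		return "bad_area: no mapping at or above the address";
	if (vma->start > addr && !(vma->flags & VM_GROWSDOWN))
		return "bad_area: hole that is not a grow-down stack";
	/* The kernel would call expand_stack() here; assume it succeeds. */
	if (write && !(vma->flags & VM_WRITE))
		return "bad_area: SEGV_ACCERR, write to a non-writable vma";
	if (!write && !(vma->flags & (VM_READ | VM_WRITE | VM_EXEC)))
		return "bad_area: SEGV_ACCERR, no access rights at all";
	return "good_area: hand off to handle_mm_fault()";
}

int main(void)
{
	struct toy_vma map[] = {
		{ 0x1000, 0x2000, VM_READ | VM_EXEC },                 /* text  */
		{ 0x8000, 0x9000, VM_READ | VM_WRITE | VM_GROWSDOWN }, /* stack */
	};

	printf("%s\n", classify(map, 2, 0x1800, 1)); /* write into text */
	printf("%s\n", classify(map, 2, 0x7f00, 1)); /* grow stack down */
	printf("%s\n", classify(map, 2, 0xa000, 0)); /* past every vma  */
	return 0;
}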
Example 2: setup_local_APIC
void __devinit setup_local_APIC(void)
{
unsigned long oldvalue, value, ver, maxlvt;
int i, j;
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (esr_disable) {
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
}
value = apic_read(APIC_LVR);
ver = GET_APIC_VERSION(value);
BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
/*
* Double-check whether this APIC is really registered.
*/
if (!apic_id_registered())
BUG();
/*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
init_apic_ldr();
/*
* Set Task Priority to reject any interrupts below FIRST_DYNAMIC_VECTOR.
*/
apic_write_around(APIC_TASKPRI, (FIRST_DYNAMIC_VECTOR & 0xF0) - 0x10);
/*
* After a crash, we no longer service the interrupts and a pending
* interrupt from previous kernel might still have ISR bit set.
*
* Most probably the CPU has serviced that pending interrupt by now,
* but it might not have done the ack_APIC_irq() because it thought
* the interrupt came from the i8259 as an ExtInt. The LAPIC did not
* get an EOI, so it does not clear the ISR bit and the CPU thinks it
* has already serviced the interrupt. Hence a vector might get
* locked. This was noticed for the timer irq (vector 0x31). Issue an
* extra EOI to clear the ISR.
*/
for (i = APIC_ISR_NR - 1; i >= 0; i--) {
value = apic_read(APIC_ISR + i*0x10);
for (j = 31; j >= 0; j--) {
if (value & (1<<j))
ack_APIC_irq();
}
}
/*
* Now that we are all set up, enable the APIC
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
/*
* Enable APIC
*/
value |= APIC_SPIV_APIC_ENABLED;
/*
* Some unknown Intel IO/APIC (or APIC) errata is biting us with
* certain networking cards. If high frequency interrupts are
* happening on a particular IOAPIC pin, plus the IOAPIC routing
* entry is masked/unmasked at a high rate as well then sooner or
* later IOAPIC line gets 'stuck', no more interrupts are received
* from the device. If focus CPU is disabled then the hang goes
* away, oh well :-(
*
* [ This bug can be reproduced easily with a level-triggered
* PCI Ne2000 networking card and PII/PIII processors on a
* dual BX chipset. ]
*/
/*
* Actually disabling the focus CPU check just makes the hang less
* frequent as it makes the interrupt distribution model be more
* like LRU than MRU (the short-term load is more even across CPUs).
* See also the comment in end_level_ioapic_irq(). --macro
*/
#if 1
/* Enable focus processor (bit==0) */
value &= ~APIC_SPIV_FOCUS_DISABLED;
#else
/* Disable focus processor (bit==1) */
value |= APIC_SPIV_FOCUS_DISABLED;
#endif
/*
* Set spurious IRQ vector
*/
value |= SPURIOUS_APIC_VECTOR;
/*
* Enable directed EOI
*/
if ( directed_eoi_enabled )
//......... (rest of the code omitted) .........
Developer ID: HPSI, Project: xen-v4v, Lines: 101, Source file: apic.c
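The EOI-draining loop near the top of setup_local_APIC() is easy to model: eight 32-bit in-service registers are scanned from the highest vector down, and one EOI is issued per set bit. A self-contained mock follows, where the isr[] array and the eoi() stub stand in for apic_read(APIC_ISR + i*0x10) and ack_APIC_irq().

/* Mock of the pending-ISR drain: one EOI per bit still set in the
 * in-service registers, scanned from the highest vector downwards. */
#include <stdio.h>

#define APIC_ISR_NR 8                  /* 8 x 32 bits = 256 vectors */

static unsigned int isr[APIC_ISR_NR];  /* stand-in for apic_read(APIC_ISR...) */
static int eois;

static void eoi(void) { eois++; }      /* stand-in for ack_APIC_irq() */

int main(void)
{
	isr[0x31 / 32] |= 1u << (0x31 % 32);  /* the stuck timer vector 0x31 */
	isr[0x80 / 32] |= 1u << (0x80 % 32);  /* a second pending vector */

	for (int i = APIC_ISR_NR - 1; i >= 0; i--) {
		unsigned int value = isr[i];
		for (int j = 31; j >= 0; j--)
			if (value & (1u << j))
				eoi();
	}
	printf("issued %d extra EOIs\n", eois);  /* prints 2 */
	return 0;
}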
Example 3: play_dead
static inline void play_dead(void)
{
BUG();
}
Developer ID: E-LLP, Project: n900, Lines: 4, Source file: process_32.c
Example 4: do_check
static int
do_check( PKT_secret_key *sk, const char *tryagain_text, int mode,
int *canceled )
{
gpg_error_t err;
u16 csum=0;
int i, res;
size_t nbytes;
if( sk->is_protected ) { /* remove the protection */
DEK *dek = NULL;
u32 keyid[4]; /* 4! because we need two of them */
gcry_cipher_hd_t cipher_hd=NULL;
PKT_secret_key *save_sk;
if( sk->protect.s2k.mode == 1001 ) {
log_info(_("secret key parts are not available\n"));
return G10ERR_UNU_SECKEY;
}
if( sk->protect.algo == CIPHER_ALGO_NONE )
BUG();
if( openpgp_cipher_test_algo( sk->protect.algo ) ) {
log_info(_("protection algorithm %d%s is not supported\n"),
sk->protect.algo,sk->protect.algo==1?" (IDEA)":"" );
if (sk->protect.algo==CIPHER_ALGO_IDEA)
{
write_status (STATUS_RSA_OR_IDEA);
idea_cipher_warn (0);
}
return G10ERR_CIPHER_ALGO;
}
if(gcry_md_test_algo (sk->protect.s2k.hash_algo))
{
log_info(_("protection digest %d is not supported\n"),
sk->protect.s2k.hash_algo);
return G10ERR_DIGEST_ALGO;
}
keyid_from_sk( sk, keyid );
keyid[2] = keyid[3] = 0;
if( !sk->is_primary ) {
keyid[2] = sk->main_keyid[0];
keyid[3] = sk->main_keyid[1];
}
dek = passphrase_to_dek( keyid, sk->pubkey_algo, sk->protect.algo,
&sk->protect.s2k, mode,
tryagain_text, canceled );
if (!dek && canceled && *canceled)
return GPG_ERR_CANCELED;
err = openpgp_cipher_open (&cipher_hd, sk->protect.algo,
GCRY_CIPHER_MODE_CFB,
(GCRY_CIPHER_SECURE
| (sk->protect.algo >= 100 ?
0 : GCRY_CIPHER_ENABLE_SYNC)));
if (err)
log_fatal ("cipher open failed: %s\n", gpg_strerror (err) );
err = gcry_cipher_setkey (cipher_hd, dek->key, dek->keylen);
if (err)
log_fatal ("set key failed: %s\n", gpg_strerror (err) );
xfree(dek);
save_sk = copy_secret_key( NULL, sk );
gcry_cipher_setiv ( cipher_hd, sk->protect.iv, sk->protect.ivlen );
csum = 0;
if( sk->version >= 4 ) {
int ndata;
unsigned int ndatabits;
byte *p, *data;
u16 csumc = 0;
i = pubkey_get_npkey(sk->pubkey_algo);
assert ( gcry_mpi_get_flag (sk->skey[i], GCRYMPI_FLAG_OPAQUE ));
p = gcry_mpi_get_opaque ( sk->skey[i], &ndatabits );
ndata = (ndatabits+7)/8;
if ( ndata > 1 && p )
csumc = p[ndata-2] << 8 | p[ndata-1];
data = xmalloc_secure ( ndata );
if (p)
gcry_cipher_decrypt ( cipher_hd, data, ndata, p, ndata );
else
memset (data, 0, ndata);
gcry_mpi_release (sk->skey[i]); sk->skey[i] = NULL ;
p = data;
if (sk->protect.sha1chk) {
/* This is the new SHA1 checksum method to detect
tampering with the key as used by the Klima/Rosa
attack */
sk->csum = 0;
csum = 1;
if( ndata < 20 )
log_error("not enough bytes for SHA-1 checksum\n");
else {
gcry_md_hd_t h;
//......... (rest of the code omitted) .........
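The csumc value pulled out of the decrypted material above is the classic OpenPGP simple checksum: the final two bytes store, big-endian, the 16-bit sum of every byte that precedes them (RFC 4880, section 5.5.3). A standalone sketch of the comparison, with invented sample bytes:

/* Sketch of the simple OpenPGP secret-key checksum that do_check()
 * compares against: sum of all data bytes mod 65536, stored big-endian
 * in the final two bytes. */
#include <stdio.h>

typedef unsigned char byte;
typedef unsigned short u16;

static u16 checksum(const byte *p, size_t n)
{
	u16 csum = 0;
	while (n--)
		csum += *p++;
	return csum;
}

int main(void)
{
	byte data[] = { 0x01, 0x02, 0xfe, /* stored checksum: */ 0x01, 0x01 };
	size_t ndata = sizeof(data);
	u16 stored = (u16)(data[ndata - 2] << 8 | data[ndata - 1]); /* like csumc */
	u16 calc = checksum(data, ndata - 2);

	printf("stored 0x%04x, computed 0x%04x: %s\n", stored, calc,
	       stored == calc ? "ok" : "checksum error");
	return 0;
}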
Example 5: write_mft_record_nolock
/**
* write_mft_record_nolock - write out a mapped (extent) mft record
* @ni: ntfs inode describing the mapped (extent) mft record
* @m: mapped (extent) mft record to write
* @sync: if true, wait for i/o completion
*
* Write the mapped (extent) mft record @m described by the (regular or extent)
* ntfs inode @ni to backing store. If the mft record @m has a counterpart in
* the mft mirror, that is also updated.
*
* On success, clean the mft record and return 0. On error, leave the mft
* record dirty and return -errno. The caller should call make_bad_inode() on
* the base inode to ensure no more access happens to this inode. We do not do
* it here as the caller may want to finish writing other extent mft records
* first to minimize on-disk metadata inconsistencies.
*
* NOTE: We always perform synchronous i/o and ignore the @sync parameter.
* However, if the mft record has a counterpart in the mft mirror and @sync is
* true, we write the mft record, wait for i/o completion, and only then write
* the mft mirror copy. This ensures that if the system crashes either the mft
* or the mft mirror will contain a self-consistent mft record @m. If @sync is
* false on the other hand, we start i/o on both and then wait for completion
* on them. This provides a speedup but no longer guarantees that you will end
* up with a self-consistent mft record in the case of a crash but if you asked
* for asynchronous writing you probably do not care about that anyway.
*
* TODO: If @sync is false, want to do truly asynchronous i/o, i.e. just
* schedule i/o via ->writepage or do it via kntfsd or whatever.
*/
int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
{
ntfs_volume *vol = ni->vol;
struct page *page = ni->page;
unsigned int blocksize = vol->sb->s_blocksize;
int max_bhs = vol->mft_record_size / blocksize;
struct buffer_head *bhs[max_bhs];
struct buffer_head *bh, *head;
unsigned int block_start, block_end, m_start, m_end;
int i_bhs, nr_bhs, err = 0;
ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
BUG_ON(NInoAttr(ni));
BUG_ON(!max_bhs);
BUG_ON(!PageLocked(page));
/*
* If the ntfs_inode is clean no need to do anything. If it is dirty,
* mark it as clean now so that it can be redirtied later on if needed.
* There is no danger of races since the caller is holding the locks
* for the mft record @m and the page it is in.
*/
if (!NInoTestClearDirty(ni))
goto done;
/* Make sure we have mapped buffers. */
if (!page_has_buffers(page)) {
no_buffers_err_out:
ntfs_error(vol->sb, "Writing mft records without existing "
"buffers is not implemented yet. %s",
ntfs_please_email);
err = -EOPNOTSUPP;
goto err_out;
}
bh = head = page_buffers(page);
if (!bh)
goto no_buffers_err_out;
nr_bhs = 0;
block_start = 0;
m_start = ni->page_ofs;
m_end = m_start + vol->mft_record_size;
do {
block_end = block_start + blocksize;
/*
* If the buffer is outside the mft record, just skip it,
* clearing it if it is dirty to make sure it is not written
* out. It should never be marked dirty but better be safe.
*/
if ((block_end <= m_start) || (block_start >= m_end)) {
if (buffer_dirty(bh)) {
ntfs_warning(vol->sb, "Clearing dirty mft "
"record page buffer. %s",
ntfs_please_email);
clear_buffer_dirty(bh);
}
continue;
}
if (!buffer_mapped(bh)) {
ntfs_error(vol->sb, "Writing mft records without "
"existing mapped buffers is not "
"implemented yet. %s",
ntfs_please_email);
err = -EOPNOTSUPP;
continue;
}
if (!buffer_uptodate(bh)) {
ntfs_error(vol->sb, "Writing mft records without "
"existing uptodate buffers is not "
"implemented yet. %s",
ntfs_please_email);
err = -EOPNOTSUPP;
continue;
}
//......... (rest of the code omitted) .........
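The skip test in the loop above, (block_end <= m_start) || (block_start >= m_end), is the standard disjointness check for half-open intervals: a buffer belongs to the mft record exactly when neither interval ends before the other begins. Here it is isolated, with made-up page geometry:

/* The buffer-selection test from write_mft_record_nolock(), isolated:
 * half-open intervals [a0, a1) and [b0, b1) overlap iff
 * !(a1 <= b0 || a0 >= b1). */
#include <stdio.h>

static int overlaps(unsigned a0, unsigned a1, unsigned b0, unsigned b1)
{
	return !(a1 <= b0 || a0 >= b1);
}

int main(void)
{
	unsigned m_start = 1024, m_end = 2048;  /* mft record within the page */
	unsigned blocksize = 512;

	for (unsigned bs = 0; bs < 4096; bs += blocksize)
		printf("buffer [%4u,%4u) %s\n", bs, bs + blocksize,
		       overlaps(bs, bs + blocksize, m_start, m_end)
		       ? "belongs to the record" : "skipped (cleared if dirty)");
	return 0;
}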
Example 6: rxrpc_recvmsg
/*
* receive a message from an RxRPC socket
* - we need to be careful about two or more threads calling recvmsg
* simultaneously
*/
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_call *call = NULL, *continue_call = NULL;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct sk_buff *skb;
long timeo;
int copy, ret, ullen, offset, copied = 0;
u32 abort_code;
DEFINE_WAIT(wait);
_enter(",,,%zu,%d", len, flags);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
msg->msg_flags |= MSG_MORE;
lock_sock(&rx->sk);
for (;;) {
/* return immediately if a client socket has no outstanding
* calls */
if (RB_EMPTY_ROOT(&rx->calls)) {
if (copied)
goto out;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
return -ENODATA;
}
}
/* get the next message on the Rx queue */
skb = skb_peek(&rx->sk.sk_receive_queue);
if (!skb) {
/* nothing remains on the queue */
if (copied &&
(msg->msg_flags & MSG_PEEK || timeo == 0))
goto out;
/* wait for a message to turn up */
release_sock(&rx->sk);
prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
goto wait_error;
if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
if (signal_pending(current))
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
peek_next_packet:
sp = rxrpc_skb(skb);
call = sp->call;
ASSERT(call != NULL);
_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
/* make sure we wait for the state to be updated in this call */
spin_lock_bh(&call->lock);
spin_unlock_bh(&call->lock);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
_debug("packet from released call");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
continue;
}
/* determine whether to continue last data receive */
if (continue_call) {
_debug("maybe cont");
if (call != continue_call ||
skb->mark != RXRPC_SKB_MARK_DATA) {
release_sock(&rx->sk);
rxrpc_put_call(continue_call);
_leave(" = %d [noncont]", copied);
return copied;
}
}
//......... (rest of the code omitted) .........
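The empty-queue branch above follows the canonical kernel wait pattern: release the socket lock, register on the wait queue, re-check the condition, sleep, then retake the lock and loop. The same shape in userspace, using a pthread condition variable as a rough analogue of prepare_to_wait_exclusive()/schedule_timeout(); the one-item queue and producer thread are invented for the demo. Link with -lpthread.

/* Userspace analogue of the rxrpc_recvmsg() wait loop: drop the lock,
 * sleep until data arrives, reacquire and re-check. pthread_cond_wait()
 * plays the role of prepare_to_wait()/schedule(). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t avail = PTHREAD_COND_INITIALIZER;
static int queue_len;                    /* stand-in for sk_receive_queue */

static void *producer(void *arg)
{
	(void)arg;
	sleep(1);                        /* packet arrives later */
	pthread_mutex_lock(&lock);
	queue_len++;
	pthread_cond_signal(&avail);     /* like waking the wait queue */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	while (queue_len == 0)                  /* re-check after every wakeup */
		pthread_cond_wait(&avail, &lock);   /* drops and retakes the lock */
	printf("got a packet, %d queued\n", queue_len);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}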
Example 7: kprobe_handler
/*
* Called with IRQs disabled. IRQs must remain disabled from that point
* all the way until processing this kprobe is complete. The current
* kprobes implementation cannot process more than one nested level of
* kprobe, and that level is reserved for user kprobe handlers, so we can't
* risk encountering a new kprobe in an interrupt handler.
*/
void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur;
struct kprobe_ctlblk *kcb;
kcb = get_kprobe_ctlblk();
cur = kprobe_running();
#ifdef CONFIG_THUMB2_KERNEL
/*
* First look for a probe which was registered using an address with
* bit 0 set, this is the usual situation for pointers to Thumb code.
* If not found, fallback to looking for one with bit 0 clear.
*/
p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
if (!p)
p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#else /* ! CONFIG_THUMB2_KERNEL */
p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif
if (p) {
if (cur) {
/* Kprobe is pending, so we're recursing. */
switch (kcb->kprobe_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/* A pre- or post-handler probe got us here. */
kprobes_inc_nmissed_count(p);
save_previous_kprobe(kcb);
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_REENTER;
singlestep(p, regs, kcb);
restore_previous_kprobe(kcb);
break;
default:
/* impossible cases */
BUG();
}
} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
/* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
* pre-handler and it returned non-zero, it prepped
* for calling the break_handler below on re-entry,
* so get out doing nothing more here.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
singlestep(p, regs, kcb);
if (p->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
reset_current_kprobe();
}
} else {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
*/
singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */
if (cur->break_handler && cur->break_handler(cur, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
singlestep(cur, regs, kcb);
if (cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
}
reset_current_kprobe();
} else {
/*
* The probe was removed and a race is in progress.
* There is nothing we can do about it. Let's restart
* the instruction. By the time we can restart, the
* real instruction will be there.
*/
}
}
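The CONFIG_THUMB2_KERNEL lookup at the top of kprobe_handler() deserves a note: Thumb function pointers carry bit 0 set, so a registered probe address usually has it set too, and the handler therefore tries regs->ARM_pc | 1 first and only then the raw PC. A toy reproduction of that two-step fallback; the linear table and get_probe() are stand-ins for the real kprobe hash.

/* Sketch of the Thumb bit-0 lookup fallback: probe the table with
 * bit 0 set (Thumb-style address) first, then with the raw address. */
#include <stdio.h>

struct probe { unsigned long addr; const char *name; };

static struct probe table[] = {
	{ 0x8000 | 1, "thumb_func" },   /* registered with bit 0 set */
	{ 0x9000,     "arm_func"   },   /* registered with bit 0 clear */
};

static struct probe *get_probe(unsigned long addr)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].addr == addr)
			return &table[i];
	return NULL;
}

static struct probe *lookup(unsigned long pc)
{
	struct probe *p = get_probe(pc | 1);  /* usual case: Thumb pointer */
	if (!p)
		p = get_probe(pc);            /* fallback: bit 0 clear */
	return p;
}

int main(void)
{
	printf("pc 0x8000 -> %s\n", lookup(0x8000)->name);  /* thumb_func */
	printf("pc 0x9000 -> %s\n", lookup(0x9000)->name);  /* arm_func   */
	return 0;
}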
Example 8: relay_crypto_init
/** Initialize <b>crypto</b> from the key material in key_data.
*
* If <b>is_hs_v3</b> is set, this cpath will be used for next gen hidden
* service circuits and <b>key_data</b> must be at least
* HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN bytes in length.
*
* If <b>is_hs_v3</b> is not set, key_data must contain CPATH_KEY_MATERIAL_LEN
* bytes, which are used as follows:
* - 20 to initialize f_digest
* - 20 to initialize b_digest
* - 16 to key f_crypto
* - 16 to key b_crypto
*
* (If 'reverse' is true, then f_XX and b_XX are swapped.)
*
* Return 0 if init was successful, else -1 if it failed.
*/
int
relay_crypto_init(relay_crypto_t *crypto,
const char *key_data, size_t key_data_len,
int reverse, int is_hs_v3)
{
crypto_digest_t *tmp_digest;
crypto_cipher_t *tmp_crypto;
size_t digest_len = 0;
size_t cipher_key_len = 0;
tor_assert(crypto);
tor_assert(key_data);
tor_assert(!(crypto->f_crypto || crypto->b_crypto ||
crypto->f_digest || crypto->b_digest));
/* Basic key size validation */
if (is_hs_v3 && BUG(key_data_len != HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN)) {
goto err;
} else if (!is_hs_v3 && BUG(key_data_len != CPATH_KEY_MATERIAL_LEN)) {
goto err;
}
/* If we are using this crypto for next gen onion services use SHA3-256,
otherwise use good ol' SHA1 */
if (is_hs_v3) {
digest_len = DIGEST256_LEN;
cipher_key_len = CIPHER256_KEY_LEN;
crypto->f_digest = crypto_digest256_new(DIGEST_SHA3_256);
crypto->b_digest = crypto_digest256_new(DIGEST_SHA3_256);
} else {
digest_len = DIGEST_LEN;
cipher_key_len = CIPHER_KEY_LEN;
crypto->f_digest = crypto_digest_new();
crypto->b_digest = crypto_digest_new();
}
tor_assert(digest_len != 0);
tor_assert(cipher_key_len != 0);
const int cipher_key_bits = (int) cipher_key_len * 8;
crypto_digest_add_bytes(crypto->f_digest, key_data, digest_len);
crypto_digest_add_bytes(crypto->b_digest, key_data+digest_len, digest_len);
crypto->f_crypto = crypto_cipher_new_with_bits(key_data+(2*digest_len),
cipher_key_bits);
if (!crypto->f_crypto) {
log_warn(LD_BUG,"Forward cipher initialization failed.");
goto err;
}
crypto->b_crypto = crypto_cipher_new_with_bits(
key_data+(2*digest_len)+cipher_key_len,
cipher_key_bits);
if (!crypto->b_crypto) {
log_warn(LD_BUG,"Backward cipher initialization failed.");
goto err;
}
if (reverse) {
tmp_digest = crypto->f_digest;
crypto->f_digest = crypto->b_digest;
crypto->b_digest = tmp_digest;
tmp_crypto = crypto->f_crypto;
crypto->f_crypto = crypto->b_crypto;
crypto->b_crypto = tmp_crypto;
}
return 0;
err:
relay_crypto_clear(crypto);
return -1;
}
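For the non-hs-v3 path, the key_data layout listed in the header comment adds up to exactly 20 + 20 + 16 + 16 = 72 bytes. Here is a tiny sketch of the same offset arithmetic the function performs; the constants mirror the documented sizes and the buffer contents are placeholders.

/* Sketch of the CPATH_KEY_MATERIAL_LEN layout consumed by
 * relay_crypto_init() when is_hs_v3 == 0: two SHA-1 digest seeds
 * followed by two AES-128 keys. */
#include <stdio.h>
#include <string.h>

#define DIGEST_LEN     20   /* SHA-1 */
#define CIPHER_KEY_LEN 16   /* AES-128 */
#define CPATH_KEY_MATERIAL_LEN (2 * DIGEST_LEN + 2 * CIPHER_KEY_LEN)

int main(void)
{
	unsigned char key_data[CPATH_KEY_MATERIAL_LEN];
	memset(key_data, 0xAB, sizeof(key_data));  /* placeholder material */

	const unsigned char *f_digest_seed = key_data;
	const unsigned char *b_digest_seed = key_data + DIGEST_LEN;
	const unsigned char *f_key = key_data + 2 * DIGEST_LEN;
	const unsigned char *b_key = key_data + 2 * DIGEST_LEN + CIPHER_KEY_LEN;

	printf("total %d bytes: f_digest@%td b_digest@%td f_key@%td b_key@%td\n",
	       CPATH_KEY_MATERIAL_LEN,
	       f_digest_seed - key_data, b_digest_seed - key_data,
	       f_key - key_data, b_key - key_data);
	return 0;
}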
Example 9: prio_tree_insert
/*
* Insert a prio_tree_node @node into a radix priority search tree @root. The
* algorithm typically takes O(log n) time where 'log n' is the number of bits
* required to represent the maximum heap_index. In the worst case, the algo
* can take O((log n)^2) - check prio_tree_expand.
*
* If a prior node with same radix_index and heap_index is already found in
* the tree, then returns the address of the prior node. Otherwise, inserts
* @node into the tree and returns @node.
*/
struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
struct prio_tree_node *node)
{
struct prio_tree_node *cur, *res = node;
unsigned long radix_index, heap_index;
unsigned long r_index, h_index, index, mask;
int size_flag = 0;
get_index(root, node, &radix_index, &heap_index);
if (prio_tree_empty(root) ||
heap_index > prio_tree_maxindex(root->index_bits))
return prio_tree_expand(root, node, heap_index);
cur = root->prio_tree_node;
mask = 1UL << (root->index_bits - 1);
while (mask) {
get_index(root, cur, &r_index, &h_index);
if (r_index == radix_index && h_index == heap_index)
return cur;
if (h_index < heap_index ||
(h_index == heap_index && r_index > radix_index)) {
struct prio_tree_node *tmp = node;
node = prio_tree_replace(root, cur, node);
cur = tmp;
/* swap indices */
index = r_index;
r_index = radix_index;
radix_index = index;
index = h_index;
h_index = heap_index;
heap_index = index;
}
if (size_flag)
index = heap_index - radix_index;
else
index = radix_index;
if (index & mask) {
if (prio_tree_right_empty(cur)) {
INIT_PRIO_TREE_NODE(node);
prio_set_parent(cur, node, false);
return res;
} else
cur = cur->right;
} else {
if (prio_tree_left_empty(cur)) {
INIT_PRIO_TREE_NODE(node);
prio_set_parent(cur, node, true);
return res;
} else
cur = cur->left;
}
mask >>= 1;
if (!mask) {
mask = 1UL << (BITS_PER_LONG - 1);
size_flag = 1;
}
}
/* Should not reach here */
BUG();
return NULL;
}
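The descent in prio_tree_insert() is driven purely by the bits of the index, highest bit first: a set bit steps to the right child, a clear bit to the left (with the radix/heap swap and size_flag refinements layered on top). A compact illustration of the path selection alone, using a toy four-bit index:

/* Illustration of the bit-driven descent in prio_tree_insert():
 * starting from mask = 1 << (index_bits - 1), each index bit picks
 * the right child (set) or the left child (clear). */
#include <stdio.h>

int main(void)
{
	unsigned long radix_index = 0xB;  /* binary 1011, a toy index */
	int index_bits = 4;               /* toy root->index_bits */

	for (unsigned long mask = 1UL << (index_bits - 1); mask; mask >>= 1)
		printf("bit %s -> go %s\n",
		       (radix_index & mask) ? "1" : "0",
		       (radix_index & mask) ? "right" : "left");
	return 0;
}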
Example 10: print_keyrec
static void
print_keyrec(int number,struct keyrec *keyrec)
{
int i;
iobuf_writebyte(keyrec->uidbuf,0);
iobuf_flush_temp(keyrec->uidbuf);
es_printf ("(%d)\t%s ", number, iobuf_get_temp_buffer (keyrec->uidbuf));
if (keyrec->size>0)
es_printf ("%d bit ", keyrec->size);
if(keyrec->type)
{
const char *str = gcry_pk_algo_name (keyrec->type);
if(str)
es_printf ("%s ",str);
else
es_printf ("unknown ");
}
switch(keyrec->desc.mode)
{
/* If the keyserver helper gave us a short keyid, we have no
choice but to use it. Do check --keyid-format to add a 0x if
needed. */
case KEYDB_SEARCH_MODE_SHORT_KID:
es_printf ("key %s%08lX",
(opt.keyid_format==KF_0xSHORT
|| opt.keyid_format==KF_0xLONG)?"0x":"",
(ulong)keyrec->desc.u.kid[1]);
break;
/* However, if it gave us a long keyid, we can honor
--keyid-format */
case KEYDB_SEARCH_MODE_LONG_KID:
es_printf ("key %s",keystr(keyrec->desc.u.kid));
break;
case KEYDB_SEARCH_MODE_FPR16:
es_printf ("key ");
for(i=0;i<16;i++)
es_printf ("%02X",keyrec->desc.u.fpr[i]);
break;
case KEYDB_SEARCH_MODE_FPR20:
es_printf ("key ");
for(i=0;i<20;i++)
es_printf ("%02X", keyrec->desc.u.fpr[i]);
break;
default:
BUG();
break;
}
if(keyrec->createtime>0)
{
es_printf (", ");
es_printf (_("created: %s"), strtimestamp(keyrec->createtime));
}
if(keyrec->expiretime>0)
{
es_printf (", ");
es_printf (_("expires: %s"), strtimestamp(keyrec->expiretime));
}
if (keyrec->flags&1)
es_printf (" (%s)", _("revoked"));
if(keyrec->flags&2)
es_printf (" (%s)", _("disabled"));
if(keyrec->flags&4)
es_printf (" (%s)", _("expired"));
es_printf ("\n");
}
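The fingerprint branches and the flag decoding at the tail of print_keyrec() are simple enough to lift out whole: 16 or 20 bytes dumped as uppercase hex pairs, then one parenthesised attribute per set flag bit. A standalone version with invented sample data:

/* Standalone version of print_keyrec()'s fingerprint dump and flag
 * decoding. The fingerprint bytes and flags below are sample data. */
#include <stdio.h>

int main(void)
{
	unsigned char fpr[20] = {
		0xC9, 0x64, 0x14, 0x19, 0x2C, 0x7E, 0x1A, 0x69, 0x2C, 0x4D,
		0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0x11, 0x22,
	};
	unsigned flags = 1 | 4;  /* bit 0: revoked, bit 2: expired */

	printf("key ");
	for (int i = 0; i < 20; i++)
		printf("%02X", fpr[i]);
	if (flags & 1) printf(" (revoked)");
	if (flags & 2) printf(" (disabled)");
	if (flags & 4) printf(" (expired)");
	printf("\n");
	return 0;
}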