本文整理汇总了C++中pmd_none函数的典型用法代码示例。如果您正苦于以下问题:C++ pmd_none函数的具体用法?C++ pmd_none怎么用?C++ pmd_none使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pmd_none函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: stage2_flush_pmds
/*
 * stage2_flush_pmds - flush the data cache for the stage-2 mappings in
 * [addr, end) at PMD granularity.
 *
 * @kvm:  the VM whose stage-2 tables are being flushed
 * @pud:  PUD entry covering the range
 * @addr: IPA start of the range
 * @end:  IPA end of the range (exclusive)
 *
 * Huge PMD mappings are flushed through their kernel alias (hva) in one
 * PMD_SIZE operation; regular entries descend into the PTE level.
 */
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				/* Flush the whole huge page via its HVA. */
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PMD_SIZE);
			} else {
				stage2_flush_ptes(kvm, pmd, addr, next);
			}
		}
	/* BUG FIX: loop terminator and closing brace were truncated. */
	} while (pmd++, addr = next, addr != end);
}
开发者ID:0xheart0,项目名称:linux,代码行数:17,代码来源:mmu.c
示例2: shmedia_mapioaddr
/*
 * Map one kernel page at virtual address @va to physical address @pa with
 * the given PTE @flags, allocating any missing intermediate page tables
 * on the way down (pgd -> pud -> pmd -> pte).
 *
 * @pa:    physical address to map
 * @va:    kernel virtual address to map it at
 * @flags: extra PTE flag bits; 0 selects the CB0-1 device default
 */
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

	if (!flags)
		flags = 1; /* 1 = CB0-1 device */

	/* Allocate each missing level of the page-table hierarchy. */
	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	/* Leaf PTE: present, R/W, dirty/accessed, shared, plus caller flags. */
	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);
	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	/* Complain if a conflicting mapping is already installed here. */
	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	/*
	 * BUG FIX: flush_tlb_kernel_range() takes (start, end), not
	 * (start, size); passing PAGE_SIZE as the end flushed nothing
	 * for any va above the first page.
	 */
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
开发者ID:mikuhatsune001,项目名称:linux2.6.32,代码行数:46,代码来源:ioremap_64.c
示例3: stage2_set_pte
/*
 * Install *new_pte in @kvm's stage-2 page tables at IPA @addr, allocating
 * missing PUD/PMD-level tables from @cache as needed.
 *
 * @kvm:     the VM whose stage-2 tables are modified
 * @cache:   pre-filled memory cache for table pages; NULL means "do not
 *           allocate" (calls from kvm_set_spte_hva), in which case a
 *           missing level makes this a silent no-op returning 0
 * @addr:    intermediate physical address being mapped
 * @new_pte: the PTE value to install
 * @iomap:   if true, refuse (-EFAULT) to overwrite an existing mapping
 *
 * Returns 0 on success (or benign skip), -EFAULT on iomap conflict.
 */
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		/* Pin the table page that now holds a live entry. */
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	/* Device mappings must never silently replace an existing entry. */
	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		/* Replacing a live translation: invalidate the stale TLB entry. */
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		/* First entry in this PTE page: take a reference on it. */
		get_page(virt_to_page(pte));

	return 0;
}
开发者ID:monojo,项目名称:xu3,代码行数:46,代码来源:mmu.c
示例4: stage2_flush_pmds
/*
 * Flush the data cache for the stage-2 mappings covered by [addr, end)
 * at PMD granularity: huge PMD mappings are flushed in one operation,
 * regular entries descend into their PTE tables.
 */
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	phys_addr_t next;

	do {
		next = kvm_pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;	/* hole: nothing mapped in this PMD */

		if (kvm_pmd_huge(*pmd))
			kvm_flush_dcache_pmd(*pmd);
		else
			stage2_flush_ptes(kvm, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
开发者ID:TheGalaxyProject,项目名称:tgpkernel-s7-o,代码行数:17,代码来源:mmu.c
示例5: pgd_offset_k
/*
 * Walk the kernel page tables for @address and return a pointer to the
 * PTE mapping it, or NULL if any level of the hierarchy is empty.
 * If the address is covered by a large (PMD-level) page there is no PTE
 * level; the PMD entry itself is returned, cast to pte_t *.
 */
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	/* Large pages terminate the walk at the PMD. */
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	return pte_offset_kernel(pmd, address);
}
开发者ID:1x23,项目名称:unifi-gpl,代码行数:17,代码来源:pageattr.c
示例6: unmap_area_sections
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	/* Round size down to whole 1MB sections (see guard-page note above). */
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		/* Snapshot the entry before clearing it, so we can free below. */
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		/* NOTE(review): steps by 2 PMD entries per PMD_SIZE — presumably
		 * ARM's paired-section layout; confirm against the arch headers. */
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
开发者ID:Dzenik,项目名称:kernel-source,代码行数:55,代码来源:ioremap.c
示例7: consistent_init
/*
 * Initialise the consistent memory allocation.
 *
 * NOTE(review): this function unconditionally returns 0 before doing any
 * work; the table-building loop below is additionally fenced off with
 * "#if 0" and is therefore dead code kept only for reference.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	/* Early exit: consistent-region setup is disabled in this build. */
	return 0;
#if 0
	/* Dead code: would pre-allocate one PTE table per PGDIR covering
	 * [CONSISTENT_BASE, CONSISTENT_END) and cache it in consistent_pte[]. */
	do {
		pgd = pgd_offset(&init_mm, base);
		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));
		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);
	return ret;
#endif
}
开发者ID:JayaWei,项目名称:tiny4412-linux,代码行数:48,代码来源:dma-mapping-cma.c
示例8: BUG_ON
/*
 * Return a pointer to the huge-PTE entry covering @addr in @mm, or NULL
 * if the PGD level is empty. @addr must lie inside the hugepage area
 * (enforced by BUG_ON). The PMD slot itself doubles as the hugepte.
 */
static hugepte_t *hugepte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}
开发者ID:dduval,项目名称:kernel-rhel4,代码行数:18,代码来源:hugetlbpage.c
示例9: page_table_range_init
/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd_base)
{
	unsigned long vaddr = start;
	int pgd_idx = pgd_index(vaddr);
	pgd_t *pgd = pgd_base + pgd_idx;

	while (pgd_idx < PTRS_PER_PGD && vaddr != end) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);

		/* Attach a fresh PTE page wherever the PMD slot is empty. */
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());

		vaddr += PMD_SIZE;
		pgd++;
		pgd_idx++;
	}
}
开发者ID:AllenWeb,项目名称:linux,代码行数:23,代码来源:init.c
示例10: spin_lock
/**
 * get_struct_page - Gets a struct page for a particular address
 * @address - the address of the page we need
 *
 * Two versions of this function have to be provided for working
 * between the 2.4 and 2.5 kernels. Rather than littering the
 * function with #defines, there is just two separate copies.
 * Look at the one that is relevant to the kernel you're using
 *
 * Walks current->mm's page tables under page_table_lock and returns the
 * struct page backing @addr, or NULL if any level is missing/bad, the
 * PTE is not present, or the PFN is invalid.
 */
struct page *get_struct_page(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;
	struct page *page=NULL;

	mm = current->mm;
	/* Is this possible? */
	if (!mm) return NULL;

	/* Hold the lock so the tables can't be torn down under us. */
	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud) && !pud_bad(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
				/*
				 * disable preemption because of potential kmap().
				 * page_table_lock should already have disabled
				 * preemtion. But, be paranoid.
				 */
				preempt_disable();
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;	/* copy, then drop the mapping */
				pte_unmap(ptep);
				preempt_enable();
				if (pte_present(pte)) {
					pfn = pte_pfn(pte);
					if (pfn_valid(pfn))
						page = pte_page(pte);
				}
			}
		}
	}
	spin_unlock(&mm->page_table_lock);
	return page;
}
开发者ID:baozich,项目名称:scripts,代码行数:53,代码来源:pagetable.c
示例11: walk_pmd_range
/*
 * Walk the PMD entries covering [addr, end) under @pud, invoking the
 * walker callbacks: ->pte_hole for empty entries, ->pmd_entry on each
 * populated PMD, and ->pte_entry on individual PTEs (after splitting
 * any transparent huge page). Returns the first non-zero callback
 * result, or 0.
 */
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			/* Hole: report it if the walker cares, then move on. */
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_page_pmd(walk->mm, pmd);
		/* Entry may have changed under us during the split: retry. */
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
开发者ID:285452612,项目名称:ali_kernel,代码行数:44,代码来源:pagewalk.c
示例12: do_translation_fault
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contains the relevant
 * entry, we copy the it to this task. If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
int do_translation_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	struct task_struct *tsk;
	int offset;
	pgd_t *pgd, *pgd_k;
	pmd_t *pmd, *pmd_k;

	/* User-space addresses go through the normal page-fault path. */
	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	offset = __pgd_offset(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 * You really need to read the value in the page table
	 * register, not a copy.
	 */
	pgd = cpu_get_pgd() + offset;		/* this task's live PGD entry */
	pgd_k = init_mm.pgd + offset;		/* master (kernel) PGD entry */

	if (pgd_none(*pgd_k))
		goto bad_area;

#if 0 /* note that we are two-level */
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);
#endif

	pmd_k = pmd_offset(pgd_k, addr);
	pmd = pmd_offset(pgd, addr);

	if (pmd_none(*pmd_k))
		goto bad_area;

	/* Copy the master entry into this task's tables — no locks, see above. */
	set_pmd(pmd, *pmd_k);
	return 0;

bad_area:
	tsk = current;
	do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
	return 0;
}
开发者ID:SimonKagstrom,项目名称:mci500h-linux-2.4.27,代码行数:61,代码来源:fault-common.c
示例13: VMALLOC_VMADDR
/*
 * nopage handler for mmap'd kernel memory: translate the faulting user
 * address into the corresponding vmalloc-space kernel address, walk
 * init_mm's page tables for it, and return the backing struct page with
 * an extra reference, or NULL on any failure (including a write fault
 * on a read-only PTE).
 */
struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long kaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	struct page *page = NULL;

	/* address is user VA; convert to kernel VA of desired page */
	kaddr = (address - vma->vm_start) + offset;
	kaddr = VMALLOC_VMADDR(kaddr);

	/* Lock the kernel page tables while we walk them. */
	spin_lock(&init_mm.page_table_lock);

	/* Lookup page structure for kernel VA */
	pgd = pgd_offset(&init_mm, kaddr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		goto out;
	pmd = pmd_offset(pgd, kaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		goto out;
	ptep = pte_offset(pmd, kaddr);
	if (!ptep)
		goto out;
	pte = *ptep;
	if (!pte_present(pte))
		goto out;
	/* Refuse write faults on pages not mapped writable. */
	if (write && !pte_write(pte))
		goto out;

	page = pte_page(pte);
	if (!VALID_PAGE(page)) {
		page = NULL;
		goto out;
	}

	/* Increment reference count on page */
	get_page(page);

out:
	spin_unlock(&init_mm.page_table_lock);
	return page;
}
开发者ID:leonsh,项目名称:eldk30ppc,代码行数:44,代码来源:mem.c
示例14: get_gate_page
/*
 * Resolve @address inside the gate (vsyscall/vectors) area for GUP.
 * On success fills *@vma with the gate VMA and, if @page is non-NULL,
 * *@page with a referenced struct page. Returns 0 on success, -EFAULT
 * for write requests or unmapped addresses.
 */
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	/* Kernel-half addresses use the kernel PGD; otherwise the gate PGD. */
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	/* The gate area's upper levels are set up at boot and never empty. */
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;	/* caller only wanted the VMA */
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
	/* fallthrough: success path also unmaps the PTE before returning */
unmap:
	pte_unmap(pte);
	return ret;
}
开发者ID:LarryShang,项目名称:linux,代码行数:43,代码来源:gup.c
示例15: do_translation_fault
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contains the relevant
 * entry, we copy the it to this task. If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
int do_translation_fault(unsigned long addr, int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int offset;
	pgd_t *pgd, *pgd_k;
	pmd_t *pmd, *pmd_k;

	/* User-space addresses go through the normal page-fault path. */
	if (addr < TASK_SIZE)
		return do_page_fault(addr, error_code, regs);

	offset = __pgd_offset(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 */
	pgd = cpu_get_pgd() + offset;		/* this task's live PGD entry */
	pgd_k = init_mm.pgd + offset;		/* master (kernel) PGD entry */

	if (pgd_none(*pgd_k))
		goto bad_area;

#if 0 /* note that we are two-level */
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);
#endif

	pmd_k = pmd_offset(pgd_k, addr);
	pmd = pmd_offset(pgd, addr);

	if (pmd_none(*pmd_k))
		goto bad_area;

	/* Copy the master entry into this task's tables — no locks, see above. */
	set_pmd(pmd, *pmd_k);
	return 0;

bad_area:
	tsk = current;
	mm = tsk->active_mm;
	do_bad_area(tsk, mm, addr, error_code, regs);
	return 0;
}
开发者ID:robacklin,项目名称:celinux,代码行数:60,代码来源:fault-common.c
示例16: uvirt_to_kva
/* Given PGD from the address space's page table, return the kernel
 * virtual mapping of the physical memory mapped at ADR.
 * Returns 0 if any page-table level is empty or the PTE is not present.
 */
static inline unsigned long uvirt_to_kva(pgd_t * pgd, unsigned long adr)
{
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (pgd_none(*pgd))
		return 0UL;

	pmd = pmd_offset(pgd, adr);
	if (pmd_none(*pmd))
		return 0UL;

	ptep = pte_offset(pmd, adr);
	pte = *ptep;
	if (!pte_present(pte))
		return 0UL;

	/* Page base address plus the offset within the page. */
	return (unsigned long)pte_page_address(pte) | (adr & (PAGE_SIZE - 1));
}
开发者ID:0omega,项目名称:platform_external_oprofile,代码行数:22,代码来源:op_util.c
示例17: hwc_virt_to_phys
/*
 * Translate the user virtual address @arg (in current->mm) to a physical
 * address by walking the page tables. Returns 0 on any failure (missing
 * or bad entry, PTE not present).
 *
 * NOTE(review): combines the raw PTE value with PAGE_MASK directly —
 * assumes pte_t is a plain integer whose high bits are the PFN on this
 * architecture; confirm against the port's pgtable.h.
 */
static u32 hwc_virt_to_phys(u32 arg)
{
	pmd_t *pmd;
	pte_t *ptep;
	pgd_t *pgd = pgd_offset(current->mm, arg);

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;
	pmd = pmd_offset(pgd, arg);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;
	ptep = pte_offset_map(pmd, arg);
	if (ptep && pte_present(*ptep))
		/* Physical frame bits from the PTE plus the in-page offset. */
		return (PAGE_MASK & *ptep) | (~PAGE_MASK & arg);

	return 0;
}
开发者ID:klquicksall,项目名称:Galaxy-Nexus-4.2,代码行数:19,代码来源:device.c
示例18: exit_mm
/*
 * Tear down @mm when its last reference is dropped: remove all VMAs,
 * release every user-space (below KERNEL_BASE_ADDR) page-table page,
 * then free the PGD page and the mm structure itself. Returns 0 always;
 * a no-op if @mm is NULL/uninitialized or other references remain.
 */
int exit_mm(struct mm_struct *mm)
{
	pmd_t *pmd;
	pgd_t *pgd;
	uint32_t pgdno, pmdno;
	physaddr_t pa;
	struct vm_area_struct* vma = mm->mmap;
	struct page *page;

	if(!mm || !mm->mm_pgd)
		return 0;

	/* Only the final reference actually tears the mm down. */
	if(!atomic_dec_and_test(&mm->mm_count))
		return 0;

	delete_all_vma(mm);

	/* Walk only the user half of the PGD; kernel mappings are shared. */
	for (pgdno = 0; pgdno < pgd_index(KERNEL_BASE_ADDR); pgdno++) {
		pgd = mm->mm_pgd + pgdno;
		if(!pgd_present(*pgd) || pgd_none(*pgd))
			continue;
		pmd_t* tmp = (pmd_t *)pgd_page_vaddr(*pgd);
		for (pmdno = 0; pmdno < PTRS_PER_PMD; pmdno++) {
			pmd = tmp + pmdno;
			if(!pmd_present(*pmd) || pmd_none(*pmd))
				continue;
			/* Drop the PTE page referenced by this PMD entry. */
			struct page* p = virt2page(pmd_page_vaddr(*pmd));
			page_decref(p);
			pmd_set(pmd,0,0);
		}
		/* Drop the PMD page referenced by this PGD entry. */
		struct page* p = virt2page(pgd_page_vaddr(*pgd));
		page_decref(p);
		pgd_set(pgd,0,0);
	}

	/* Finally release the PGD page and the mm itself. */
	page = virt2page((viraddr_t)mm->mm_pgd);
	page_free(page);
	kfree(mm);
	return 0;
}
开发者ID:bingone,项目名称:fuckOS,代码行数:43,代码来源:exit.c
示例19: stage2_wp_pmds
/**
 * stage2_wp_pmds - write protect the PMD range within a PUD entry
 * @pud:	pointer to pud entry
 * @addr:	range start address
 * @end:	range end address
 *
 * Huge PMD mappings are made read-only in place; regular entries
 * descend to the PTE level via stage2_wp_ptes(). Empty entries are
 * skipped.
 */
static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);

	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				/* Only touch entries that are still writable. */
				if (!kvm_s2pmd_readonly(pmd))
					kvm_set_s2pmd_readonly(pmd);
			} else {
				stage2_wp_ptes(pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}
开发者ID:0x00evil,项目名称:linux,代码行数:25,代码来源:mmu.c
示例20: show_pte
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 *
 * Prints the PGD/PMD/PTE entries for @addr via printk, stopping at the
 * first empty or bad level. Falls back to init_mm if @mm is NULL.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "*pgd = %08lx", pgd_val(*pgd));

	/* do { ... } while(0): a break-out scope, not a real loop. */
	do {
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pgd, addr);
		printk(", *pmd = %08lx", pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		pte = pte_offset(pmd, addr);
		printk(", *pte = %08lx", pte_val(*pte));
#ifdef CONFIG_CPU_32
		/* NOTE(review): negative index reads the matching entry in the
		 * hardware copy of the PTE table — presumably the ARM 2-level
		 * split layout; confirm against the arch's pgtable headers. */
		printk(", *ppte = %08lx", pte_val(pte[-PTRS_PER_PTE]));
#endif
	} while(0);

	printk("\n");
}
开发者ID:dmgerman,项目名称:linux-pre-history,代码行数:47,代码来源:fault-common.c
注:本文中的pmd_none函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论