This article collects typical usage examples of the split_page function in C/C++. If you have been wondering what split_page does, how to call it, or where to find working examples, the curated code samples below should help.
The following presents 15 code examples of the split_page function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C/C++ code samples.
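Before the examples, one note: most of the kernel snippets below share a single idiom: allocate a physically contiguous block with alloc_pages(), call split_page() so every order-0 page gets its own reference count, then return the unused tail pages to the allocator one by one. Here is a minimal sketch of that pattern; the helper name alloc_trimmed is made up for illustration, while alloc_pages(), split_page(), get_order() and __free_page() are the real kernel APIs the examples use.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper (illustrative only): allocate 'size' bytes of
 * page-backed memory. split_page() turns the order-N block into
 * 1 << N independent order-0 pages, so the tail beyond 'size' can be
 * handed back to the page allocator page by page.
 */
static struct page *alloc_trimmed(size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	split_page(page, order);

	/* Free every page past the requested size. */
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	return page;
}

Examples 4, 5 and 9 below are essentially this pattern plus device-specific cache maintenance.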
Example 1: setup_zero_pages
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
Developer ID: JeremyGrosser, Project: linux, Lines: 26, Source file: init.c
Example 2: pr
void bin_index_t::file_node::add_item(const index_t& val, page_t& page)
{
	page_pr pr(page);
	page_iter p = std::lower_bound(page.begin(), page.end(), val.key, pr);
	index_ref r = page[static_cast<size_t>(*p)];

	if (r.left() == 0)
	{
		index_t cp(val);
		cp.index_in_page = static_cast<size_t>(*p);
		page.insert_item(cp);
		return;
	}

	page_ptr child_page = get_page(r.left());
	if (child_page->items_count() < child_page->page_max)
	{
		add_item(val, *child_page);
		return;
	}

	page_ptr new_right_page(create_page());
	split_page(*child_page, page, static_cast<size_t>(*p), *new_right_page);

	if (pr(val.key, *p))
		add_item(val, *child_page);
	else
		add_item(val, *new_right_page);

	add_page(new_right_page);
}
Developer ID: JerryStreith, Project: five-in-line, Lines: 32, Source file: bin_index.cpp
Example 3: BUG
/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space. This is done so we
 * get unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping. This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	struct vm_struct *area;
	unsigned long page, va, pa;
	void *ret;
	int order, err, i;

	if (in_interrupt())
		BUG();

	/* only allocate page size areas */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/* allocate some common virtual space to map the new pages */
	area = get_vm_area(size, VM_ALLOC);
	if (area == 0) {
		free_pages(page, order);
		return NULL;
	}
	va = VMALLOC_VMADDR(area->addr);
	ret = (void *) va;

	/* this gives us the real physical address of the first page */
	*dma_handle = pa = virt_to_bus((void *) page);

	/*
	 * set refcount=1 on all pages in an order>0 allocation so that
	 * vfree() will actually free all pages that were allocated
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);
		split_page(rpage, order);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

	if (err) {
		vfree((void *) va);
		return NULL;
	}

	/*
	 * we need to ensure that there are no cachelines in use, or worse
	 * dirty in this area - can't do until after virtual address
	 * mappings are created
	 */
	frv_cache_invalidate(va, va + size);

	return ret;
}
Developer ID: 0x0f, Project: adam-kernel, Lines: 64, Source file: dma-alloc.c
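A hypothetical caller of the routine above might look like the sketch below; only consistent_alloc() itself comes from the example, the surrounding function and names are illustrative.

/*
 * Illustrative caller (assumed, not from the FRV source): allocate an
 * uncached ring buffer and report the address the device must DMA to.
 */
static void *alloc_rx_ring(size_t len, dma_addr_t *bus)
{
	/* consistent_alloc() page-aligns internally; '*bus' receives the
	 * bus address for the DMA engine, while the return value is the
	 * CPU's uncached virtual mapping of the same pages. */
	return consistent_alloc(GFP_KERNEL, len, bus);
}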
Example 4: get_order
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask. Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev,
				       size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			 size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order);
	     p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
Developer ID: BxMxK, Project: android_kernel_asus_tf700t, Lines: 51, Source file: dma-na-mapping.c
Example 5: get_order
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);
	add_meminfo_total_pages(NR_DMA_PAGES, size >> PAGE_SHIFT);

	return page;
}
Developer ID: masterdroid, Project: B14CKB1RD_kernel_m8, Lines: 19, Source file: dma-mapping.c
Example 6: split_page
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
Developer ID: Razziell, Project: R-Kernel, Lines: 27, Source file: backport-3.10.c
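The comment in Example 6 states the key contract: after split_page(), freeing the block with a single __free_pages(page, order) is no longer valid, because each order-0 page carries its own reference count and must be released individually. A minimal sketch of that contract, assuming kernel context:

/* Minimal sketch (assumed context): allocate 4 pages, split them,
 * then free each one on its own, as the contract above requires. */
static void split_page_demo(void)
{
	unsigned int order = 2;	/* 1 << 2 = 4 pages */
	struct page *page = alloc_pages(GFP_KERNEL, order);
	int i;

	if (!page)
		return;

	split_page(page, order);

	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);	/* one put per sub-page */
}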
Example 7: pr_debug
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			    gfp_t flag, struct dma_attrs *attrs)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);

	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
Developer ID: ChineseDr, Project: linux, Lines: 42, Source file: dma.c
Example 8: setup_zero_pages
static unsigned long setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	unsigned long size;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	default:
		order = 2;
		break;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
Developer ID: openube, Project: android_kernel_sony_c2305, Lines: 42, Source file: init.c
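For context on the zero_page_mask computed above: s390 replicates the empty zero page 1 << order times so that differently colored user addresses hit different physical copies, and the architecture's ZERO_PAGE() macro selects a copy by masking the faulting virtual address. A sketch modeled on the s390 definition (verify against the kernel tree you are reading):

/* Sketch modeled on s390's ZERO_PAGE(); zero_page_mask picks which of
 * the replicated zero pages backs a given virtual address. */
#define ZERO_PAGE(vaddr) \
	(virt_to_page(empty_zero_page + \
		      (((unsigned long)(vaddr)) & zero_page_mask)))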
Example 9: get_order
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask. Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
Developer ID: RoyMcBaster, Project: kernel_hammerhead, Lines: 24, Source file: dma-mapping.c
Example 10: vb2_dma_sg_alloc_compacted
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
				      gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					    __GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}
Developer ID: EvolutionMod, Project: ath10-lenovo, Lines: 40, Source file: videobuf2-dma-sg.c
Example 11: pte_alloc_one
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);

	if (likely(p)) {
		split_page(p, COLOR_ORDER);

		for (i = 0; i < PAGE_ORDER; i++) {
			if (PADDR_COLOR(page_address(p)) == color)
				page = p;
			else
				__free_page(p);
			p++;
		}
		clear_highpage(page);
	}
	return page;
}
Developer ID: FatSunHYS, Project: OSCourseDesign, Lines: 22, Source file: pgtable.c
Example 12: pte_alloc_one_kernel
pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	p = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT, COLOR_ORDER);

	if (likely(p)) {
		split_page(virt_to_page(p), COLOR_ORDER);

		for (i = 0; i < COLOR_SIZE; i++) {
			if (ADDR_COLOR(p) == color)
				pte = p;
			else
				free_page(p);
			p += PTRS_PER_PTE;
		}
		clear_page(pte);
	}
	return pte;
}
Developer ID: FatSunHYS, Project: OSCourseDesign, Lines: 22, Source file: pgtable.c
Example 13: ion_buffer_cached
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order,
				      bool *from_pool)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct page *page;
	struct ion_page_pool *pool;

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	page = ion_page_pool_alloc(pool, from_pool);
	if (!page)
		return 0;

	if (split_pages)
		split_page(page, order);

	return page;
}
Developer ID: boa19861105, Project: BOA_Butterfly2_L5.1_kernel, Lines: 22, Source file: ion_system_heap.c
Example 14: BUG
/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's alloced from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * we need to ensure that there are no cachelines in use,
	 * or worse dirty in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
			   virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic! Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
	    (unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif

	/*
	 * free wasted pages. We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);
	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif
		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
//......... (remaining code omitted) .........
Developer ID: 1800alex, Project: linux, Lines: 101, Source file: consistent.c
Example 15: btree_put
int btree_put(btree_t bt, const void *key, const void *data)
{
	const struct btree_def *def = bt->def;
	struct btree_page *new_root = NULL;
	struct btree_page *path_new[MAX_HEIGHT] = {0};
	struct btree_page *path_old[MAX_HEIGHT] = {0};
	int slot_old[MAX_HEIGHT] = {0};
	int h;

	check_btree(bt);

	/* Special case: cursor overwrite */
	if (!key) {
		if (bt->slot[0] < 0) {
			fprintf(stderr, "btree: put at invalid cursor\n");
			return -1;
		}

		memcpy(PAGE_DATA(bt->path[0], bt->slot[0]), data,
		       def->data_size);
		return 1;
	}

	/* Find a path down the tree that leads to the page which should
	 * contain this datum (though the page might be too big to hold it).
	 */
	if (trace_path(bt, key, path_old, slot_old)) {
		/* Special case: overwrite existing item */
		memcpy(PAGE_DATA(path_old[0], slot_old[0]), data,
		       def->data_size);
		return 1;
	}

	/* Trace from the leaf up. If the leaf is at its maximum size, it will
	 * need to split, and cause a pointer to be added in the parent page
	 * of the same node (which may in turn cause it to split).
	 */
	for (h = 0; h <= bt->root->height; h++) {
		if (path_old[h]->num_children < def->branches)
			break;

		path_new[h] = allocate_page(bt, h);
		if (!path_new[h])
			goto fail;
	}

	/* If the split reaches the top (i.e. the root splits), then we need
	 * to allocate a new root node.
	 */
	if (h > bt->root->height) {
		if (h >= MAX_HEIGHT) {
			fprintf(stderr, "btree: maximum height exceeded\n");
			goto fail;
		}

		new_root = allocate_page(bt, h);
		if (!new_root)
			goto fail;
	}

	/* Trace up to one page above the split. At each page that needs
	 * splitting, copy the top half of keys into the new page. Also,
	 * insert a key into one of the pages at all pages from the leaf
	 * to the page above the top of the split.
	 */
	for (h = 0; h <= bt->root->height; h++) {
		int s = slot_old[h] + 1;
		struct btree_page *p = path_old[h];

		/* If there's a split at this level, copy the top half of
		 * the keys from the old page to the new one. Check to see
		 * if the position we were going to insert into is in the
		 * old page or the new one.
		 */
		if (path_new[h]) {
			split_page(path_old[h], path_new[h]);

			if (s > p->num_children) {
				s -= p->num_children;
				p = path_new[h];
			}
		}

		/* Insert the key in the appropriate page */
		if (h)
			insert_ptr(p, s, PAGE_KEY(path_new[h - 1], 0),
				   path_new[h - 1]);
		else
			insert_data(p, s, key, data);

		/* If there was no split at this level, there's nothing to
		 * insert higher up, and we're all done.
		 */
		if (!path_new[h])
			return 0;
	}

	/* If we made it this far, the split reached the top of the tree, and
	 * we need to grow it using the extra page we allocated.
	 */
//......... (remaining code omitted) .........
Developer ID: poelzi, Project: mspdebug, Lines: 101, Source file: btree.c
Note: The split_page function examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code hosting and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any redistribution or use should follow the corresponding project's License. Do not reproduce without permission.