本文整理汇总了Python中pypy.rpython.lltypesystem.llarena.arena_reserve函数的典型用法代码示例。如果您正苦于以下问题:Python arena_reserve函数的具体用法?Python arena_reserve怎么用?Python arena_reserve使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了arena_reserve函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: malloc_fixedsize_clear
def malloc_fixedsize_clear(self, typeid, size, can_collect,
                           has_finalizer=False, contains_weakptr=False):
    """Allocate a fixed-size, cleared object, preferring the nursery.

    Objects with finalizers, non-collectable allocations, and objects
    above the 'young' size thresholds are delegated to the base
    SemiSpaceGC implementation; everything else is bump-allocated in
    the nursery.  Returns a GCREF to the object (just past the GC
    header).
    """
    if (has_finalizer or not can_collect or
        (raw_malloc_usage(size) > self.lb_young_var_basesize and
         raw_malloc_usage(size) > self.largest_young_fixedsize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        # but it can be constant-folded if 'size' is a constant; then
        # it almost always folds down to False, which kills the
        # second comparison as well.
        ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
        # "non-simple" case or object too big: don't use the nursery
        return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                  can_collect,
                                                  has_finalizer,
                                                  contains_weakptr)
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size
    result = self.nursery_free
    # Nursery overflow: collect it; collect_nursery() returns the
    # address at which to allocate afterwards.
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    self.nursery_free = result + totalsize
    if contains_weakptr:
        # Record the young weakref so a later nursery collection can
        # process it.
        self.young_objects_with_weakrefs.append(result + size_gc_header)
    return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
开发者ID:antoine1fr,项目名称:pygirl,代码行数:27,代码来源:generation.py
示例2: test_address_order
def test_address_order():
    # Addresses inside one arena, and across two arenas, must compare
    # consistently with eq()/lt().
    first = arena_malloc(20, False)
    assert eq(first, first)
    assert lt(first, first + 1)
    assert lt(first + 5, first + 20)
    second = arena_malloc(20, False)
    # Normalize so that 'first' is the lower of the two arenas.
    if first > second:
        first, second = second, first
    assert lt(first, second)
    assert lt(first + 19, second)
    assert lt(first, second + 19)
    # Reserve an object inside the higher arena and compare addresses
    # derived from it against everything else, including NULL.
    reserved = second + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(reserved, precomputed_size)
    assert lt(second, reserved)
    assert lt(first, reserved)
    assert lt(llmemory.NULL, reserved)
    field = reserved + llmemory.offsetof(SX, 'x')
    assert lt(reserved, field)
    assert lt(second, field)
    assert lt(first, field)
    assert lt(llmemory.NULL, field)
    past_obj = reserved + precomputed_size
    assert lt(field, past_obj)
    assert lt(reserved, past_obj)
    assert lt(second, past_obj)
    assert lt(first, past_obj)
    assert lt(llmemory.NULL, past_obj)
开发者ID:antoine1fr,项目名称:pygirl,代码行数:29,代码来源:test_llarena.py
示例3: markcompactcollect
def markcompactcollect(self, needed=0):
    """Run a full mark-compact collection.

    'needed' is extra space (in bytes) that must be available after
    the collection.  Marks reachable objects, backs up their type ids
    into 'tid_backup' (stored past the live data), forwards and
    compacts them, then releases or resets the memory that became
    free.
    """
    start_time = self.debug_collect_start()
    self.debug_check_consistency()
    # Mark phase: trace from the roots, then from finalizer-reachable
    # objects if there are any.
    self.to_see = self.AddressStack()
    self.mark_roots_recursively()
    if (self.objects_with_finalizers.non_empty() or
        self.run_finalizers.non_empty()):
        self.mark_objects_with_finalizers()
        self._trace_and_mark()
    self.to_see.delete()
    num_of_alive_objs = self.compute_alive_objects()
    size_of_alive_objs = self.totalsize_of_objs
    # Budget BYTES_PER_TID per alive object on top of 'needed' when
    # sizing the (possibly new) space.
    totalsize = self.new_space_size(size_of_alive_objs, needed +
                                    num_of_alive_objs * BYTES_PER_TID)
    tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                       llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
    used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
    if totalsize >= self.space_size or used_space_now >= self.space_size:
        # Not enough room: grow into a brand-new, larger space and put
        # the tid backup right after the area the live objects will use.
        toaddr = self.double_space_size(totalsize)
        llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
        self.tid_backup = llmemory.cast_adr_to_ptr(
            toaddr + size_of_alive_objs,
            lltype.Ptr(self.TID_BACKUP))
        resizing = True
    else:
        # Compact in place via a fresh view on the same memory; the tid
        # backup lives at the top of the current space.
        toaddr = llarena.arena_new_view(self.space)
        llarena.arena_reserve(self.top_of_space, tid_backup_size)
        self.tid_backup = llmemory.cast_adr_to_ptr(
            self.top_of_space,
            lltype.Ptr(self.TID_BACKUP))
        resizing = False
    self.next_collect_after = totalsize
    weakref_offsets = self.collect_weakref_offsets()
    finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
    if (self.run_finalizers.non_empty() or
        self.objects_with_finalizers.non_empty()):
        self.update_run_finalizers()
    if self.objects_with_weakrefs.non_empty():
        self.invalidate_weakrefs(weakref_offsets)
    self.update_objects_with_id()
    self.compact(resizing)
    if not resizing:
        # In-place compaction: clear everything past the last surviving
        # object.
        size = toaddr + self.space_size - finaladdr
        llarena.arena_reset(finaladdr, size, True)
    else:
        if we_are_translated():
            # because we free stuff already in raw_memmove, we
            # would get double free here. Let's free it anyway
            llarena.arena_free(self.space)
        llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                            True)
    self.space = toaddr
    self.free = finaladdr
    self.top_of_space = toaddr + self.next_collect_after
    self.debug_check_consistency()
    self.tid_backup = lltype.nullptr(self.TID_BACKUP)
    if self.run_finalizers.non_empty():
        self.execute_finalizers()
    self.debug_collect_finish(start_time)
开发者ID:AishwaryaKM,项目名称:python-tutorial,代码行数:59,代码来源:markcompact.py
示例4: update_forward_pointers
def update_forward_pointers(self, toaddr, maxnum):
    """First compaction pass: assign forwarding offsets and fix refs.

    Walks all objects between self.space and self.free.  For each
    marked (surviving) object it saves the type id into tid_backup,
    encodes the forward offset (plus the two low gc flags) into
    hdr.tid, and advances 'toaddr'.  Afterwards all roots and marked
    objects are walked to update their references.
    """
    self.base_forwarding_addr = base_forwarding_addr = toaddr
    fromaddr = self.space
    size_gc_header = self.gcheaderbuilder.size_gc_header
    num = 0
    while fromaddr < self.free:
        hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
        obj = fromaddr + size_gc_header
        # compute the original object size, including the
        # optional hash field
        basesize = size_gc_header + self.get_size(obj)
        totalsrcsize = basesize
        if hdr.tid & GCFLAG_HASHFIELD:  # already a hash field, copy it too
            totalsrcsize += llmemory.sizeof(lltype.Signed)
        #
        if self.marked(obj):
            # the object is marked as suriving. Compute the new object
            # size
            totaldstsize = totalsrcsize
            if hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN:
                # grow a new hash field -- with the exception: if
                # the object actually doesn't move, don't
                # (otherwise, we get a bogus toaddr > fromaddr)
                if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                    totaldstsize += llmemory.sizeof(lltype.Signed)
            #
            # arena_reserve is only needed on the untranslated emulation
            # of arenas.
            if not translated_to_c():
                llarena.arena_reserve(toaddr, basesize)
                if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
                    llarena.arena_reserve(toaddr + basesize, llmemory.sizeof(lltype.Signed))
            #
            # save the field hdr.tid in the array tid_backup
            ll_assert(num < maxnum, "overflow of the tid_backup table")
            self.tid_backup[num] = self.get_type_id(obj)
            num += 1
            # compute forward_offset, the offset to the future copy
            # of this object
            forward_offset = toaddr - base_forwarding_addr
            # copy the first two gc flags in forward_offset
            ll_assert(forward_offset & 3 == 0, "misalignment!")
            forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
            hdr.tid = forward_offset | GCFLAG_MARKBIT
            ll_assert(self.marked(obj), "re-marking object failed!")
            # done
            toaddr += totaldstsize
        #
        fromaddr += totalsrcsize
        if not translated_to_c():
            # compaction can only move objects downwards, never upwards
            assert toaddr - base_forwarding_addr <= fromaddr - self.space
    self.num_alive_objs = num
    self.finaladdr = toaddr
    # now update references
    self.root_walker.walk_roots(
        MarkCompactGC._update_ref,  # stack roots
        MarkCompactGC._update_ref,  # static in prebuilt non-gc structures
        MarkCompactGC._update_ref,
        )  # static in prebuilt gc objects
    self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
开发者ID:junion,项目名称:butlerbot-unstable,代码行数:59,代码来源:markcompact.py
示例5: malloc_varsize_collecting_nursery
def malloc_varsize_collecting_nursery(self, totalsize):
    """Collect the nursery, then carve 'totalsize' bytes out of it."""
    addr = self.collect_nursery()
    # After the collection the request must fit; the caller is expected
    # to have checked the size against the nursery limits already.
    ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - addr,
              "not enough room in malloc_varsize_collecting_nursery()")
    llarena.arena_reserve(addr, totalsize)
    rounded = llarena.round_up_for_allocation(totalsize)
    self.nursery_free = addr + rounded
    return addr
开发者ID:ieure,项目名称:pypy,代码行数:8,代码来源:hybrid.py
示例6: test_address_eq_as_int
def test_address_eq_as_int():
    arena = arena_malloc(50, False)
    arena_reserve(arena, precomputed_size)
    ptr = llmemory.cast_adr_to_ptr(arena, SPTR)
    roundtripped = llmemory.cast_ptr_to_adr(ptr)
    # Casting address -> pointer -> address must preserve equality,
    # and an offset address must not compare equal.
    assert arena == roundtripped
    assert not (arena != roundtripped)
    assert (arena + 1) != roundtripped
    assert not ((arena + 1) == roundtripped)
    # The integer-cast checks below are disabled on purpose.
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    assert llmemory.cast_adr_to_int(arena) == llmemory.cast_adr_to_int(roundtripped)
    assert llmemory.cast_adr_to_int(arena + 1) == llmemory.cast_adr_to_int(roundtripped) + 1
开发者ID:antoine1fr,项目名称:pygirl,代码行数:12,代码来源:test_llarena.py
示例7: _get_memory
def _get_memory(self, totalsize):
    """Reserve and return 'totalsize' bytes of object memory."""
    # The accounting also charges BYTES_PER_TID, the space that the
    # next collection will need to store this object's TID.
    charged = raw_malloc_usage(totalsize) + BYTES_PER_TID
    self.next_collect_after -= charged
    if self.next_collect_after >= 0:
        # Fast path: bump-allocate from the current free pointer.
        addr = self.free
        self.free += totalsize
    else:
        # Budget exhausted: let obtain_free_space() make room.
        addr = self.obtain_free_space(charged)
    llarena.arena_reserve(addr, totalsize)
    return addr
开发者ID:junion,项目名称:butlerbot-unstable,代码行数:12,代码来源:markcompact.py
示例8: test_shrink_obj
def test_shrink_obj():
    # Reserve a header + 10-item array object, shrink it in place to
    # 5 items, and check that resetting just the shrunken extent works.
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    header_size = GCHeaderBuilder(HDR).size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('a', lltype.Array(lltype.Signed)))
    arena = arena_malloc(200, False)
    big = header_size + llmemory.sizeof(S, 10)
    small = header_size + llmemory.sizeof(S, 5)
    arena_reserve(arena, big)
    arena_shrink_obj(arena, small)
    arena_reset(arena, small, False)
开发者ID:alkorzt,项目名称:pypy,代码行数:12,代码来源:test_llarena.py
示例9: markcompactcollect
def markcompactcollect(self, requested_size=0):
    """Run a full in-place mark-compact collection.

    'requested_size' is extra space (in bytes) the caller needs after
    the collection.  Returns True if some finalizers were executed,
    False otherwise.  Raises MemoryError if even after compaction the
    budget is exhausted.
    """
    self.debug_collect_start(requested_size)
    self.debug_check_consistency()
    #
    # Mark alive objects
    #
    self.to_see = self.AddressDeque()
    self.trace_from_roots()
    self.to_see.delete()
    #
    # Prepare new views on the same memory
    #
    toaddr = llarena.arena_new_view(self.space)
    # Upper bound on the number of objects: the used part of the space
    # divided by BYTES_PER_TID (Python 2 '/' on ints == floor division).
    maxnum = self.space_size - (self.free - self.space)
    maxnum /= BYTES_PER_TID
    # The tid backup array lives just past the used part of the space.
    llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
    self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                               lltype.Ptr(TID_BACKUP))
    #
    # Walk all objects and assign forward pointers in the same order,
    # also updating all references
    #
    self.update_forward_pointers(toaddr, maxnum)
    if (self.run_finalizers.non_empty() or
        self.objects_with_finalizers.non_empty()):
        self.update_run_finalizers()
    self.update_objects_with_id()
    self.compact()
    #
    self.tid_backup = lltype.nullptr(TID_BACKUP)
    self.free = self.finaladdr
    self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                   self.num_alive_objs,
                                                   requested_size)
    #
    # Untranslated only: emulate the view switch by clearing the tail
    # of the new view and freeing the old arena object.
    if not translated_to_c():
        remaining_size = (toaddr + self.space_size) - self.finaladdr
        llarena.arena_reset(self.finaladdr, remaining_size, False)
        llarena.arena_free(self.space)
        self.space = toaddr
    #
    self.debug_check_consistency()
    self.debug_collect_finish()
    if self.next_collect_after < 0:
        raise MemoryError
    #
    if self.run_finalizers.non_empty():
        self.execute_finalizers()
        return True      # executed some finalizers
    else:
        return False     # no finalizer executed
开发者ID:ieure,项目名称:pypy,代码行数:52,代码来源:markcompact.py
示例10: allocate_new_page
def allocate_new_page(self, size_class):
    """Allocate and return a new page for the given size_class.

    Takes the next page from the current arena (either from its
    chained list of freed pages or from its uninitialized tail),
    initializes the PAGE_HEADER, and registers the page as the one
    waiting in self.page_for_size[size_class].
    """
    #
    # Allocate a new arena if needed.
    if self.current_arena == ARENA_NULL:
        self.allocate_new_arena()
    #
    # The result is simply 'current_arena.freepages'.
    arena = self.current_arena
    result = arena.freepages
    if arena.nfreepages > 0:
        #
        # The 'result' was part of the chained list; read the next.
        arena.nfreepages -= 1
        freepages = result.address[0]
        # Un-reserve the one Address that held the chain link.
        llarena.arena_reset(result,
                            llmemory.sizeof(llmemory.Address),
                            0)
        #
    else:
        # The 'result' is part of the uninitialized pages.
        ll_assert(self.num_uninitialized_pages > 0,
                  "fully allocated arena found in self.current_arena")
        self.num_uninitialized_pages -= 1
        if self.num_uninitialized_pages > 0:
            freepages = result + self.page_size
        else:
            freepages = NULL
    #
    arena.freepages = freepages
    if freepages == NULL:
        # This was the last page, so put the arena away into
        # arenas_lists[0].
        ll_assert(arena.nfreepages == 0,
                  "freepages == NULL but nfreepages > 0")
        arena.nextarena = self.arenas_lists[0]
        self.arenas_lists[0] = arena
        self.current_arena = ARENA_NULL
    #
    # Initialize the fields of the resulting page
    llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
    page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
    page.arena = arena
    page.nfree = 0
    # First free block starts right after the page header.
    page.freeblock = result + self.hdrsize
    page.nextpage = PAGE_NULL
    ll_assert(self.page_for_size[size_class] == PAGE_NULL,
              "allocate_new_page() called but a page is already waiting")
    self.page_for_size[size_class] = page
    return page
开发者ID:Debug-Orz,项目名称:Sypy,代码行数:50,代码来源:minimarkpage.py
示例11: free_page
def free_page(self, page):
    """Return a whole page to its arena's chained list of free pages."""
    # The arena counts its free pages; when nfreepages reaches
    # totalpages the arena itself is freed at the end of mass_free().
    owning_arena = page.arena
    owning_arena.nfreepages += 1
    addr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
    # Wipe the page, then reserve just enough room to store the link
    # to the previous head of the free-page list.
    llarena.arena_reset(addr, self.page_size, 0)
    llarena.arena_reserve(addr, llmemory.sizeof(llmemory.Address))
    addr.address[0] = owning_arena.freepages
    owning_arena.freepages = addr
开发者ID:Debug-Orz,项目名称:Sypy,代码行数:14,代码来源:minimarkpage.py
示例12: test_look_inside_object
def test_look_inside_object():
    # this code is also used in translation tests below
    arenasize = 50
    base = arena_malloc(arenasize, False)
    objaddr = base + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(objaddr, precomputed_size)
    # Write through the raw address, read back through a typed pointer,
    # and vice versa: both views must see the same field.
    field = objaddr + llmemory.offsetof(SX, 'x')
    field.signed[0] = 123
    assert llmemory.cast_adr_to_ptr(objaddr, SPTR).x == 123
    llmemory.cast_adr_to_ptr(objaddr, SPTR).x += 1
    assert field.signed[0] == 124
    # Resetting with zero=True must clear a freshly re-reserved object.
    arena_reset(base, arenasize, True)
    arena_reserve(objaddr, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(objaddr, SPTR).x == 0
    arena_free(base)
    return 42
开发者ID:antoine1fr,项目名称:pygirl,代码行数:15,代码来源:test_llarena.py
示例13: malloc_varsize_clear
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    """Allocate a cleared variable-sized object, preferring the nursery.

    Falls back to SemiSpaceGC.malloc_varsize_clear() for objects with
    finalizers, non-collectable allocations, too-long arrays, or large
    fixed parts.  Returns a GCREF to the object.
    """
    # Only use the nursery if there are not too many items.
    if not raw_malloc_usage(itemsize):
        too_many_items = False
    else:
        # The following line is usually constant-folded because both
        # min_nursery_size and itemsize are constants (the latter
        # due to inlining).
        maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                         raw_malloc_usage(itemsize))
        # The actual maximum length for our nursery depends on how
        # many times our nursery is bigger than the minimal size.
        # The computation is done in this roundabout way so that
        # only the only remaining computation is the following
        # shift.
        maxlength = maxlength_for_minimal_nursery << self.nursery_scale
        too_many_items = length > maxlength
    if (has_finalizer or not can_collect or
        too_many_items or
        (raw_malloc_usage(size) > self.lb_young_var_basesize and
         raw_malloc_usage(size) > self.largest_young_var_basesize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        # but it can be constant-folded if 'size' is a constant; then
        # it almost always folds down to False, which kills the
        # second comparison as well.
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    # with the above checks we know now that totalsize cannot be more
    # than about half of the nursery size; in particular, the + and *
    # cannot overflow
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size + itemsize * length
    result = self.nursery_free
    # Nursery overflow: collect; collect_nursery() returns the address
    # at which to allocate afterwards.
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    # Store the array length at its declared offset inside the object.
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
    return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
开发者ID:antoine1fr,项目名称:pygirl,代码行数:46,代码来源:generation.py
示例14: copy
def copy(self, obj):
    """Evacuate 'obj' into the to-space, or return its existing copy."""
    if self.is_forwarded(obj):
        # Already evacuated: just follow the forwarding pointer.
        return self.get_forwarding_address(obj)
    destination = self.free
    bodysize = self.get_size(obj)
    totalsize = self.size_gc_header() + bodysize
    llarena.arena_reserve(destination, totalsize)
    # Copy header + body in one go, then bump the allocation pointer.
    raw_memcopy(obj - self.size_gc_header(), destination, totalsize)
    self.free += totalsize
    newobj = destination + self.size_gc_header()
    # Leave a forwarding pointer behind in the old copy.
    self.set_forwarding_address(obj, newobj, bodysize)
    return newobj
开发者ID:antoine1fr,项目名称:pygirl,代码行数:17,代码来源:semispace.py
示例15: malloc_varsize_clear
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    """Allocate a cleared variable-sized object.

    Small enough objects are bump-allocated in the nursery; objects
    with finalizers or non-collectable allocations go to the base
    SemiSpaceGC; anything too large goes to malloc_varsize_slowpath().
    """
    if has_finalizer or not can_collect:
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size
    # Compute the maximal length that makes the object still
    # below 'nonlarge_max'. All the following logic is usually
    # constant-folded because self.nonlarge_max, size and itemsize
    # are all constants (the arguments are constant due to
    # inlining) and self.has_gcptr_in_varsize() is constant-folded.
    if self.has_gcptr_in_varsize(typeid):
        nonlarge_max = self.nonlarge_gcptrs_max
    else:
        nonlarge_max = self.nonlarge_max
    if not raw_malloc_usage(itemsize):
        # Zero-sized items: only the fixed part matters.
        too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
    else:
        maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
        maxlength = maxlength // raw_malloc_usage(itemsize)
        too_many_items = length > maxlength
    if not too_many_items:
        # With the above checks we know now that totalsize cannot be more
        # than 'nonlarge_max'; in particular, the + and * cannot overflow.
        # Let's try to fit the object in the nursery.
        totalsize = nonvarsize + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) <= self.nursery_top - result:
            llarena.arena_reserve(result, totalsize)
            # GCFLAG_NO_YOUNG_PTRS is never set on young objs
            self.init_gc_object(result, typeid, flags=0)
            # Store the array length at its declared offset.
            (result + size_gc_header + offset_to_length).signed[0] = length
            self.nursery_free = result + llarena.round_up_for_allocation(
                totalsize)
            return llmemory.cast_adr_to_ptr(result+size_gc_header,
                                            llmemory.GCREF)
    return self.malloc_varsize_slowpath(typeid, length)
开发者ID:AishwaryaKM,项目名称:python-tutorial,代码行数:43,代码来源:hybrid.py
示例16: malloc
def malloc(self, size):
    """Allocate a block from a page in an arena.

    'size' must be positive, word-aligned and no bigger than
    self.small_request_threshold.  The block is taken from the page
    currently serving this size class, allocating a new page if none
    is waiting.
    """
    nsize = llmemory.raw_malloc_usage(size)
    ll_assert(nsize > 0, "malloc: size is null or negative")
    ll_assert(nsize <= self.small_request_threshold, "malloc: size too big")
    ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
    self.total_memory_used += r_uint(nsize)
    #
    # Get the page to use from the size
    size_class = nsize >> WORD_POWER_2
    page = self.page_for_size[size_class]
    if page == PAGE_NULL:
        page = self.allocate_new_page(size_class)
    #
    # The result is simply 'page.freeblock'
    result = page.freeblock
    if page.nfree > 0:
        #
        # The 'result' was part of the chained list; read the next.
        page.nfree -= 1
        freeblock = result.address[0]
        # Un-reserve the one Address that held the chain link.
        llarena.arena_reset(result,
                            llmemory.sizeof(llmemory.Address),
                            0)
        #
    else:
        # The 'result' is part of the uninitialized blocks.
        freeblock = result + nsize
    #
    page.freeblock = freeblock
    #
    pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
    if freeblock - pageaddr > self.page_size - nsize:
        # This was the last free block, so unlink the page from the
        # chained list and put it in the 'full_page_for_size' list.
        self.page_for_size[size_class] = page.nextpage
        page.nextpage = self.full_page_for_size[size_class]
        self.full_page_for_size[size_class] = page
    #
    llarena.arena_reserve(result, _dummy_size(size))
    return result
开发者ID:Debug-Orz,项目名称:Sypy,代码行数:41,代码来源:minimarkpage.py
示例17: _make_a_copy_with_tid
def _make_a_copy_with_tid(self, obj, objsize, tid):
    """Copy 'obj' (header included) to self.free with the given tid.

    If the hash of the object was taken, the copy grows an extra
    Signed at its end holding the hash value, and GCFLAG_HASHFIELD is
    set in the stored tid.  Returns the address of the new object
    (past the GC header).
    """
    totalsize = self.size_gc_header() + objsize
    newaddr = self.free
    llarena.arena_reserve(newaddr, totalsize)
    raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    #
    # check if we need to write a hash value at the end of the new obj
    if tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD):
        if tid & GCFLAG_HASHFIELD:
            # The old copy already carried a hash field: copy its value.
            hash = (obj + objsize).signed[0]
        else:
            # Hash was taken but not yet materialized: freeze the old
            # address as the hash and record that a field now exists.
            hash = llmemory.cast_adr_to_int(obj)
            tid |= GCFLAG_HASHFIELD
        (newaddr + totalsize).signed[0] = hash
        totalsize += llmemory.sizeof(lltype.Signed)
    #
    self.free += totalsize
    newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
    newhdr.tid = tid
    newobj = newaddr + self.size_gc_header()
    return newobj
开发者ID:enyst,项目名称:plexnet,代码行数:21,代码来源:semispace.py
示例18: link
def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
    """Build a fake page at 'pageaddr' and link it into the allocator.

    With step == 1, the first 'nusedblocks' blocks are used and the
    rest are uninitialized; with step == 2, used and free blocks
    alternate and the free ones are chained through their first
    Address.  Relies on 'ac', 'hdrsize', 'fill_with_objects' and
    '_dummy_size' from the enclosing test scope (not visible here).
    Note: uses the Python 2 'exec ... in' statement.
    """
    assert step in (1, 2)
    llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
    page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
    if step == 1:
        page.nfree = 0
        nuninitialized = nblocks - nusedblocks
    else:
        page.nfree = nusedblocks
        nuninitialized = nblocks - 2*nusedblocks
    page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
    # A page with no free blocks left goes to the 'full' list.
    if nusedblocks < nblocks:
        chainedlists = ac.page_for_size
    else:
        chainedlists = ac.full_page_for_size
    page.nextpage = chainedlists[size_class]
    page.arena = ac.current_arena
    chainedlists[size_class] = page
    if fill_with_objects:
        for i in range(0, nusedblocks*step, step):
            objaddr = pageaddr + hdrsize + i * size_block
            llarena.arena_reserve(objaddr, _dummy_size(size_block))
        if step == 2:
            # Chain the free holes: each hole's first Address points to
            # the next one; 'prev' names (as a string, for exec) the
            # location that must receive the next hole's address.
            prev = 'page.freeblock'
            for i in range(1, nusedblocks*step, step):
                holeaddr = pageaddr + hdrsize + i * size_block
                llarena.arena_reserve(holeaddr,
                                      llmemory.sizeof(llmemory.Address))
                exec '%s = holeaddr' % prev in globals(), locals()
                prevhole = holeaddr
                prev = 'prevhole.address[0]'
            endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
            exec '%s = endaddr' % prev in globals(), locals()
    assert ac._nuninitialized(page, size_class) == nuninitialized
开发者ID:Debug-Orz,项目名称:Sypy,代码行数:34,代码来源:test_minimarkpage.py
示例19: test_replace_object_with_stub
def test_replace_object_with_stub():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    arena = arena_malloc(13*ssize, True)
    # Materialize a full HDR+S object somewhere inside the arena.
    headeraddr = arena + 3*ssize
    arena_reserve(headeraddr, size_gc_header + llmemory.sizeof(S))
    header = llmemory.cast_adr_to_ptr(headeraddr, lltype.Ptr(HDR))
    header.x = 42
    obj = llmemory.cast_adr_to_ptr(headeraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6
    # Replace the object in place by a smaller HDR+STUB.
    headeraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(headeraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(headeraddr, size_gc_header + llmemory.sizeof(STUB))
    # The freshly reserved HDR+STUB must be reachable through the
    # header address we already hold...
    header = llmemory.cast_adr_to_ptr(headeraddr, lltype.Ptr(HDR))
    header.x = 46
    stub = llmemory.cast_adr_to_ptr(headeraddr + size_gc_header,
                                    lltype.Ptr(STUB))
    stub.t = '!'
    # ...and also through a (now-invalid) pointer to the old object:
    # during a garbage collection there are still pointers to the old
    # object around that must be fixable.
    headeraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    header = llmemory.cast_adr_to_ptr(headeraddr, lltype.Ptr(HDR))
    assert header.x == 46
    stub = llmemory.cast_adr_to_ptr(headeraddr + size_gc_header,
                                    lltype.Ptr(STUB))
    assert stub.t == '!'
开发者ID:enyst,项目名称:plexnet,代码行数:39,代码来源:test_llarena.py
示例20: update_forward_pointers
def update_forward_pointers(self, toaddr, num_of_alive_objs):
    """Two-pass forwarding: assign new addresses, then fix references.

    Pass 1 walks every object between self.space and self.free: dead
    objects get a NULL forwarding address, surviving ones get the next
    slot starting at 'toaddr' (reserved as it goes).  Pass 2 updates
    the roots and each surviving object's references.  Returns the end
    address of the compacted data.
    """
    fromaddr = self.space
    size_gc_header = self.gcheaderbuilder.size_gc_header
    i = 0
    while fromaddr < self.free:
        hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
        obj = fromaddr + size_gc_header
        objsize = self.get_size(obj)
        totalsize = size_gc_header + objsize
        if not self.marked(obj):
            # Dead object: record a NULL forwarding address.
            self.set_forwarding_address(obj, NULL, i)
            hdr.forward_ptr = NULL
        else:
            # Survivor: it will move to 'toaddr'.
            llarena.arena_reserve(toaddr, totalsize)
            self.set_forwarding_address(obj, toaddr, i)
            toaddr += totalsize
        # 'i' indexes objects in walk order, dead or alive.
        i += 1
        fromaddr += totalsize
    # now update references
    self.root_walker.walk_roots(
        MarkCompactGC._update_root,  # stack roots
        MarkCompactGC._update_root,  # static in prebuilt non-gc structures
        MarkCompactGC._update_root)  # static in prebuilt gc objects
    fromaddr = self.space
    i = 0
    while fromaddr < self.free:
        hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
        obj = fromaddr + size_gc_header
        # Sizes now come from the backup table, since headers were
        # overwritten by forwarding data in the first pass.
        objsize = self.get_size_from_backup(obj, i)
        totalsize = size_gc_header + objsize
        if not self.surviving(obj):
            pass
        else:
            self.trace_with_backup(obj, self._update_ref, i)
        fromaddr += totalsize
        i += 1
    return toaddr
开发者ID:AishwaryaKM,项目名称:python-tutorial,代码行数:38,代码来源:markcompact.py
注:本文中的pypy.rpython.lltypesystem.llarena.arena_reserve函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论