1 : #include "Python.h"
2 :
3 : #ifdef WITH_PYMALLOC
4 :
5 : #ifdef HAVE_MMAP
6 : #include <sys/mman.h>
7 : #ifdef MAP_ANONYMOUS
8 : #define ARENAS_USE_MMAP
9 : #endif
10 : #endif
11 :
12 : #ifdef WITH_VALGRIND
13 : #include <valgrind/valgrind.h>
14 :
15 : /* If we're using GCC, use __builtin_expect() to reduce overhead of
16 : the valgrind checks */
17 : #if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
18 : # define UNLIKELY(value) __builtin_expect((value), 0)
19 : #else
20 : # define UNLIKELY(value) (value)
21 : #endif
22 :
23 : /* -1 indicates that we haven't checked that we're running on valgrind yet. */
24 : static int running_on_valgrind = -1;
25 : #endif
26 :
27 : /* An object allocator for Python.
28 :
29 : Here is an introduction to the layers of the Python memory architecture,
30 : showing where the object allocator is actually used (layer +2). It is
31 : called for every object allocation and deallocation (PyObject_New/Del),
32 : unless the object-specific allocators implement a proprietary allocation
33 : scheme (e.g., ints use a simple free list). This is also the place where
34 : the cyclic garbage collector operates selectively on container objects.
35 :
36 :
37 : Object-specific allocators
38 : _____ ______ ______ ________
39 : [ int ] [ dict ] [ list ] ... [ string ] Python core |
40 : +3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
41 : _______________________________ | |
42 : [ Python's object allocator ] | |
43 : +2 | ####### Object memory ####### | <------ Internal buffers ------> |
44 : ______________________________________________________________ |
45 : [ Python's raw memory allocator (PyMem_ API) ] |
46 : +1 | <----- Python memory (under PyMem manager's control) ------> | |
47 : __________________________________________________________________
48 : [ Underlying general-purpose allocator (ex: C library malloc) ]
49 : 0 | <------ Virtual memory allocated for the python process -------> |
50 :
51 : =========================================================================
52 : _______________________________________________________________________
53 : [ OS-specific Virtual Memory Manager (VMM) ]
54 : -1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
55 : __________________________________ __________________________________
56 : [ ] [ ]
57 : -2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
58 :
59 : */
60 : /*==========================================================================*/
61 :
62 : /* A fast, special-purpose memory allocator for small blocks, to be used
63 : on top of a general-purpose malloc -- heavily based on prior art. */
64 :
65 : /* Vladimir Marangozov -- August 2000 */
66 :
67 : /*
68 : * "Memory management is where the rubber meets the road -- if we do the wrong
69 : * thing at any level, the results will not be good. And if we don't make the
70 : * levels work well together, we are in serious trouble." (1)
71 : *
72 : * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
73 : * "Dynamic Storage Allocation: A Survey and Critical Review",
74 : * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
75 : */
76 :
77 : /* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */
78 :
79 : /*==========================================================================*/
80 :
81 : /*
82 : * Allocation strategy abstract:
83 : *
84 : * For small requests, the allocator sub-allocates <Big> blocks of memory.
85 : * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
86 : * system's allocator.
87 : *
88 : * Small requests are grouped in size classes spaced 8 bytes apart, due
89 : * to the required valid alignment of the returned address. Requests of
90 : * a particular size are serviced from memory pools of 4K (one VMM page).
91 : * Pools are fragmented on demand and contain free lists of blocks of one
92 : * particular size class. In other words, there is a fixed-size allocator
93 : * for each size class. Free pools are shared by the different allocators
94 : * thus minimizing the space reserved for a particular size class.
95 : *
96 : * This allocation strategy is a variant of what is known as "simple
97 : * segregated storage based on array of free lists". The main drawback of
98 : * simple segregated storage is that we might end up with a lot of reserved
99 : * memory for the different free lists, which can degenerate over time. To
100 : * avoid this, we partition each free list into pools and dynamically share
101 : * the reserved space among all free lists. This technique is quite
102 : * efficient for memory-intensive programs which allocate mainly small blocks.
103 : *
104 : * For small requests we have the following table:
105 : *
106 : * Request in bytes Size of allocated block Size class idx
107 : * ----------------------------------------------------------------
108 : * 1-8 8 0
109 : * 9-16 16 1
110 : * 17-24 24 2
111 : * 25-32 32 3
112 : * 33-40 40 4
113 : * 41-48 48 5
114 : * 49-56 56 6
115 : * 57-64 64 7
116 : * 65-72 72 8
117 : * ... ... ...
118 : * 497-504 504 62
119 : * 505-512 512 63
120 : *
121 : * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
122 : * allocator.
123 : */
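/* Illustration (mirroring the fast path in PyObject_Malloc below, and
 * assuming the ALIGNMENT/INDEX2SIZE definitions that follow): mapping a
 * request size to its size class index and back is pure shift arithmetic:
 *
 *     size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;   // size class index
 *     allocated = INDEX2SIZE(size);                   // rounded-up size
 *
 * e.g. nbytes == 20 gives size == 2 and allocated == 24, matching the
 * table above.
 */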
124 :
125 : /*==========================================================================*/
126 :
127 : /*
128 : * -- Main tunable settings section --
129 : */
130 :
131 : /*
132 : * Alignment of addresses returned to the user. 8-bytes alignment works
133 : * on most current architectures (with 32-bit or 64-bit address buses).
134 : * The alignment value is also used for grouping small requests in size
135 : * classes spaced ALIGNMENT bytes apart.
136 : *
137 : * You shouldn't change this unless you know what you are doing.
138 : */
139 : #define ALIGNMENT 8 /* must be 2^N */
140 : #define ALIGNMENT_SHIFT 3
141 : #define ALIGNMENT_MASK (ALIGNMENT - 1)
142 :
143 : /* Return the number of bytes in size class I, as a uint. */
144 : #define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
145 :
146 : /*
147 : * Max size threshold below which malloc requests are considered to be
148 : * small enough in order to use preallocated memory pools. You can tune
149 : * this value according to your application behaviour and memory needs.
150 : *
151 : * Note: a size threshold of 512 guarantees that newly created dictionaries
152 : * will be allocated from preallocated memory pools on 64-bit.
153 : *
154 : * The following invariants must hold:
155 : * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
156 : * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
157 : *
158 : * Although not required, for better performance and space efficiency,
159 : * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
160 : */
161 : #define SMALL_REQUEST_THRESHOLD 512
162 : #define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
163 :
164 : /*
165 : * The system's VMM page size can be obtained on most unices with a
166 : * getpagesize() call or deduced from various header files. To make
167 : * things simpler, we assume that it is 4K, which is OK for most systems.
168 : * It is probably better if this is the native page size, but it doesn't
169 : * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
170 : * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
171 : * violation fault. 4K is apparently OK for all the platforms that Python
172 : * currently targets.
173 : */
174 : #define SYSTEM_PAGE_SIZE (4 * 1024)
175 : #define SYSTEM_PAGE_SIZE_MASK (SYSTEM_PAGE_SIZE - 1)
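/* A hedged sketch of how the 4K assumption could be verified at runtime
 * on POSIX systems (pymalloc does not do this; the value is hard-coded):
 *
 *     #include <unistd.h>
 *     long pagesize = sysconf(_SC_PAGESIZE);   // or getpagesize()
 *     assert(pagesize >= SYSTEM_PAGE_SIZE);    // the safe direction per above
 */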
176 :
177 : /*
178 : * Maximum amount of memory managed by the allocator for small requests.
179 : */
180 : #ifdef WITH_MEMORY_LIMITS
181 : #ifndef SMALL_MEMORY_LIMIT
182 : #define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
183 : #endif
184 : #endif
185 :
186 : /*
187 : * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
188 : * on a page boundary. This is a reserved virtual address space for the
189 : * current process (obtained through a malloc()/mmap() call). This in no way
190 : * means that the memory arenas will be used entirely. A malloc(<Big>) is
191 : * usually an address range reservation for <Big> bytes; physical pages are
192 : * committed only as the space is actually referenced. So malloc'ing big
193 : * blocks and not using them does not mean "wasting memory"; it only wastes
194 : * addressable range.
195 : *
196 : * Arenas are allocated with mmap() on systems supporting anonymous memory
197 : * mappings to reduce heap fragmentation.
198 : */
199 : #define ARENA_SIZE (256 << 10) /* 256KB */
200 :
201 : #ifdef WITH_MEMORY_LIMITS
202 : #define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
203 : #endif
204 :
205 : /*
206 : * Size of the pools used for small blocks. Should be a power of 2,
207 : * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
208 : */
209 : #define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */
210 : #define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK
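/* With the defaults above: ARENA_SIZE / POOL_SIZE == (256 << 10) / 4096
 * == 64, so each pool-aligned arena carves out 64 pools; new_arena()
 * below asserts exactly this relationship.
 */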
211 :
212 : /*
213 : * -- End of tunable settings section --
214 : */
215 :
216 : /*==========================================================================*/
217 :
218 : /*
219 : * Locking
220 : *
221 : * To reduce lock contention, it would probably be better to refine the
222 : * crude function locking with per-size-class locking. I'm not positive,
223 : * however, that it's worth switching to such a locking policy because
224 : * of the performance penalty it might introduce.
225 : *
226 : * The following macros describe the simplest (should also be the fastest)
227 : * lock object on a particular platform and the init/fini/lock/unlock
228 : * operations on it. The locks defined here are not expected to be recursive
229 : * because it is assumed that they will always be called in the order:
230 : * INIT, [LOCK, UNLOCK]*, FINI.
231 : */
232 :
233 : /*
234 : * Python's threads are serialized by the GIL, so object malloc locking is disabled.
235 : */
236 : #define SIMPLELOCK_DECL(lock) /* simple lock declaration */
237 : #define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */
238 : #define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */
239 : #define SIMPLELOCK_LOCK(lock) /* acquire released lock */
240 : #define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */
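/* Purely illustrative, should real locking ever be needed (e.g. if the
 * GIL no longer serialized access): the macros above could be bound to
 * CPython's PyThread API along these lines. This is a hedged sketch, not
 * what this file does:
 *
 *     #include "pythread.h"
 *     #define SIMPLELOCK_DECL(lock)   static PyThread_type_lock lock;
 *     #define SIMPLELOCK_INIT(lock)   ((lock) = PyThread_allocate_lock())
 *     #define SIMPLELOCK_FINI(lock)   PyThread_free_lock(lock)
 *     #define SIMPLELOCK_LOCK(lock)   PyThread_acquire_lock((lock), 1)
 *     #define SIMPLELOCK_UNLOCK(lock) PyThread_release_lock(lock)
 */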
241 :
242 : /*
243 : * Basic types
244 : * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
245 : */
246 : #undef uchar
247 : #define uchar unsigned char /* assuming == 8 bits */
248 :
249 : #undef uint
250 : #define uint unsigned int /* assuming >= 16 bits */
251 :
252 : #undef ulong
253 : #define ulong unsigned long /* assuming >= 32 bits */
254 :
255 : #undef uptr
256 : #define uptr Py_uintptr_t
257 :
258 : /* When you say memory, my mind reasons in terms of (pointers to) blocks */
259 : typedef uchar block;
260 :
261 : /* Pool for small blocks. */
262 : struct pool_header {
263 : union { block *_padding;
264 : uint count; } ref; /* number of allocated blocks */
265 : block *freeblock; /* pool's free list head */
266 : struct pool_header *nextpool; /* next pool of this size class */
267 : struct pool_header *prevpool; /* previous pool "" */
268 : uint arenaindex; /* index into arenas of base adr */
269 : uint szidx; /* block size class index */
270 : uint nextoffset; /* bytes to virgin block */
271 : uint maxnextoffset; /* largest valid nextoffset */
272 : };
273 :
274 : typedef struct pool_header *poolp;
275 :
276 : /* Record keeping for arenas. */
277 : struct arena_object {
278 : /* The address of the arena, as returned by malloc. Note that 0
279 : * will never be returned by a successful malloc, and is used
280 : * here to mark an arena_object that doesn't correspond to an
281 : * allocated arena.
282 : */
283 : uptr address;
284 :
285 : /* Pool-aligned pointer to the next pool to be carved off. */
286 : block* pool_address;
287 :
288 : /* The number of available pools in the arena: free pools + never-
289 : * allocated pools.
290 : */
291 : uint nfreepools;
292 :
293 : /* The total number of pools in the arena, whether or not available. */
294 : uint ntotalpools;
295 :
296 : /* Singly-linked list of available pools. */
297 : struct pool_header* freepools;
298 :
299 : /* Whenever this arena_object is not associated with an allocated
300 : * arena, the nextarena member is used to link all unassociated
301 : * arena_objects in the singly-linked `unused_arena_objects` list.
302 : * The prevarena member is unused in this case.
303 : *
304 : * When this arena_object is associated with an allocated arena
305 : * with at least one available pool, both members are used in the
306 : * doubly-linked `usable_arenas` list, which is maintained in
307 : * increasing order of `nfreepools` values.
308 : *
309 : * Else this arena_object is associated with an allocated arena
310 : * all of whose pools are in use. `nextarena` and `prevarena`
311 : * are both meaningless in this case.
312 : */
313 : struct arena_object* nextarena;
314 : struct arena_object* prevarena;
315 : };
316 :
317 : #undef ROUNDUP
318 : #define ROUNDUP(x) (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
319 : #define POOL_OVERHEAD ROUNDUP(sizeof(struct pool_header))
320 :
321 : #define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */
322 :
323 : /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
324 : #define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))
325 :
326 : /* Return total number of blocks in pool of size index I, as a uint. */
327 : #define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
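/* Worked example, assuming a 64-bit build where sizeof(struct pool_header)
 * is 48 (so POOL_OVERHEAD == 48): a size-class-0 pool (8-byte blocks)
 * holds NUMBLOCKS(0) == (4096 - 48) / 8 == 506 blocks, while a
 * size-class-63 pool (512-byte blocks) holds (4096 - 48) / 512 == 7.
 */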
328 :
329 : /*==========================================================================*/
330 :
331 : /*
332 : * This malloc lock
333 : */
334 : SIMPLELOCK_DECL(_malloc_lock)
335 : #define LOCK() SIMPLELOCK_LOCK(_malloc_lock)
336 : #define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)
337 : #define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)
338 : #define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)
339 :
340 : /*
341 : * Pool table -- headed, circular, doubly-linked lists of partially used pools.
342 :
343 : This is involved. For an index i, usedpools[i+i] is the header for a list of
344 : all partially used pools holding small blocks with "size class idx" i. So
345 : usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
346 : 16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
347 :
348 : Pools are carved off an arena's highwater mark (an arena_object's pool_address
349 : member) as needed. Once carved off, a pool is in one of three states forever
350 : after:
351 :
352 : used == partially used, neither empty nor full
353 : At least one block in the pool is currently allocated, and at least one
354 : block in the pool is not currently allocated (note this implies a pool
355 : has room for at least two blocks).
356 : This is a pool's initial state, as a pool is created only when malloc
357 : needs space.
358 : The pool holds blocks of a fixed size, and is in the circular list headed
359 : at usedpools[i] (see above). It's linked to the other used pools of the
360 : same size class via the pool_header's nextpool and prevpool members.
361 : If all but one block is currently allocated, a malloc can cause a
362 : transition to the full state. If all but one block is not currently
363 : allocated, a free can cause a transition to the empty state.
364 :
365 : full == all the pool's blocks are currently allocated
366 : On transition to full, a pool is unlinked from its usedpools[] list.
367 : Nothing links to it anymore, and its nextpool and
368 : prevpool members are meaningless until it transitions back to used.
369 : A free of a block in a full pool puts the pool back in the used state.
370 : Then it's linked in at the front of the appropriate usedpools[] list, so
371 : that the next allocation for its size class will reuse the freed block.
372 :
373 : empty == all the pool's blocks are currently available for allocation
374 : On transition to empty, a pool is unlinked from its usedpools[] list,
375 : and linked to the front of its arena_object's singly-linked freepools list,
376 : via its nextpool member. The prevpool member has no meaning in this case.
377 : Empty pools have no inherent size class: the next time a malloc finds
378 : an empty list in usedpools[], it takes the first pool off of freepools.
379 : If the size class needed happens to be the same as the size class the pool
380 : last had, some pool initialization can be skipped.
381 :
382 :
383 : Block Management
384 :
385 : Blocks within pools are again carved out as needed. pool->freeblock points to
386 : the start of a singly-linked list of free blocks within the pool. When a
387 : block is freed, it's inserted at the front of its pool's freeblock list. Note
388 : that the available blocks in a pool are *not* linked all together when a pool
389 : is initialized. Instead only "the first two" (lowest addresses) blocks are
390 : set up, returning the first such block, and setting pool->freeblock to a
391 : one-block list holding the second such block. This is consistent with
392 : pymalloc's striving, at all levels (arena, pool, and block), never to touch
393 : a piece of memory until it's actually needed.
394 :
395 : So long as a pool is in the used state, we're certain there *is* a block
396 : available for allocating, and pool->freeblock is not NULL. If pool->freeblock
397 : points to the end of the free list before we've carved the entire pool into
398 : blocks, that means we simply haven't yet gotten to one of the higher-address
399 : blocks. The offset from the pool_header to the start of "the next" virgin
400 : block is stored in the pool_header nextoffset member, and the largest value
401 : of nextoffset that makes sense is stored in the maxnextoffset member when a
402 : pool is initialized. All the blocks in a pool have been passed out at least
403 : once when and only when nextoffset > maxnextoffset.
404 :
405 :
406 : Major obscurity: While the usedpools vector is declared to have poolp
407 : entries, it doesn't really. It really contains two pointers per (conceptual)
408 : poolp entry, the nextpool and prevpool members of a pool_header. The
409 : excruciating initialization code below fools C so that
410 :
411 : usedpools[i+i]
412 :
413 : "acts like" a genuine poolp, but only so long as you only reference its
414 : nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish
415 : compensates for the fact that a pool_header's nextpool and prevpool members
416 : immediately follow a pool_header's first two members:
417 :
418 : union { block *_padding;
419 : uint count; } ref;
420 : block *freeblock;
421 :
422 : each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
423 : contains is a fudged-up pointer p such that *if* C believes it's a poolp
424 : pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
425 : circular list is empty).
426 :
427 : It's unclear why the usedpools setup is so convoluted. It could be to
428 : minimize the amount of cache required to hold this heavily-referenced table
429 : (which only *needs* the two interpool pointer members of a pool_header). OTOH,
430 : referencing code has to remember to "double the index" and doing so isn't
431 : free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
432 : on the fact that C doesn't insert any padding anywhere in a pool_header at or
433 : before the prevpool member.
434 : **************************************************************************** */
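/* Illustration (mirroring the malloc fast path below): popping a block
 * off a used pool's free list is just
 *
 *     bp = pool->freeblock;               // head of the free list
 *     pool->freeblock = *(block **)bp;    // unlink it
 *
 * and a NULL result in pool->freeblock signals that the list is exhausted,
 * so the next virgin block (at pool->nextoffset) must be carved off instead.
 */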
435 :
436 : #define PTA(x) ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
437 : #define PT(x) PTA(x), PTA(x)
438 :
439 : static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
440 : PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
441 : #if NB_SMALL_SIZE_CLASSES > 8
442 : , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
443 : #if NB_SMALL_SIZE_CLASSES > 16
444 : , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
445 : #if NB_SMALL_SIZE_CLASSES > 24
446 : , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
447 : #if NB_SMALL_SIZE_CLASSES > 32
448 : , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
449 : #if NB_SMALL_SIZE_CLASSES > 40
450 : , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
451 : #if NB_SMALL_SIZE_CLASSES > 48
452 : , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
453 : #if NB_SMALL_SIZE_CLASSES > 56
454 : , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
455 : #if NB_SMALL_SIZE_CLASSES > 64
456 : #error "NB_SMALL_SIZE_CLASSES should not exceed 64"
457 : #endif /* NB_SMALL_SIZE_CLASSES > 64 */
458 : #endif /* NB_SMALL_SIZE_CLASSES > 56 */
459 : #endif /* NB_SMALL_SIZE_CLASSES > 48 */
460 : #endif /* NB_SMALL_SIZE_CLASSES > 40 */
461 : #endif /* NB_SMALL_SIZE_CLASSES > 32 */
462 : #endif /* NB_SMALL_SIZE_CLASSES > 24 */
463 : #endif /* NB_SMALL_SIZE_CLASSES > 16 */
464 : #endif /* NB_SMALL_SIZE_CLASSES > 8 */
465 : };
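/* Illustration of the "double the index" convention described above: the
 * list header for size class `size` lives at usedpools[size + size], and
 * the headed circular list is empty exactly when the header points at
 * itself. This is the test PyObject_Malloc performs on its fast path:
 *
 *     pool = usedpools[size + size];
 *     if (pool != pool->nextpool) {
 *         // at least one partially used pool of this size class exists
 *     }
 */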
466 :
467 : /*==========================================================================
468 : Arena management.
469 :
470 : `arenas` is a vector of arena_objects. It contains maxarenas entries, some of
471 : which may not be currently used (== they're arena_objects that aren't
472 : currently associated with an allocated arena). Note that arenas proper are
473 : separately malloc'ed.
474 :
475 : Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
476 : we do try to free() arenas, and use some mild heuristic strategies to increase
477 : the likelihood that arenas eventually can be freed.
478 :
479 : unused_arena_objects
480 :
481 : This is a singly-linked list of the arena_objects that are currently not
482 : being used (no arena is associated with them). Objects are taken off the
483 : head of the list in new_arena(), and are pushed on the head of the list in
484 : PyObject_Free() when the arena is empty. Key invariant: an arena_object
485 : is on this list if and only if its .address member is 0.
486 :
487 : usable_arenas
488 :
489 : This is a doubly-linked list of the arena_objects associated with arenas
490 : that have pools available. These pools are either waiting to be reused,
491 : or have not been used before. The list is sorted to have the most-
492 : allocated arenas first (ascending order based on the nfreepools member).
493 : This means that the next allocation will come from a heavily used arena,
494 : which gives the nearly empty arenas a chance to be returned to the system.
495 : In my unscientific tests this dramatically improved the number of arenas
496 : that could be freed.
497 :
498 : Note that an arena_object associated with an arena all of whose pools are
499 : currently in use isn't on either list.
500 : */
501 :
502 : /* Array of objects used to track chunks of memory (arenas). */
503 : static struct arena_object* arenas = NULL;
504 : /* Number of slots currently allocated in the `arenas` vector. */
505 : static uint maxarenas = 0;
506 :
507 : /* The head of the singly-linked, NULL-terminated list of available
508 : * arena_objects.
509 : */
510 : static struct arena_object* unused_arena_objects = NULL;
511 :
512 : /* The head of the doubly-linked list (NULL-terminated at each end) of
513 : * arena_objects associated with arenas that have pools available.
514 : */
515 : static struct arena_object* usable_arenas = NULL;
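/* A hypothetical debug helper (not part of this file) that spells out the
 * key invariant stated above -- an arena_object is on unused_arena_objects
 * if and only if its .address member is 0:
 *
 *     static void
 *     check_unused_arena_invariant(void)
 *     {
 *         struct arena_object *ao;
 *         for (ao = unused_arena_objects; ao != NULL; ao = ao->nextarena)
 *             assert(ao->address == 0);
 *     }
 */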
516 :
517 : /* How many arena_objects do we initially allocate?
518 : * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
519 : * `arenas` vector.
520 : */
521 : #define INITIAL_ARENA_OBJECTS 16
522 :
523 : /* Number of arenas allocated that haven't been free()'d. */
524 : static size_t narenas_currently_allocated = 0;
525 :
526 : /* Total number of times malloc() called to allocate an arena. */
527 : static size_t ntimes_arena_allocated = 0;
528 : /* High water mark (max value ever seen) for narenas_currently_allocated. */
529 : static size_t narenas_highwater = 0;
530 :
531 : /* Allocate a new arena. If we run out of memory, return NULL; else
532 : * return the address of an arena_object describing the newly allocated
533 : * arena. It's expected that the caller will set `usable_arenas` to the
534 : * return value.
535 : */
536 : static struct arena_object*
537 11 : new_arena(void)
538 : {
539 : struct arena_object* arenaobj;
540 : uint excess; /* number of bytes above pool alignment */
541 : void *address;
542 : int err;
543 :
544 : #ifdef PYMALLOC_DEBUG
545 : if (Py_GETENV("PYTHONMALLOCSTATS"))
546 : _PyObject_DebugMallocStats(stderr);
547 : #endif
548 11 : if (unused_arena_objects == NULL) {
549 : uint i;
550 : uint numarenas;
551 : size_t nbytes;
552 :
553 : /* Double the number of arena objects on each allocation.
554 : * Note that it's possible for `numarenas` to overflow.
555 : */
556 1 : numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
557 1 : if (numarenas <= maxarenas)
558 0 : return NULL; /* overflow */
559 : #if SIZEOF_SIZE_T <= SIZEOF_INT
560 1 : if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
561 0 : return NULL; /* overflow */
562 : #endif
563 1 : nbytes = numarenas * sizeof(*arenas);
564 1 : arenaobj = (struct arena_object *)realloc(arenas, nbytes);
565 1 : if (arenaobj == NULL)
566 0 : return NULL;
567 1 : arenas = arenaobj;
568 :
569 : /* We might need to fix pointers that were copied. However,
570 : * new_arena only gets called when all the pages in the
571 : * previous arenas are full. Thus, there are *no* pointers
572 : * into the old array. Thus, we don't have to worry about
573 : * invalid pointers. Just to be sure, some asserts:
574 : */
575 : assert(usable_arenas == NULL);
576 : assert(unused_arena_objects == NULL);
577 :
578 : /* Put the new arenas on the unused_arena_objects list. */
579 17 : for (i = maxarenas; i < numarenas; ++i) {
580 16 : arenas[i].address = 0; /* mark as unassociated */
581 32 : arenas[i].nextarena = i < numarenas - 1 ?
582 16 : &arenas[i+1] : NULL;
583 : }
584 :
585 : /* Update globals. */
586 1 : unused_arena_objects = &arenas[maxarenas];
587 1 : maxarenas = numarenas;
588 : }
589 :
590 : /* Take the next available arena object off the head of the list. */
591 : assert(unused_arena_objects != NULL);
592 11 : arenaobj = unused_arena_objects;
593 11 : unused_arena_objects = arenaobj->nextarena;
594 : assert(arenaobj->address == 0);
595 : #ifdef ARENAS_USE_MMAP
596 11 : address = mmap(NULL, ARENA_SIZE, PROT_READ|PROT_WRITE,
597 : MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
598 11 : err = (address == MAP_FAILED);
599 : #else
600 : address = malloc(ARENA_SIZE);
601 : err = (address == 0);
602 : #endif
603 11 : if (err) {
604 : /* The allocation failed: return NULL after putting the
605 : * arenaobj back.
606 : */
607 0 : arenaobj->nextarena = unused_arena_objects;
608 0 : unused_arena_objects = arenaobj;
609 0 : return NULL;
610 : }
611 11 : arenaobj->address = (uptr)address;
612 :
613 11 : ++narenas_currently_allocated;
614 11 : ++ntimes_arena_allocated;
615 11 : if (narenas_currently_allocated > narenas_highwater)
616 7 : narenas_highwater = narenas_currently_allocated;
617 11 : arenaobj->freepools = NULL;
618 : /* pool_address <- first pool-aligned address in the arena
619 : nfreepools <- number of whole pools that fit after alignment */
620 11 : arenaobj->pool_address = (block*)arenaobj->address;
621 11 : arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
622 : assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
623 11 : excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
624 11 : if (excess != 0) {
625 0 : --arenaobj->nfreepools;
626 0 : arenaobj->pool_address += POOL_SIZE - excess;
627 : }
628 11 : arenaobj->ntotalpools = arenaobj->nfreepools;
629 :
630 11 : return arenaobj;
631 : }
632 :
633 : /*
634 : Py_ADDRESS_IN_RANGE(P, POOL)
635 :
636 : Return true if and only if P is an address that was allocated by pymalloc.
637 : POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
638 : (the caller is asked to compute this because the macro expands POOL more than
639 : once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
640 : variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
641 : called on every alloc/realloc/free, micro-efficiency is important here).
642 :
643 : Tricky: Let B be the arena base address associated with the pool, B =
644 : arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
645 :
646 : B <= P < B + ARENA_SIZE
647 :
648 : Subtracting B throughout, this is true iff
649 :
650 : 0 <= P-B < ARENA_SIZE
651 :
652 : By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
653 :
654 : Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
655 : before the first arena has been allocated. `arenas` is still NULL in that
656 : case. We're relying on maxarenas also being 0 in that case, so that
657 : (POOL)->arenaindex < maxarenas must be false, saving us from trying to index
658 : into a NULL arenas.
659 :
660 : Details: given P and POOL, the arena_object corresponding to P is AO =
661 : arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
662 : stores, etc), POOL is the correct address of P's pool, AO.address is the
663 : correct base address of the pool's arena, and P must be within ARENA_SIZE of
664 : AO.address. In addition, AO.address is not 0 (no arena can start at address 0
665 : (NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
666 : controls P.
667 :
668 : Now suppose obmalloc does not control P (e.g., P was obtained via a direct
669 : call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
670 : in this case -- it may even be uninitialized trash. If the trash arenaindex
671 : is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
672 : control P.
673 :
674 : Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
675 : allocated arena, obmalloc controls all the memory in slice AO.address :
676 : AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
677 : so P doesn't lie in that slice, so the macro correctly reports that P is not
678 : controlled by obmalloc.
679 :
680 : Finally, if P is not controlled by obmalloc and AO corresponds to an unused
681 : arena_object (one not currently associated with an allocated arena),
682 : AO.address is 0, and the second test in the macro reduces to:
683 :
684 : P < ARENA_SIZE
685 :
686 : If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
687 : that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
688 : of the test still passes, and the third clause (AO.address != 0) is necessary
689 : to get the correct result: AO.address is 0 in this case, so the macro
690 : correctly reports that P is not controlled by obmalloc (even though P lies in
691 : slice AO.address : AO.address + ARENA_SIZE).
692 :
693 : Note: The third (AO.address != 0) clause was added in Python 2.5. Before
694 : 2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
695 : corresponded to a currently-allocated arena, so the "P is not controlled by
696 : obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
697 : was impossible.
698 :
699 : Note that the logic is excruciating, and reading up possibly uninitialized
700 : memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
701 : creates problems for some memory debuggers. The overwhelming advantage is
702 : that this test determines whether an arbitrary address is controlled by
703 : obmalloc in small constant time, independent of the number of arenas
704 : obmalloc controls. Since this test is needed at every entry point, it's
705 : extremely desirable that it be this fast.
706 :
707 : Since Py_ADDRESS_IN_RANGE may be reading from memory which was not allocated
708 : by Python, it is important that (POOL)->arenaindex is read only once, as
709 : another thread may be concurrently modifying the value without holding the
710 : GIL. To accomplish this, the arenaindex_temp variable is used to store
711 : (POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's
712 : execution. The caller of the macro is responsible for declaring this
713 : variable.
714 : */
715 : #define Py_ADDRESS_IN_RANGE(P, POOL) \
716 : ((arenaindex_temp = (POOL)->arenaindex) < maxarenas && \
717 : (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \
718 : arenas[arenaindex_temp].address != 0)
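/* Canonical use (as in PyObject_Free and PyObject_Realloc below): the
 * caller declares arenaindex_temp, computes the pool address once, and
 * hands both to the macro:
 *
 *     uint arenaindex_temp;
 *     poolp pool = POOL_ADDR(p);
 *     if (Py_ADDRESS_IN_RANGE(p, pool)) {
 *         // p was handed out by pymalloc
 *     }
 */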
719 :
720 :
721 : /* This is only useful when running memory debuggers such as
722 : * Purify or Valgrind. Uncomment to use.
723 : *
724 : #define Py_USING_MEMORY_DEBUGGER
725 : */
726 :
727 : #ifdef Py_USING_MEMORY_DEBUGGER
728 :
729 : /* Py_ADDRESS_IN_RANGE may access uninitialized memory by design
730 : * This leads to thousands of spurious warnings when using
731 : * Purify or Valgrind. By making a function, we can easily
732 : * suppress the uninitialized memory reads in this one function.
733 : * So we won't ignore real errors elsewhere.
734 : *
735 : * Disable the macro and use a function.
736 : */
737 :
738 : #undef Py_ADDRESS_IN_RANGE
739 :
740 : #if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
741 : (__GNUC__ >= 4))
742 : #define Py_NO_INLINE __attribute__((__noinline__))
743 : #else
744 : #define Py_NO_INLINE
745 : #endif
746 :
747 : /* Don't make static, to try to ensure this isn't inlined. */
748 : int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
749 : #undef Py_NO_INLINE
750 : #endif
751 :
752 : /*==========================================================================*/
753 :
754 : /* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
755 : * from all other currently live pointers. This may not be possible.
756 : */
757 :
758 : /*
759 : * The basic blocks are ordered by decreasing execution frequency,
760 : * which minimizes the number of jumps in the most common cases,
761 : * improves branch prediction and instruction scheduling (small
762 : * block allocations typically result in a couple of instructions).
763 : * Unless, that is, the optimizer reorders everything by being too smart...
764 : */
765 :
766 : #undef PyObject_Malloc
767 : void *
768 178421 : PyObject_Malloc(size_t nbytes)
769 : {
770 : block *bp;
771 : poolp pool;
772 : poolp next;
773 : uint size;
774 :
775 : #ifdef WITH_VALGRIND
776 : if (UNLIKELY(running_on_valgrind == -1))
777 : running_on_valgrind = RUNNING_ON_VALGRIND;
778 : if (UNLIKELY(running_on_valgrind))
779 : goto redirect;
780 : #endif
781 :
782 : /*
783 : * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
784 : * Most python internals blindly use a signed Py_ssize_t to track
785 : * things without checking for overflows or negatives.
786 : * As size_t is unsigned, checking for nbytes < 0 is not required.
787 : */
788 178421 : if (nbytes > PY_SSIZE_T_MAX)
789 0 : return NULL;
790 :
791 : /*
792 : * This implicitly redirects malloc(0), since (nbytes - 1) then wraps to SIZE_MAX.
793 : */
794 178421 : if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
795 : LOCK();
796 : /*
797 : * Most frequent paths first
798 : */
799 164708 : size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
800 164708 : pool = usedpools[size + size];
801 164708 : if (pool != pool->nextpool) {
802 : /*
803 : * There is a used pool for this size class.
804 : * Pick up the head block of its free list.
805 : */
806 163677 : ++pool->ref.count;
807 163677 : bp = pool->freeblock;
808 : assert(bp != NULL);
809 163677 : if ((pool->freeblock = *(block **)bp) != NULL) {
810 : UNLOCK();
811 111091 : return (void *)bp;
812 : }
813 : /*
814 : * Reached the end of the free list, try to extend it.
815 : */
816 52586 : if (pool->nextoffset <= pool->maxnextoffset) {
817 : /* There is room for another block. */
818 39237 : pool->freeblock = (block*)pool +
819 : pool->nextoffset;
820 39237 : pool->nextoffset += INDEX2SIZE(size);
821 39237 : *(block **)(pool->freeblock) = NULL;
822 : UNLOCK();
823 39237 : return (void *)bp;
824 : }
825 : /* Pool is full, unlink from used pools. */
826 13349 : next = pool->nextpool;
827 13349 : pool = pool->prevpool;
828 13349 : next->prevpool = pool;
829 13349 : pool->nextpool = next;
830 : UNLOCK();
831 13349 : return (void *)bp;
832 : }
833 :
834 : /* There isn't a pool of the right size class immediately
835 : * available: use a free pool.
836 : */
837 1031 : if (usable_arenas == NULL) {
838 : /* No arena has a free pool: allocate a new arena. */
839 : #ifdef WITH_MEMORY_LIMITS
840 : if (narenas_currently_allocated >= MAX_ARENAS) {
841 : UNLOCK();
842 : goto redirect;
843 : }
844 : #endif
845 11 : usable_arenas = new_arena();
846 11 : if (usable_arenas == NULL) {
847 : UNLOCK();
848 0 : goto redirect;
849 : }
850 22 : usable_arenas->nextarena =
851 11 : usable_arenas->prevarena = NULL;
852 : }
853 : assert(usable_arenas->address != 0);
854 :
855 : /* Try to get a cached free pool. */
856 1031 : pool = usable_arenas->freepools;
857 1031 : if (pool != NULL) {
858 : /* Unlink from cached pools. */
859 580 : usable_arenas->freepools = pool->nextpool;
860 :
861 : /* This arena already had the smallest nfreepools
862 : * value, so decreasing nfreepools doesn't change
863 : * that, and we don't need to rearrange the
864 : * usable_arenas list. However, if the arena has
865 : * become wholly allocated, we need to remove its
866 : * arena_object from usable_arenas.
867 : */
868 580 : --usable_arenas->nfreepools;
869 580 : if (usable_arenas->nfreepools == 0) {
870 : /* Wholly allocated: remove. */
871 : assert(usable_arenas->freepools == NULL);
872 : assert(usable_arenas->nextarena == NULL ||
873 : usable_arenas->nextarena->prevarena ==
874 : usable_arenas);
875 :
876 5 : usable_arenas = usable_arenas->nextarena;
877 5 : if (usable_arenas != NULL) {
878 1 : usable_arenas->prevarena = NULL;
879 : assert(usable_arenas->address != 0);
880 : }
881 : }
882 : else {
883 : /* nfreepools > 0: it must be that freepools
884 : * isn't NULL, or that we haven't yet carved
885 : * off all the arena's pools for the first
886 : * time.
887 : */
888 : assert(usable_arenas->freepools != NULL ||
889 : usable_arenas->pool_address <=
890 : (block*)usable_arenas->address +
891 : ARENA_SIZE - POOL_SIZE);
892 : }
893 : init_pool:
894 : /* Frontlink to used pools. */
895 1031 : next = usedpools[size + size]; /* == prev */
896 1031 : pool->nextpool = next;
897 1031 : pool->prevpool = next;
898 1031 : next->nextpool = pool;
899 1031 : next->prevpool = pool;
900 1031 : pool->ref.count = 1;
901 1031 : if (pool->szidx == size) {
902 : /* Luckily, this pool last contained blocks
903 : * of the same size class, so its header
904 : * and free list are already initialized.
905 : */
906 359 : bp = pool->freeblock;
907 359 : pool->freeblock = *(block **)bp;
908 : UNLOCK();
909 359 : return (void *)bp;
910 : }
911 : /*
912 : * Initialize the pool header, set up the free list to
913 : * contain just the second block, and return the first
914 : * block.
915 : */
916 672 : pool->szidx = size;
917 672 : size = INDEX2SIZE(size);
918 672 : bp = (block *)pool + POOL_OVERHEAD;
919 672 : pool->nextoffset = POOL_OVERHEAD + (size << 1);
920 672 : pool->maxnextoffset = POOL_SIZE - size;
921 672 : pool->freeblock = bp + size;
922 672 : *(block **)(pool->freeblock) = NULL;
923 : UNLOCK();
924 672 : return (void *)bp;
925 : }
926 :
927 : /* Carve off a new pool. */
928 : assert(usable_arenas->nfreepools > 0);
929 : assert(usable_arenas->freepools == NULL);
930 451 : pool = (poolp)usable_arenas->pool_address;
931 : assert((block*)pool <= (block*)usable_arenas->address +
932 : ARENA_SIZE - POOL_SIZE);
933 451 : pool->arenaindex = usable_arenas - arenas;
934 : assert(&arenas[pool->arenaindex] == usable_arenas);
935 451 : pool->szidx = DUMMY_SIZE_IDX;
936 451 : usable_arenas->pool_address += POOL_SIZE;
937 451 : --usable_arenas->nfreepools;
938 :
939 451 : if (usable_arenas->nfreepools == 0) {
940 : assert(usable_arenas->nextarena == NULL ||
941 : usable_arenas->nextarena->prevarena ==
942 : usable_arenas);
943 : /* Unlink the arena: it is completely allocated. */
944 6 : usable_arenas = usable_arenas->nextarena;
945 6 : if (usable_arenas != NULL) {
946 0 : usable_arenas->prevarena = NULL;
947 : assert(usable_arenas->address != 0);
948 : }
949 : }
950 :
951 451 : goto init_pool;
952 : }
953 :
954 : /* The small block allocator ends here. */
955 :
956 : redirect:
957 : /* Redirect the original request to the underlying (libc) allocator.
958 : * We jump here on bigger requests, on error in the code above (as a
959 : * last chance to serve the request) or when the max memory limit
960 : * has been reached.
961 : */
962 13713 : if (nbytes == 0)
963 0 : nbytes = 1;
964 13713 : return (void *)malloc(nbytes);
965 : }
966 :
967 : /* free */
968 :
969 : #undef PyObject_Free
970 : void
971 147417 : PyObject_Free(void *p)
972 : {
973 : poolp pool;
974 : block *lastfree;
975 : poolp next, prev;
976 : uint size;
977 : #ifndef Py_USING_MEMORY_DEBUGGER
978 : uint arenaindex_temp;
979 : #endif
980 :
981 147417 : if (p == NULL) /* free(NULL) has no effect */
982 0 : return;
983 :
984 : #ifdef WITH_VALGRIND
985 : if (UNLIKELY(running_on_valgrind > 0))
986 : goto redirect;
987 : #endif
988 :
989 147417 : pool = POOL_ADDR(p);
990 147417 : if (Py_ADDRESS_IN_RANGE(p, pool)) {
991 : /* We allocated this address. */
992 : LOCK();
993 : /* Link p to the start of the pool's freeblock list. Since
994 : * the pool had at least the p block outstanding, the pool
995 : * wasn't empty (so it's already in a usedpools[] list, or
996 : * was full and is in no list -- it's not in the freeblocks
997 : * list in any case).
998 : */
999 : assert(pool->ref.count > 0); /* else it was empty */
1000 133999 : *(block **)p = lastfree = pool->freeblock;
1001 133999 : pool->freeblock = (block *)p;
1002 133999 : if (lastfree) {
1003 : struct arena_object* ao;
1004 : uint nf; /* ao->nfreepools */
1005 :
1006 : /* freeblock wasn't NULL, so the pool wasn't full,
1007 : * and the pool is in a usedpools[] list.
1008 : */
1009 121015 : if (--pool->ref.count != 0) {
1010 : /* pool isn't empty: leave it in usedpools */
1011 : UNLOCK();
1012 120429 : return;
1013 : }
1014 : /* Pool is now empty: unlink from usedpools, and
1015 : * link to the front of freepools. This ensures that
1016 : * previously freed pools will be allocated later
1017 : * (being not referenced, they are perhaps paged out).
1018 : */
1019 586 : next = pool->nextpool;
1020 586 : prev = pool->prevpool;
1021 586 : next->prevpool = prev;
1022 586 : prev->nextpool = next;
1023 :
1024 : /* Link the pool to freepools. This is a singly-linked
1025 : * list, and pool->prevpool isn't used there.
1026 : */
1027 586 : ao = &arenas[pool->arenaindex];
1028 586 : pool->nextpool = ao->freepools;
1029 586 : ao->freepools = pool;
1030 586 : nf = ++ao->nfreepools;
1031 :
1032 : /* All the rest is arena management. We just freed
1033 : * a pool, and there are 4 cases for arena mgmt:
1034 : * 1. If all the pools are free, return the arena to
1035 : * the system free().
1036 : * 2. If this is the only free pool in the arena,
1037 : * add the arena back to the `usable_arenas` list.
1038 : * 3. If the "next" arena has a smaller count of free
1039 : * pools, we have to "slide this arena right" to
1040 : * restore that usable_arenas is sorted in order of
1041 : * nfreepools.
1042 : * 4. Else there's nothing more to do.
1043 : */
1044 586 : if (nf == ao->ntotalpools) {
1045 : /* Case 1. First unlink ao from usable_arenas.
1046 : */
1047 : assert(ao->prevarena == NULL ||
1048 : ao->prevarena->address != 0);
1049 : assert(ao ->nextarena == NULL ||
1050 : ao->nextarena->address != 0);
1051 :
1052 : /* Fix the pointer in the prevarena, or the
1053 : * usable_arenas pointer.
1054 : */
1055 4 : if (ao->prevarena == NULL) {
1056 4 : usable_arenas = ao->nextarena;
1057 : assert(usable_arenas == NULL ||
1058 : usable_arenas->address != 0);
1059 : }
1060 : else {
1061 : assert(ao->prevarena->nextarena == ao);
1062 0 : ao->prevarena->nextarena =
1063 0 : ao->nextarena;
1064 : }
1065 : /* Fix the pointer in the nextarena. */
1066 4 : if (ao->nextarena != NULL) {
1067 : assert(ao->nextarena->prevarena == ao);
1068 0 : ao->nextarena->prevarena =
1069 0 : ao->prevarena;
1070 : }
1071 : /* Record that this arena_object slot is
1072 : * available to be reused.
1073 : */
1074 4 : ao->nextarena = unused_arena_objects;
1075 4 : unused_arena_objects = ao;
1076 :
1077 : /* Free the entire arena. */
1078 : #ifdef ARENAS_USE_MMAP
1079 4 : munmap((void *)ao->address, ARENA_SIZE);
1080 : #else
1081 : free((void *)ao->address);
1082 : #endif
1083 4 : ao->address = 0; /* mark unassociated */
1084 4 : --narenas_currently_allocated;
1085 :
1086 : UNLOCK();
1087 4 : return;
1088 : }
1089 582 : if (nf == 1) {
1090 : /* Case 2. Put ao at the head of
1091 : * usable_arenas. Note that because
1092 : * ao->nfreepools was 0 before, ao isn't
1093 : * currently on the usable_arenas list.
1094 : */
1095 5 : ao->nextarena = usable_arenas;
1096 5 : ao->prevarena = NULL;
1097 5 : if (usable_arenas)
1098 1 : usable_arenas->prevarena = ao;
1099 5 : usable_arenas = ao;
1100 : assert(usable_arenas->address != 0);
1101 :
1102 : UNLOCK();
1103 5 : return;
1104 : }
1105 : /* If this arena is now out of order, we need to keep
1106 : * the list sorted. The list is kept sorted so that
1107 : * the "most full" arenas are used first, which allows
1108 : * the nearly empty arenas to be completely freed. In
1109 : * a few un-scientific tests, it seems like this
1110 : * approach allowed a lot more memory to be freed.
1111 : */
1112 577 : if (ao->nextarena == NULL ||
1113 0 : nf <= ao->nextarena->nfreepools) {
1114 : /* Case 4. Nothing to do. */
1115 : UNLOCK();
1116 577 : return;
1117 : }
1118 : /* Case 3: We have to move the arena towards the end
1119 : * of the list, because it has more free pools than
1120 : * the arena to its right.
1121 : * First unlink ao from usable_arenas.
1122 : */
1123 0 : if (ao->prevarena != NULL) {
1124 : /* ao isn't at the head of the list */
1125 : assert(ao->prevarena->nextarena == ao);
1126 0 : ao->prevarena->nextarena = ao->nextarena;
1127 : }
1128 : else {
1129 : /* ao is at the head of the list */
1130 : assert(usable_arenas == ao);
1131 0 : usable_arenas = ao->nextarena;
1132 : }
1133 0 : ao->nextarena->prevarena = ao->prevarena;
1134 :
1135 : /* Locate the new insertion point by iterating over
1136 : * the list, using our nextarena pointer.
1137 : */
1138 0 : while (ao->nextarena != NULL &&
1139 0 : nf > ao->nextarena->nfreepools) {
1140 0 : ao->prevarena = ao->nextarena;
1141 0 : ao->nextarena = ao->nextarena->nextarena;
1142 : }
1143 :
1144 : /* Insert ao at this point. */
1145 : assert(ao->nextarena == NULL ||
1146 : ao->prevarena == ao->nextarena->prevarena);
1147 : assert(ao->prevarena->nextarena == ao->nextarena);
1148 :
1149 0 : ao->prevarena->nextarena = ao;
1150 0 : if (ao->nextarena != NULL)
1151 0 : ao->nextarena->prevarena = ao;
1152 :
1153 : /* Verify that the swaps worked. */
1154 : assert(ao->nextarena == NULL ||
1155 : nf <= ao->nextarena->nfreepools);
1156 : assert(ao->prevarena == NULL ||
1157 : nf > ao->prevarena->nfreepools);
1158 : assert(ao->nextarena == NULL ||
1159 : ao->nextarena->prevarena == ao);
1160 : assert((usable_arenas == ao &&
1161 : ao->prevarena == NULL) ||
1162 : ao->prevarena->nextarena == ao);
1163 :
1164 : UNLOCK();
1165 0 : return;
1166 : }
1167 : /* Pool was full, so doesn't currently live in any list:
1168 : * link it to the front of the appropriate usedpools[] list.
1169 : * This mimics LRU pool usage for new allocations and
1170 : * targets optimal filling when several pools contain
1171 : * blocks of the same size class.
1172 : */
1173 12984 : --pool->ref.count;
1174 : assert(pool->ref.count > 0); /* else the pool is empty */
1175 12984 : size = pool->szidx;
1176 12984 : next = usedpools[size + size];
1177 12984 : prev = next->prevpool;
1178 : /* insert pool before next: prev <-> pool <-> next */
1179 12984 : pool->nextpool = next;
1180 12984 : pool->prevpool = prev;
1181 12984 : next->prevpool = pool;
1182 12984 : prev->nextpool = pool;
1183 : UNLOCK();
1184 12984 : return;
1185 : }
1186 :
1187 : #ifdef WITH_VALGRIND
1188 : redirect:
1189 : #endif
1190 : /* We didn't allocate this address. */
1191 13418 : free(p);
1192 : }
1193 :
1194 : /* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
1195 : * then as the Python docs promise, we do not treat this like free(p), and
1196 : * return a non-NULL result.
1197 : */
1198 :
1199 : #undef PyObject_Realloc
1200 : void *
1201 9476 : PyObject_Realloc(void *p, size_t nbytes)
1202 : {
1203 : void *bp;
1204 : poolp pool;
1205 : size_t size;
1206 : #ifndef Py_USING_MEMORY_DEBUGGER
1207 : uint arenaindex_temp;
1208 : #endif
1209 :
1210 9476 : if (p == NULL)
1211 7424 : return PyObject_Malloc(nbytes);
1212 :
1213 : /*
1214 : * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
1215 : * Most python internals blindly use a signed Py_ssize_t to track
1216 : * things without checking for overflows or negatives.
1217 : * As size_t is unsigned, checking for nbytes < 0 is not required.
1218 : */
1219 2052 : if (nbytes > PY_SSIZE_T_MAX)
1220 0 : return NULL;
1221 :
1222 : #ifdef WITH_VALGRIND
1223 : /* Treat running_on_valgrind == -1 the same as 0 */
1224 : if (UNLIKELY(running_on_valgrind > 0))
1225 : goto redirect;
1226 : #endif
1227 :
1228 2052 : pool = POOL_ADDR(p);
1229 2052 : if (Py_ADDRESS_IN_RANGE(p, pool)) {
1230 : /* We're in charge of this block */
1231 1943 : size = INDEX2SIZE(pool->szidx);
1232 1943 : if (nbytes <= size) {
1233 : /* The block is staying the same or shrinking. If
1234 : * it's shrinking, there's a tradeoff: it costs
1235 : * cycles to copy the block to a smaller size class,
1236 : * but it wastes memory not to copy it. The
1237 : * compromise here is to copy on shrink only if at
1238 : * least 25% of size can be shaved off.
1239 : */
1240 565 : if (4 * nbytes > 3 * size) {
1241 : /* It's the same,
1242 : * or shrinking and new/old > 3/4.
1243 : */
1244 301 : return p;
1245 : }
1246 264 : size = nbytes;
1247 : }
1248 1642 : bp = PyObject_Malloc(nbytes);
1249 1642 : if (bp != NULL) {
1250 1642 : memcpy(bp, p, size);
1251 1642 : PyObject_Free(p);
1252 : }
1253 1642 : return bp;
1254 : }
1255 : #ifdef WITH_VALGRIND
1256 : redirect:
1257 : #endif
1258 : /* We're not managing this block. If nbytes <=
1259 : * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
1260 : * block. However, if we do, we need to copy the valid data from
1261 : * the C-managed block to one of our blocks, and there's no portable
1262 : * way to know how much of the memory space starting at p is valid.
1263 : * As bug 1185883 pointed out the hard way, it's possible that the
1264 : * C-managed block is "at the end" of allocated VM space, so that
1265 : * a memory fault can occur if we try to copy nbytes bytes starting
1266 : * at p. Instead we punt: let C continue to manage this block.
1267 : */
1268 109 : if (nbytes)
1269 109 : return realloc(p, nbytes);
1270 : /* C doesn't define the result of realloc(p, 0) (it may or may not
1271 : * return NULL then), but Python's docs promise that nbytes==0 never
1272 : * returns NULL. We don't pass 0 to realloc(), to avoid that end case
1273 : * to begin with. Even then, we can't be sure that realloc() won't
1274 : * return NULL.
1275 : */
1276 0 : bp = realloc(p, 1);
1277 0 : return bp ? bp : p;
1278 : }
1279 :
1280 : #else /* ! WITH_PYMALLOC */
1281 :
1282 : /*==========================================================================*/
1283 : /* pymalloc not enabled: Redirect the entry points to malloc. These will
1284 : * only be used by extensions that are compiled with pymalloc enabled. */
1285 :
1286 : void *
1287 : PyObject_Malloc(size_t n)
1288 : {
1289 : return PyMem_MALLOC(n);
1290 : }
1291 :
1292 : void *
1293 : PyObject_Realloc(void *p, size_t n)
1294 : {
1295 : return PyMem_REALLOC(p, n);
1296 : }
1297 :
1298 : void
1299 : PyObject_Free(void *p)
1300 : {
1301 : PyMem_FREE(p);
1302 : }
1303 : #endif /* WITH_PYMALLOC */
1304 :
1305 : #ifdef PYMALLOC_DEBUG
1306 : /*==========================================================================*/
1307 : /* A cross-platform debugging allocator. This doesn't manage memory directly;
1308 : * it wraps a real allocator, adding extra debugging info to the memory blocks.
1309 : */
1310 :
1311 : /* Special bytes broadcast into debug memory blocks at appropriate times.
1312 : * Strings of these are unlikely to be valid addresses, floats, ints or
1313 : * 7-bit ASCII.
1314 : */
1315 : #undef CLEANBYTE
1316 : #undef DEADBYTE
1317 : #undef FORBIDDENBYTE
1318 : #define CLEANBYTE 0xCB /* clean (newly allocated) memory */
1319 : #define DEADBYTE 0xDB /* dead (newly freed) memory */
1320 : #define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
1321 :
1322 : /* We tag each block with an API ID in order to tag API violations */
1323 : #define _PYMALLOC_MEM_ID 'm' /* the PyMem_Malloc() API */
1324 : #define _PYMALLOC_OBJ_ID 'o' /* The PyObject_Malloc() API */
1325 :
1326 : static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
1327 :
1328 : /* serialno is always incremented via calling this routine. The point is
1329 : * to supply a single place to set a breakpoint.
1330 : */
1331 : static void
1332 : bumpserialno(void)
1333 : {
1334 : ++serialno;
1335 : }
1336 :
1337 : #define SST SIZEOF_SIZE_T
1338 :
1339 : /* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1340 : static size_t
1341 : read_size_t(const void *p)
1342 : {
1343 : const uchar *q = (const uchar *)p;
1344 : size_t result = *q++;
1345 : int i;
1346 :
1347 : for (i = SST; --i > 0; ++q)
1348 : result = (result << 8) | *q;
1349 : return result;
1350 : }
1351 :
1352 : /* Write n as a big-endian size_t, MSB at address p, LSB at
1353 : * p + sizeof(size_t) - 1.
1354 : */
1355 : static void
1356 : write_size_t(void *p, size_t n)
1357 : {
1358 : uchar *q = (uchar *)p + SST - 1;
1359 : int i;
1360 :
1361 : for (i = SST; --i >= 0; --q) {
1362 : *q = (uchar)(n & 0xff);
1363 : n >>= 8;
1364 : }
1365 : }
1366 :
1367 : #ifdef Py_DEBUG
1368 : /* Is target in the list? The list is traversed via the nextpool pointers.
1369 : * The list may be NULL-terminated, or circular. Return 1 if target is in
1370 : * list, else 0.
1371 : */
1372 : static int
1373 : pool_is_in_list(const poolp target, poolp list)
1374 : {
1375 : poolp origlist = list;
1376 : assert(target != NULL);
1377 : if (list == NULL)
1378 : return 0;
1379 : do {
1380 : if (target == list)
1381 : return 1;
1382 : list = list->nextpool;
1383 : } while (list != NULL && list != origlist);
1384 : return 0;
1385 : }
1386 :
1387 : #else
1388 : #define pool_is_in_list(X, Y) 1
1389 :
1390 : #endif /* Py_DEBUG */
1391 :
1392 : /* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1393 : fills them with useful stuff, here calling the underlying malloc's result p:
1394 :
1395 : p[0: S]
1396 : Number of bytes originally asked for. This is a size_t, big-endian (easier
1397 : to read in a memory dump).
1398 : p[S: 2*S]
1399 : The API ID byte, then SST-1 copies of FORBIDDENBYTE. Used to catch under-writes and reads.
1400 : p[2*S: 2*S+n]
1401 : The requested memory, filled with copies of CLEANBYTE.
1402 : Used to catch references to uninitialized memory.
1403 : &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
1404 : handled the request itself.
1405 : p[2*S+n: 2*S+n+S]
1406 : Copies of FORBIDDENBYTE. Used to catch over-writes and reads.
1407 : p[2*S+n+S: 2*S+n+2*S]
1408 : A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
1409 : and _PyObject_DebugRealloc.
1410 : This is a big-endian size_t.
1411 : If "bad memory" is detected later, the serial number gives an
1412 : excellent way to set a breakpoint on the next run, to capture the
1413 : instant at which this block was passed out.
1414 : */
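/* Sketch (assuming the layout above) of how a debugger or an assertion
 * could recover the bookkeeping fields from a pointer q handed out by the
 * debug allocator -- the same fields the debug check/free routines below
 * consult:
 *
 *     size_t nbytes = read_size_t((uchar *)q - 2*SST);       // request size
 *     uchar api     = *((uchar *)q - SST);                   // API ID byte
 *     size_t serial = read_size_t((uchar *)q + nbytes + SST);
 */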
1415 :
1416 : /* debug replacements for the PyMem_* memory API */
1417 : void *
1418 : _PyMem_DebugMalloc(size_t nbytes)
1419 : {
1420 : return _PyObject_DebugMallocApi(_PYMALLOC_MEM_ID, nbytes);
1421 : }
1422 : void *
1423 : _PyMem_DebugRealloc(void *p, size_t nbytes)
1424 : {
1425 : return _PyObject_DebugReallocApi(_PYMALLOC_MEM_ID, p, nbytes);
1426 : }
1427 : void
1428 : _PyMem_DebugFree(void *p)
1429 : {
1430 : _PyObject_DebugFreeApi(_PYMALLOC_MEM_ID, p);
1431 : }
1432 :
1433 : /* debug replacements for the PyObject_* memory API */
1434 : void *
1435 : _PyObject_DebugMalloc(size_t nbytes)
1436 : {
1437 : return _PyObject_DebugMallocApi(_PYMALLOC_OBJ_ID, nbytes);
1438 : }
1439 : void *
1440 : _PyObject_DebugRealloc(void *p, size_t nbytes)
1441 : {
1442 : return _PyObject_DebugReallocApi(_PYMALLOC_OBJ_ID, p, nbytes);
1443 : }
1444 : void
1445 : _PyObject_DebugFree(void *p)
1446 : {
1447 : _PyObject_DebugFreeApi(_PYMALLOC_OBJ_ID, p);
1448 : }
1449 : void
1450 : _PyObject_DebugCheckAddress(const void *p)
1451 : {
1452 : _PyObject_DebugCheckAddressApi(_PYMALLOC_OBJ_ID, p);
1453 : }
1454 :
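/* Editorial illustration (hypothetical snippet): the id byte is what catches
 * API mixing. A block obtained via the PyMem debug wrappers carries 'm', so
 * releasing it via the PyObject wrappers trips the "bad ID" branch in
 * _PyObject_DebugCheckAddressApi and aborts with Py_FatalError:
 *
 *     void *p = _PyMem_DebugMalloc(16);   id byte written: 'm'
 *     _PyObject_DebugFree(p);             verified against 'o' -> fatal
 */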
1455 :
1456 : /* generic debug memory api, with an "id" to identify the API in use */
1457 : void *
1458 : _PyObject_DebugMallocApi(char id, size_t nbytes)
1459 : {
1460 : uchar *p; /* base address of malloc'ed block */
1461 : uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */
1462 : size_t total; /* nbytes + 4*SST */
1463 :
1464 : bumpserialno();
1465 : total = nbytes + 4*SST;
1466 : if (total < nbytes)
1467 : /* overflow: can't represent total as a size_t */
1468 : return NULL;
1469 :
1470 : p = (uchar *)PyObject_Malloc(total);
1471 : if (p == NULL)
1472 : return NULL;
1473 :
1474 : /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
1475 : write_size_t(p, nbytes);
1476 : p[SST] = (uchar)id;
1477 : memset(p + SST + 1 , FORBIDDENBYTE, SST-1);
1478 :
1479 : if (nbytes > 0)
1480 : memset(p + 2*SST, CLEANBYTE, nbytes);
1481 :
1482 : /* at tail, write pad (SST bytes) and serialno (SST bytes) */
1483 : tail = p + 2*SST + nbytes;
1484 : memset(tail, FORBIDDENBYTE, SST);
1485 : write_size_t(tail + SST, serialno);
1486 :
1487 : return p + 2*SST;
1488 : }
1489 :
1490 : /* The debug free first checks the 2*SST bytes on each end for sanity (in
1491 : particular, that the FORBIDDENBYTEs with the api ID are still intact).
1492 : Then fills the whole block, decorations included, with DEADBYTE.
1493 : Then calls the underlying free.
1494 : */
1495 : void
1496 : _PyObject_DebugFreeApi(char api, void *p)
1497 : {
1498 : uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */
1499 : size_t nbytes;
1500 :
1501 : if (p == NULL)
1502 : return;
1503 : _PyObject_DebugCheckAddressApi(api, p);
1504 : nbytes = read_size_t(q);
1505 : nbytes += 4*SST;
1506 : if (nbytes > 0)
1507 : memset(q, DEADBYTE, nbytes);
1508 : PyObject_Free(q);
1509 : }
1510 :
1511 : void *
1512 : _PyObject_DebugReallocApi(char api, void *p, size_t nbytes)
1513 : {
1514 : uchar *q = (uchar *)p;
1515 : uchar *tail;
1516 : size_t total; /* nbytes + 4*SST */
1517 : size_t original_nbytes;
1518 : int i;
1519 :
1520 : if (p == NULL)
1521 : return _PyObject_DebugMallocApi(api, nbytes);
1522 :
1523 : _PyObject_DebugCheckAddressApi(api, p);
1524 : bumpserialno();
1525 : original_nbytes = read_size_t(q - 2*SST);
1526 : total = nbytes + 4*SST;
1527 : if (total < nbytes)
1528 : /* overflow: can't represent total as a size_t */
1529 : return NULL;
1530 :
1531 : if (nbytes < original_nbytes) {
1532 : /* shrinking: mark old extra memory dead */
1533 : memset(q + nbytes, DEADBYTE, original_nbytes - nbytes + 2*SST);
1534 : }
1535 :
1536 : /* Resize and add decorations. We may get a new pointer here, in which
1537 : * case we didn't get the chance to mark the old memory with DEADBYTE,
1538 : * but we live with that.
1539 : */
1540 : q = (uchar *)PyObject_Realloc(q - 2*SST, total);
1541 : if (q == NULL)
1542 : return NULL;
1543 :
1544 : write_size_t(q, nbytes);
1545 : assert(q[SST] == (uchar)api);
1546 : for (i = 1; i < SST; ++i)
1547 : assert(q[SST + i] == FORBIDDENBYTE);
1548 : q += 2*SST;
1549 : tail = q + nbytes;
1550 : memset(tail, FORBIDDENBYTE, SST);
1551 : write_size_t(tail + SST, serialno);
1552 :
1553 : if (nbytes > original_nbytes) {
1554 : /* growing: mark new extra memory clean */
1555 : memset(q + original_nbytes, CLEANBYTE,
1556 : nbytes - original_nbytes);
1557 : }
1558 :
1559 : return q;
1560 : }
1561 :
1562 : /* Check the forbidden bytes on both ends of the memory allocated for p.
1563 : * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
1564 : * and call Py_FatalError to kill the program.
1565 : * The API id is also checked.
1566 : */
1567 : void
1568 : _PyObject_DebugCheckAddressApi(char api, const void *p)
1569 : {
1570 : const uchar *q = (const uchar *)p;
1571 : char msgbuf[64];
1572 : char *msg;
1573 : size_t nbytes;
1574 : const uchar *tail;
1575 : int i;
1576 : char id;
1577 :
1578 : if (p == NULL) {
1579 : msg = "didn't expect a NULL pointer";
1580 : goto error;
1581 : }
1582 :
1583 : /* Check the API id */
1584 : id = (char)q[-SST];
1585 : if (id != api) {
1586 : msg = msgbuf;
1587 : snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
1588 : msgbuf[sizeof(msgbuf)-1] = 0;
1589 : goto error;
1590 : }
1591 :
1592 : /* Check the stuff at the start of p first: if there's underwrite
1593 : * corruption, the number-of-bytes field may be nuts, and checking
1594 : * the tail could lead to a segfault then.
1595 : */
1596 : for (i = SST-1; i >= 1; --i) {
1597 : if (*(q-i) != FORBIDDENBYTE) {
1598 : msg = "bad leading pad byte";
1599 : goto error;
1600 : }
1601 : }
1602 :
1603 : nbytes = read_size_t(q - 2*SST);
1604 : tail = q + nbytes;
1605 : for (i = 0; i < SST; ++i) {
1606 : if (tail[i] != FORBIDDENBYTE) {
1607 : msg = "bad trailing pad byte";
1608 : goto error;
1609 : }
1610 : }
1611 :
1612 : return;
1613 :
1614 : error:
1615 : _PyObject_DebugDumpAddress(p);
1616 : Py_FatalError(msg);
1617 : }
1618 :
1619 : /* Display info to stderr about the memory block at p. */
1620 : void
1621 : _PyObject_DebugDumpAddress(const void *p)
1622 : {
1623 : const uchar *q = (const uchar *)p;
1624 : const uchar *tail;
1625 : size_t nbytes, serial;
1626 : int i;
1627 : int ok;
1628 : char id;
1629 :
1630 : fprintf(stderr, "Debug memory block at address p=%p:", p);
1631 : if (p == NULL) {
1632 : fprintf(stderr, "\n");
1633 : return;
1634 : }
1635 : id = (char)q[-SST];
1636 : fprintf(stderr, " API '%c'\n", id);
1637 :
1638 : nbytes = read_size_t(q - 2*SST);
1639 : fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
1640 : "requested\n", nbytes);
1641 :
1642 : /* In case this is nuts, check the leading pad bytes first. */
1643 : fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
1644 : ok = 1;
1645 : for (i = 1; i <= SST-1; ++i) {
1646 : if (*(q-i) != FORBIDDENBYTE) {
1647 : ok = 0;
1648 : break;
1649 : }
1650 : }
1651 : if (ok)
1652 : fputs("FORBIDDENBYTE, as expected.\n", stderr);
1653 : else {
1654 : fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1655 : FORBIDDENBYTE);
1656 : for (i = SST-1; i >= 1; --i) {
1657 : const uchar byte = *(q-i);
1658 : fprintf(stderr, " at p-%d: 0x%02x", i, byte);
1659 : if (byte != FORBIDDENBYTE)
1660 : fputs(" *** OUCH", stderr);
1661 : fputc('\n', stderr);
1662 : }
1663 :
1664 : fputs(" Because memory is corrupted at the start, the "
1665 : "count of bytes requested\n"
1666 : " may be bogus, and checking the trailing pad "
1667 : "bytes may segfault.\n", stderr);
1668 : }
1669 :
1670 : tail = q + nbytes;
1671 : fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
1672 : ok = 1;
1673 : for (i = 0; i < SST; ++i) {
1674 : if (tail[i] != FORBIDDENBYTE) {
1675 : ok = 0;
1676 : break;
1677 : }
1678 : }
1679 : if (ok)
1680 : fputs("FORBIDDENBYTE, as expected.\n", stderr);
1681 : else {
1682 : fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1683 : FORBIDDENBYTE);
1684 : for (i = 0; i < SST; ++i) {
1685 : const uchar byte = tail[i];
1686 : fprintf(stderr, " at tail+%d: 0x%02x",
1687 : i, byte);
1688 : if (byte != FORBIDDENBYTE)
1689 : fputs(" *** OUCH", stderr);
1690 : fputc('\n', stderr);
1691 : }
1692 : }
1693 :
1694 : serial = read_size_t(tail + SST);
1695 : fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
1696 : "u to debug malloc/realloc.\n", serial);
1697 :
1698 : if (nbytes > 0) {
1699 : i = 0;
1700 : fputs(" Data at p:", stderr);
1701 : /* print up to 8 bytes at the start */
1702 : while (q < tail && i < 8) {
1703 : fprintf(stderr, " %02x", *q);
1704 : ++i;
1705 : ++q;
1706 : }
1707 : /* and up to 8 at the end */
1708 : if (q < tail) {
1709 : if (tail - q > 8) {
1710 : fputs(" ...", stderr);
1711 : q = tail - 8;
1712 : }
1713 : while (q < tail) {
1714 : fprintf(stderr, " %02x", *q);
1715 : ++q;
1716 : }
1717 : }
1718 : fputc('\n', stderr);
1719 : }
1720 : }
1721 :
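/* Editorial illustration: for a 64-byte block whose trailing pad was
 * overwritten, the routine above would emit something like the following
 * (addresses and serial number invented; 0xfb/0xcb assume the conventional
 * FORBIDDENBYTE/CLEANBYTE values):
 *
 *     Debug memory block at address p=0x7f3a80: API 'o'
 *         64 bytes originally requested
 *         The 7 pad bytes at p-7 are FORBIDDENBYTE, as expected.
 *         The 8 pad bytes at tail=0x7f3ac0 are not all FORBIDDENBYTE (0xfb):
 *             at tail+0: 0x41 *** OUCH
 *             at tail+1: 0xfb
 *             ...
 *         The block was made by call #1234 to debug malloc/realloc.
 *         Data at p: cb cb cb cb cb cb cb cb ... cb cb cb cb cb cb cb cb
 */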
1722 : #endif /* PYMALLOC_DEBUG */
1723 :
1724 : static size_t
1725 0 : printone(FILE *out, const char* msg, size_t value)
1726 : {
1727 : int i, k;
1728 : char buf[100];
1729 0 : size_t origvalue = value;
1730 :
1731 0 : fputs(msg, out);
1732 0 : for (i = (int)strlen(msg); i < 35; ++i)
1733 0 : fputc(' ', out);
1734 0 : fputc('=', out);
1735 :
1736 : /* Write the value with commas. */
1737 0 : i = 22;
1738 0 : buf[i--] = '\0';
1739 0 : buf[i--] = '\n';
1740 0 : k = 3;
1741 : do {
1742 0 : size_t nextvalue = value / 10;
1743 0 : uint digit = (uint)(value - nextvalue * 10);
1744 0 : value = nextvalue;
1745 0 : buf[i--] = (char)(digit + '0');
1746 0 : --k;
1747 0 : if (k == 0 && value && i >= 0) {
1748 0 : k = 3;
1749 0 : buf[i--] = ',';
1750 : }
1751 0 : } while (value && i >= 0);
1752 :
1753 0 : while (i >= 0)
1754 0 : buf[i--] = ' ';
1755 0 : fputs(buf, out);
1756 :
1757 0 : return origvalue;
1758 : }
1759 :
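/* Editorial note: printone() pads the message to 35 columns, writes '=',
 * then the value right-justified with thousands separators in a 21-column
 * field. A hypothetical call
 *
 *     printone(out, "# arenas allocated total", 1637);
 *
 * therefore prints roughly:
 *
 *     # arenas allocated total           =                1,637
 */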
1760 : void
1761 0 : _PyDebugAllocatorStats(FILE *out,
1762 : const char *block_name, int num_blocks, size_t sizeof_block)
1763 : {
1764 : char buf1[128];
1765 : char buf2[128];
1766 0 : PyOS_snprintf(buf1, sizeof(buf1),
1767 : "%d %ss * %zd bytes each",
1768 : num_blocks, block_name, sizeof_block);
1769 0 : PyOS_snprintf(buf2, sizeof(buf2),
1770 : "%48s ", buf1);
1771 0 : (void)printone(out, buf2, num_blocks * sizeof_block);
1772 0 : }
1773 :
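/* Editorial illustration (hypothetical caller): object implementations with
 * private free lists can report them through this helper; a type caching
 * `numfree` dicts might do
 *
 *     _PyDebugAllocatorStats(out, "free PyDictObject",
 *                            numfree, sizeof(PyDictObject));
 *
 * producing a line like "80 free PyDictObjects * 48 bytes each" with the
 * product formatted by printone() above.
 */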
1774 : #ifdef WITH_PYMALLOC
1775 :
1776 : /* Print summary info to "out" about the state of pymalloc's structures.
1777 : * In Py_DEBUG mode, also perform some expensive internal consistency
1778 : * checks.
1779 : */
1780 : void
1781 0 : _PyObject_DebugMallocStats(FILE *out)
1782 : {
1783 : uint i;
1784 0 : const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
1785 : /* # of pools, allocated blocks, and free blocks per class index */
1786 : size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1787 : size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1788 : size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1789 : /* total # of allocated bytes in used and full pools */
1790 0 : size_t allocated_bytes = 0;
1791 : /* total # of available bytes in used pools */
1792 0 : size_t available_bytes = 0;
1793 : /* # of free pools + pools not yet carved out of current arena */
1794 0 : uint numfreepools = 0;
1795 : /* # of bytes for arena alignment padding */
1796 0 : size_t arena_alignment = 0;
1797 : /* # of bytes in used and full pools used for pool_headers */
1798 0 : size_t pool_header_bytes = 0;
1799 : /* # of bytes in used and full pools wasted due to quantization,
1800 : * i.e. the necessarily leftover space at the ends of used and
1801 : * full pools.
1802 : */
1803 0 : size_t quantization = 0;
1804 : /* # of arenas actually allocated. */
1805 0 : size_t narenas = 0;
1806 : /* running total -- should equal narenas * ARENA_SIZE */
1807 : size_t total;
1808 : char buf[128];
1809 :
1810 0 : fprintf(out, "Small block threshold = %d, in %u size classes.\n",
1811 : SMALL_REQUEST_THRESHOLD, numclasses);
1812 :
1813 0 : for (i = 0; i < numclasses; ++i)
1814 0 : numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
1815 :
1816 : /* Because full pools aren't linked to from anything, it's easiest
1817 : * to march over all the arenas. If we're lucky, most of the memory
1818 : * will be living in full pools -- would be a shame to miss them.
1819 : */
1820 0 : for (i = 0; i < maxarenas; ++i) {
1821 : uint j;
1822 0 : uptr base = arenas[i].address;
1823 :
1824 : /* Skip arenas which are not allocated. */
1825 0 : if (arenas[i].address == (uptr)NULL)
1826 0 : continue;
1827 0 : narenas += 1;
1828 :
1829 0 : numfreepools += arenas[i].nfreepools;
1830 :
1831 : /* round up to pool alignment */
1832 0 : if (base & (uptr)POOL_SIZE_MASK) {
1833 0 : arena_alignment += POOL_SIZE;
1834 0 : base &= ~(uptr)POOL_SIZE_MASK;
1835 0 : base += POOL_SIZE;
1836 : }
1837 :
1838 : /* visit every pool in the arena */
1839 : assert(base <= (uptr) arenas[i].pool_address);
1840 0 : for (j = 0;
1841 0 : base < (uptr) arenas[i].pool_address;
1842 0 : ++j, base += POOL_SIZE) {
1843 0 : poolp p = (poolp)base;
1844 0 : const uint sz = p->szidx;
1845 : uint freeblocks;
1846 :
1847 0 : if (p->ref.count == 0) {
1848 : /* currently unused */
1849 : assert(pool_is_in_list(p, arenas[i].freepools));
1850 0 : continue;
1851 : }
1852 0 : ++numpools[sz];
1853 0 : numblocks[sz] += p->ref.count;
1854 0 : freeblocks = NUMBLOCKS(sz) - p->ref.count;
1855 0 : numfreeblocks[sz] += freeblocks;
1856 : #ifdef Py_DEBUG
1857 : if (freeblocks > 0)
1858 : assert(pool_is_in_list(p, usedpools[sz + sz]));
1859 : #endif
1860 : }
1861 : }
1862 : assert(narenas == narenas_currently_allocated);
1863 :
1864 0 : fputc('\n', out);
1865 0 : fputs("class size num pools blocks in use avail blocks\n"
1866 : "----- ---- --------- ------------- ------------\n",
1867 : out);
1868 :
1869 0 : for (i = 0; i < numclasses; ++i) {
1870 0 : size_t p = numpools[i];
1871 0 : size_t b = numblocks[i];
1872 0 : size_t f = numfreeblocks[i];
1873 0 : uint size = INDEX2SIZE(i);
1874 0 : if (p == 0) {
1875 : assert(b == 0 && f == 0);
1876 0 : continue;
1877 : }
1878 0 : fprintf(out, "%5u %6u "
1879 : "%11" PY_FORMAT_SIZE_T "u "
1880 : "%15" PY_FORMAT_SIZE_T "u "
1881 : "%13" PY_FORMAT_SIZE_T "u\n",
1882 : i, size, p, b, f);
1883 0 : allocated_bytes += b * size;
1884 0 : available_bytes += f * size;
1885 0 : pool_header_bytes += p * POOL_OVERHEAD;
1886 0 : quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
1887 : }
1888 0 : fputc('\n', out);
1889 : #ifdef PYMALLOC_DEBUG
1890 : (void)printone(out, "# times object malloc called", serialno);
1891 : #endif
1892 0 : (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
1893 0 : (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
1894 0 : (void)printone(out, "# arenas highwater mark", narenas_highwater);
1895 0 : (void)printone(out, "# arenas allocated current", narenas);
1896 :
1897 0 : PyOS_snprintf(buf, sizeof(buf),
1898 : "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
1899 : narenas, ARENA_SIZE);
1900 0 : (void)printone(out, buf, narenas * ARENA_SIZE);
1901 :
1902 0 : fputc('\n', out);
1903 :
1904 0 : total = printone(out, "# bytes in allocated blocks", allocated_bytes);
1905 0 : total += printone(out, "# bytes in available blocks", available_bytes);
1906 :
1907 0 : PyOS_snprintf(buf, sizeof(buf),
1908 : "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
1909 0 : total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);
1910 :
1911 0 : total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
1912 0 : total += printone(out, "# bytes lost to quantization", quantization);
1913 0 : total += printone(out, "# bytes lost to arena alignment", arena_alignment);
1914 0 : (void)printone(out, "Total", total);
1915 0 : }
1916 :
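/* Editorial illustration: with made-up numbers, the per-class table and the
 * summary lines printed above look roughly like:
 *
 *     class   size   num pools   blocks in use  avail blocks
 *     -----   ----   ---------   -------------  ------------
 *         3     32          12            1488            24
 *       ...
 *     # arenas allocated total           =                1,637
 */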
1917 : #endif /* #ifdef WITH_PYMALLOC */
1918 :
1919 : #ifdef Py_USING_MEMORY_DEBUGGER
1920 : /* Make this function last so gcc won't inline it, since its definition
1921 : * comes after the reference.
1922 : */
1923 : int
1924 : Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1925 : {
1926 : uint arenaindex_temp = pool->arenaindex;
1927 :
1928 : return arenaindex_temp < maxarenas &&
1929 : (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
1930 : arenas[arenaindex_temp].address != 0;
1931 : }
1932 : #endif