#ifndef Py_CPYTHON_OBJIMPL_H
# error "this header file must not be included directly"
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define _PyObject_SIZE(typeobj) ( (typeobj)->tp_basicsize )
/* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a
   variable-size object with nitems items, exclusive of GC overhead (if any).
   The value is rounded up to the closest multiple of sizeof(void *), in
   order to ensure that pointer fields at the end of the object are correctly
   aligned for the platform (this is of special importance for subclasses of,
   e.g., str or int, so that pointers can be stored after the embedded data).

   Note that there is no memory wastage in doing this, as malloc has to
   return (at worst) pointer-aligned memory anyway. */
#if ((SIZEOF_VOID_P - 1) & SIZEOF_VOID_P) != 0
# error "_PyObject_VAR_SIZE requires SIZEOF_VOID_P be a power of 2"
#endif
#define _PyObject_VAR_SIZE(typeobj, nitems) \
    _Py_SIZE_ROUND_UP((typeobj)->tp_basicsize + \
                      (nitems) * (typeobj)->tp_itemsize, \
                      SIZEOF_VOID_P)
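
/* For example, on a 64-bit platform a hypothetical variable-size type with
   tp_basicsize == 16 and tp_itemsize == 8 gives
   _PyObject_VAR_SIZE(type, 3) == 16 + 3*8 == 40, already a multiple of
   sizeof(void *).  A minimal allocation sketch (YourVarTypeStruct is a
   hypothetical type object):

       PyVarObject *op = (PyVarObject *) PyObject_Malloc(
           _PyObject_VAR_SIZE(&YourVarTypeStruct, nitems));
       if (op == NULL) {
           PyErr_NoMemory();
           return NULL;
       }
*/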
/* This example code implements an object constructor with a custom
   allocator, where PyObject_New is inlined, and shows the important
   distinction between two steps (at least):

       1) the actual allocation of the object storage;
       2) the initialization of the Python-specific fields
          in this storage with PyObject_{Init, InitVar}.

   PyObject *
   YourObject_New(...)
   {
       PyObject *op;

       op = (PyObject *) Your_Allocator(_PyObject_SIZE(&YourTypeStruct));
       if (op == NULL) {
           return PyErr_NoMemory();
       }

       PyObject_Init(op, &YourTypeStruct);

       op->ob_field = value;
       ...
       return op;
   }

   Note that in C++, the use of the new operator usually implies that
   the 1st step is performed automatically for you, so in a C++ class
   constructor you would start directly with PyObject_Init/InitVar. */
/* Inline functions trading binary compatibility for speed:
   PyObject_INIT() is the fast version of PyObject_Init(), and
   PyObject_INIT_VAR() is the fast version of PyObject_InitVar().

   These inline functions must not be called with op=NULL. */
static inline PyObject*
_PyObject_INIT(PyObject *op, PyTypeObject *typeobj)
{
    assert(op != NULL);
    Py_SET_TYPE(op, typeobj);
    if (PyType_GetFlags(typeobj) & Py_TPFLAGS_HEAPTYPE) {
        Py_INCREF(typeobj);
    }
    _Py_NewReference(op);
    return op;
}

#define PyObject_INIT(op, typeobj) \
    _PyObject_INIT(_PyObject_CAST(op), (typeobj))

static inline PyVarObject*
_PyObject_INIT_VAR(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
{
    assert(op != NULL);
    Py_SET_SIZE(op, size);
    PyObject_INIT((PyObject *)op, typeobj);
    return op;
}

#define PyObject_INIT_VAR(op, typeobj, size) \
    _PyObject_INIT_VAR(_PyVarObject_CAST(op), (typeobj), (size))
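
/* Example: a variable-size analogue of the YourObject_New sketch above,
   using the fast PyObject_INIT_VAR().  The names are hypothetical and
   error handling is kept minimal:

       PyVarObject *
       YourVarObject_New(Py_ssize_t nitems)
       {
           PyVarObject *op;

           op = (PyVarObject *) Your_Allocator(
               _PyObject_VAR_SIZE(&YourVarTypeStruct, nitems));
           if (op == NULL) {
               PyErr_NoMemory();
               return NULL;
           }

           PyObject_INIT_VAR(op, &YourVarTypeStruct, nitems);
           return op;
       }
*/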
/* Returns the number of memory blocks currently allocated, regardless of
   their size (exposed to Python as sys.getallocatedblocks()). */
PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void);
/* Macros */
#ifdef WITH_PYMALLOC
/* Print summary statistics about pymalloc's state to "out". */
PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out);
#endif
typedef struct {
    /* user context passed as the first argument to the 2 functions */
    void *ctx;

    /* allocate an arena of size bytes */
    void* (*alloc) (void *ctx, size_t size);

    /* free an arena */
    void (*free) (void *ctx, void *ptr, size_t size);
} PyObjectArenaAllocator;
/* Get the arena allocator. */
PyAPI_FUNC(void) PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator);
/* Set the arena allocator. */
PyAPI_FUNC(void) PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator);
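
/* Example: a minimal sketch of installing a counting arena allocator.
   The wrapper names are hypothetical, and malloc()/free() serve only as a
   stand-in backing store; install the hooks early, before the interpreter
   starts allocating arenas:

       static size_t narenas = 0;

       static void *
       count_arena_alloc(void *ctx, size_t size)
       {
           ++*(size_t *)ctx;
           return malloc(size);
       }

       static void
       count_arena_free(void *ctx, void *ptr, size_t size)
       {
           --*(size_t *)ctx;
           free(ptr);
       }

       PyObjectArenaAllocator allocator = {&narenas, count_arena_alloc,
                                           count_arena_free};
       PyObject_SetArenaAllocator(&allocator);
*/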
/* Run a full garbage collection without raising exceptions; used during
   interpreter shutdown. */
PyAPI_FUNC(Py_ssize_t) _PyGC_CollectNoFail(void);
/* Run a full garbage collection, but only if the GC is enabled. */
PyAPI_FUNC(Py_ssize_t) _PyGC_CollectIfEnabled(void);
/* Test if an object has a GC head */
#define PyObject_IS_GC(o) \
    (PyType_IS_GC(Py_TYPE(o)) \
     && (Py_TYPE(o)->tp_is_gc == NULL || Py_TYPE(o)->tp_is_gc(o)))
/* GC information is stored BEFORE the object structure. */
typedef struct {
    // Pointer to next object in the list.
    // 0 means the object is not tracked.
    uintptr_t _gc_next;

    // Pointer to previous object in the list.
    // Lowest two bits are used for flags documented later.
    uintptr_t _gc_prev;
} PyGC_Head;
#define _Py_AS_GC(o) ((PyGC_Head *)(o)-1)
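
/* Illustrative memory layout (the GC header immediately precedes the
   object; _Py_AS_GC() simply steps back over it):

       +------------+------------------------+
       | PyGC_Head  | object (PyObject ...)  |
       +------------+------------------------+
       ^            ^
       start of     pointer handed out to
       allocation   the rest of the code
*/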
/* True if the object is currently tracked by the GC. */
#define _PyObject_GC_IS_TRACKED(o) (_Py_AS_GC(o)->_gc_next != 0)
/* True if the object may be tracked by the GC in the future, or already is.
This can be useful to implement some optimizations. */
#define _PyObject_GC_MAY_BE_TRACKED(obj) \
    (PyObject_IS_GC(obj) && \
     (!PyTuple_CheckExact(obj) || _PyObject_GC_IS_TRACKED(obj)))
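
/* Example: a tuple whose items can never be tracked cannot take part in a
   reference cycle, so the GC may untrack the tuple itself.  A sketch of
   that check ("t" is a hypothetical tuple):

       int may_track = 0;
       for (Py_ssize_t i = 0; i < Py_SIZE(t); i++) {
           if (_PyObject_GC_MAY_BE_TRACKED(PyTuple_GET_ITEM(t, i))) {
               may_track = 1;
               break;
           }
       }
       // if may_track is still 0, "t" can safely be untracked
*/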
/* Bit flags for _gc_prev */
/* Bit 0 is set when tp_finalize is called */
#define _PyGC_PREV_MASK_FINALIZED (1)
/* Bit 1 is set when the object is in the generation that is currently
   being collected. */
#define _PyGC_PREV_MASK_COLLECTING (2)
/* The (N-2) most significant bits contain the real address. */
#define _PyGC_PREV_SHIFT (2)
#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)
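
/* Illustrative layout of _gc_prev:

       bits:  N-1 ........... 2    1   0
             +-----------------+----+----+
             | prev PyGC_Head* | C  | F  |
             +-----------------+----+----+

   where C = _PyGC_PREV_MASK_COLLECTING and F = _PyGC_PREV_MASK_FINALIZED.
   The stored pointer is at least 4-byte aligned, so its two lowest bits
   are always free to hold the flags. */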
// The lowest bit of _gc_next is used for flags only inside the GC;
// it is always 0 for normal code.
#define _PyGCHead_NEXT(g) ((PyGC_Head*)(g)->_gc_next)
#define _PyGCHead_SET_NEXT(g, p) ((g)->_gc_next = (uintptr_t)(p))
// Lowest two bits of _gc_prev are used for the _PyGC_PREV_MASK_* flags.
#define _PyGCHead_PREV(g) ((PyGC_Head*)((g)->_gc_prev & _PyGC_PREV_MASK))
#define _PyGCHead_SET_PREV(g, p) do { \
        assert(((uintptr_t)(p) & ~_PyGC_PREV_MASK) == 0); \
        (g)->_gc_prev = ((g)->_gc_prev & ~_PyGC_PREV_MASK) \
                        | ((uintptr_t)(p)); \
    } while (0)
#define _PyGCHead_FINALIZED(g) \
    (((g)->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0)
#define _PyGCHead_SET_FINALIZED(g) \
    ((g)->_gc_prev |= _PyGC_PREV_MASK_FINALIZED)

#define _PyGC_FINALIZED(o) \
    _PyGCHead_FINALIZED(_Py_AS_GC(o))
#define _PyGC_SET_FINALIZED(o) \
    _PyGCHead_SET_FINALIZED(_Py_AS_GC(o))
/* Allocate the memory for a GC-tracked object, including the hidden
   PyGC_Head; the Calloc variant also zero-fills the memory. */
PyAPI_FUNC(PyObject *) _PyObject_GC_Malloc(size_t size);
PyAPI_FUNC(PyObject *) _PyObject_GC_Calloc(size_t size);
/* Test if a type supports weak references */
#define PyType_SUPPORTS_WEAKREFS(t) ((t)->tp_weaklistoffset > 0)
PyAPI_FUNC(PyObject **) PyObject_GET_WEAKREFS_LISTPTR(PyObject *op);
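
/* Example: clearing weak references at the start of a deallocator, a
   common pattern for types supporting weakrefs (names hypothetical):

       static void
       YourObject_dealloc(PyObject *self)
       {
           if (PyType_SUPPORTS_WEAKREFS(Py_TYPE(self))) {
               PyObject_ClearWeakRefs(self);
           }
           ...
       }
*/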
#ifdef __cplusplus
}
#endif