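/*
 * Userspace emulation of the kernel's slab allocator interface.
 * Judging by the includes, this appears to be part of the radix-tree
 * test suite (e.g. tools/testing/radix-tree/linux.c), which runs
 * kernel code in userspace on top of pthreads and liburcu.
 */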
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

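/*
 * Allocation accounting shared with the test harness: nr_allocated
 * counts live allocations, preempt_count presumably stands in for the
 * kernel's preemption counter, and kmalloc_verbose enables tracing of
 * every allocation and free.
 */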
int nr_allocated;
int preempt_count;
int kmalloc_verbose;

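/*
 * A minimal stand-in for the kernel's kmem_cache: a mutex-protected
 * free list of recycled objects plus the object size and optional
 * constructor supplied at creation time.  The free list is threaded
 * through each node's private_data pointer.
 */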
struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

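/*
 * Pop a node off the cache's free list when one is available;
 * otherwise fall back to malloc() and run the constructor, if any.
 * Passing __GFP_NOWARN makes the allocation fail, apparently so that
 * tests can exercise out-of-memory paths on demand.
 */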
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", node);
	return node;
}

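/*
 * Return an object to the cache.  Freed objects are kept on the free
 * list for reuse until more than ten are cached; any further frees
 * fill the object with POISON_FREE and hand it back to the system
 * allocator, which helps catch use-after-free bugs.
 */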
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

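/*
 * Emulate kmalloc() with plain malloc().  The gfp flags are accepted
 * for API compatibility with the kernel but are ignored here.
 */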
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	return ret;
}

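/*
 * Emulate kfree().  As in the kernel, freeing a NULL pointer is a
 * no-op.
 */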
void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}

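/*
 * Create a cache descriptor.  The name, offset and flags arguments are
 * accepted for API compatibility with the kernel interface but are not
 * used by this emulation.
 */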
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
	unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
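
/*
 * A minimal usage sketch (hypothetical, not part of this file): the
 * test harness is expected to drive these wrappers roughly the way the
 * kernel drives the real slab allocator, e.g.:
 *
 *	struct kmem_cache *cachep =
 *		kmem_cache_create("radix_tree_node",
 *				sizeof(struct radix_tree_node), 0, 0, NULL);
 *	struct radix_tree_node *node = kmem_cache_alloc(cachep, 0);
 *	kmem_cache_free(cachep, node);
 *	assert(nr_allocated == 0);
 */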