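/*
 * Userspace stand-ins for the kernel slab and kmalloc interfaces, used
 * by the radix-tree test suite.  Every live allocation is counted in
 * nr_allocated so tests can check for leaks.
 */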
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;

struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;		/* head of the free list of cached objects */
	void (*ctor)(void *);
};

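/*
 * Allocate from the per-cache free list when it is non-empty, otherwise
 * fall back to malloc().  Passing __GFP_NOWARN always fails the
 * allocation, which lets tests exercise out-of-memory paths.
 */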
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	return node;
}

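/*
 * Once the free list already holds more than ten objects, poison the
 * memory and return it to the system; otherwise push the object back
 * onto the list for reuse.
 */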
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

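/*
 * kmalloc()/kfree() map directly onto malloc()/free(); the only extra
 * work is keeping the nr_allocated leak counter in sync.
 */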
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);
	uatomic_inc(&nr_allocated);
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	free(p);
}

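/*
 * The name, offset and flags arguments are accepted for source
 * compatibility with the kernel API but are otherwise ignored here.
 */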
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
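
/*
 * A minimal usage sketch (not part of the original file): how a test
 * might drive this shim.  The cache name, the function name and the use
 * of radix_tree_node as the object type are illustrative assumptions;
 * the block is compiled out by default.
 */
#if 0
static void example_usage(void)
{
	struct kmem_cache *cachep;
	struct radix_tree_node *node;

	cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0, 0, NULL);

	/* __GFP_NOWARN makes the shim fail the allocation on purpose */
	assert(kmem_cache_alloc(cachep, __GFP_NOWARN) == NULL);

	/* assumes no other allocations are live when this runs */
	node = kmem_cache_alloc(cachep, 0);
	assert(node != NULL);
	assert(nr_allocated == 1);

	kmem_cache_free(cachep, node);
	assert(nr_allocated == 0);
}
#endif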