/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <net/sock.h>
#include <linux/workqueue.h>
#include <linux/connector.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>

#include "dm-log-userspace-transfer.h"

static uint32_t dm_ulog_seq;

/*
 * Netlink/Connector is an unreliable protocol.  How long should
 * we wait for a response before assuming it was lost and retrying?
 * (If we do receive a response after this time, it will be discarded
 * and the response to the resent request will be waited for.)
 */
#define DM_ULOG_RETRY_TIMEOUT (15 * HZ)
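
/*
 * Note that a resent request gets a fresh sequence number (dm_ulog_seq
 * is incremented on every send below), so a late reply to the original,
 * timed-out request no longer matches any waiter and is dropped by
 * fill_pkg() with -ENOENT.
 */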

/*
 * Pre-allocated space for speed
 */
#define DM_ULOG_PREALLOCED_SIZE 512
static struct cn_msg *prealloced_cn_msg;
static struct dm_ulog_request *prealloced_ulog_tfr;
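
/*
 * Both pointers above refer into a single DM_ULOG_PREALLOCED_SIZE buffer
 * allocated in dm_ulog_tfr_init(), laid out as:
 *
 *	[ struct cn_msg | struct dm_ulog_request | request payload ]
 *
 * dm_ulog_lock (below) serializes use of this buffer, so only one
 * outgoing request occupies it at a time.
 */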

static struct cb_id ulog_cn_id = {
	.idx = CN_IDX_DM,
	.val = CN_VAL_DM_USERSPACE_LOG
};

static DEFINE_MUTEX(dm_ulog_lock);

struct receiving_pkg {
	struct list_head list;
	struct completion complete;

	uint32_t seq;

	int error;
	size_t *data_size;
	char *data;
};

static DEFINE_SPINLOCK(receiving_list_lock);
static struct list_head receiving_list;
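
/*
 * receiving_list holds one stack-allocated 'struct receiving_pkg' per
 * request currently waiting on a reply from userspace; entries are
 * matched by 'seq' and completed in fill_pkg().  The list itself is
 * always manipulated under receiving_list_lock.
 */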

static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
{
	int r;
	struct cn_msg *msg = prealloced_cn_msg;

	memset(msg, 0, sizeof(struct cn_msg));

	msg->id.idx = ulog_cn_id.idx;
	msg->id.val = ulog_cn_id.val;
	msg->ack = 0;
	msg->seq = tfr->seq;
	msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;

	r = cn_netlink_send(msg, 0, gfp_any());

	return r;
}

/*
 * Parameters for this function can be either msg or tfr, but not
 * both.  This function fills in the reply for a waiting request.
 * If just msg is given, then the reply is simply an ACK from userspace
 * that the request was received.
 *
 * Returns: 0 on success, -ENOENT on failure
 */
static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
{
	uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0;
	struct receiving_pkg *pkg;

	/*
	 * The 'receiving_pkg' entries in this list are allocated on
	 * the stack (not kmalloc'ed) in 'dm_consult_userspace'.
	 * Each process that is waiting for a reply from the user
	 * space server will have an entry in this list.
	 *
	 * We are safe to do it this way because the stack space
	 * is unique to each process, but still addressable by
	 * other processes.
	 */
	list_for_each_entry(pkg, &receiving_list, list) {
		if (rtn_seq != pkg->seq)
			continue;

		if (msg) {
			pkg->error = -msg->ack;
			/*
			 * If we are trying again, we will need to know our
			 * storage capacity.  Otherwise, along with the
			 * error code, we make explicit that we have no data.
			 */
			if (pkg->error != -EAGAIN)
				*(pkg->data_size) = 0;
		} else if (tfr->data_size > *(pkg->data_size)) {
			DMERR("Insufficient space to receive package [%u] "
			      "(%u vs %zu)", tfr->request_type,
			      tfr->data_size, *(pkg->data_size));

			*(pkg->data_size) = 0;
			pkg->error = -ENOSPC;
		} else {
			pkg->error = tfr->error;
			memcpy(pkg->data, tfr->data, tfr->data_size);
			*(pkg->data_size) = tfr->data_size;
		}
		complete(&pkg->complete);
		return 0;
	}

	return -ENOENT;
}
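
/*
 * A zero-length cn_msg from userspace is a bare acknowledgement: its
 * 'ack' field carries the status, which fill_pkg() stores negated in
 * pkg->error (so an ack of EAGAIN asks the kernel to resend).  A full
 * reply instead carries a complete struct dm_ulog_request after the
 * connector header.
 */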

/*
 * This is the connector callback that delivers data
 * that was sent from userspace.
 */
static void cn_ulog_callback(void *data)
{
	struct cn_msg *msg = (struct cn_msg *)data;
	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);

	spin_lock(&receiving_list_lock);
	if (msg->len == 0)
		fill_pkg(msg, NULL);
	else if (msg->len < sizeof(*tfr))
		DMERR("Incomplete message received (expected %u, got %u): [%u]",
		      (unsigned)sizeof(*tfr), msg->len, msg->seq);
	else
		fill_pkg(NULL, tfr);
	spin_unlock(&receiving_list_lock);
}

/**
 * dm_consult_userspace
 * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
 * @luid: log's local unique identifier
 * @request_type: found in include/linux/dm-log-userspace.h
 * @data: data to tx to the server
 * @data_size: size of data in bytes
 * @rdata: place to put return data from server
 * @rdata_size: value-result (amount of space given/amount of space used)
 *
 * rdata_size is undefined on failure.
 *
 * Memory used to communicate with userspace is zeroed
 * before populating to ensure that no unwanted bits leak
 * from kernel space to user-space.  All userspace log communications
 * between kernel and user space go through this function.
 *
 * Returns: 0 on success, -EXXX on failure
 **/
int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
			 char *data, size_t data_size,
			 char *rdata, size_t *rdata_size)
{
	int r = 0;
	size_t dummy = 0;
	/* Account for the full request header, not a pointer to one. */
	int overhead_size =
		sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
	struct dm_ulog_request *tfr = prealloced_ulog_tfr;
	struct receiving_pkg pkg;

	if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
		DMINFO("Size of tfr exceeds preallocated size");
		return -EINVAL;
	}

	if (!rdata_size)
		rdata_size = &dummy;
resend:
	/*
	 * We serialize the sending of requests so we can
	 * use the preallocated space.
	 */
	mutex_lock(&dm_ulog_lock);

	/* Zero everything after the cn_msg header before populating. */
	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
	tfr->luid = luid;
	tfr->seq = dm_ulog_seq++;

	/*
	 * Must be valid request type (all other bits set to
	 * zero).  This reserves other bits for possible future
	 * use.
	 */
	tfr->request_type = request_type & DM_ULOG_REQUEST_MASK;

	tfr->data_size = data_size;
	if (data && data_size)
		memcpy(tfr->data, data, data_size);

	memset(&pkg, 0, sizeof(pkg));
	init_completion(&pkg.complete);
	pkg.seq = tfr->seq;
	pkg.data_size = rdata_size;
	pkg.data = rdata;
	spin_lock(&receiving_list_lock);
	list_add(&(pkg.list), &receiving_list);
	spin_unlock(&receiving_list_lock);

	r = dm_ulog_sendto_server(tfr);

	mutex_unlock(&dm_ulog_lock);

	if (r) {
		DMERR("Unable to send log request [%u] to userspace: %d",
		      request_type, r);
		spin_lock(&receiving_list_lock);
		list_del_init(&(pkg.list));
		spin_unlock(&receiving_list_lock);

		goto out;
	}

	r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
	spin_lock(&receiving_list_lock);
	list_del_init(&(pkg.list));
	spin_unlock(&receiving_list_lock);
	if (!r) {
		DMWARN("[%s] Request timed out: [%u/%u] - retrying",
		       (strlen(uuid) > 8) ?
		       (uuid + (strlen(uuid) - 8)) : (uuid),
		       request_type, pkg.seq);
		goto resend;
	}

	r = pkg.error;
	if (r == -EAGAIN)
		goto resend;

out:
	return r;
}
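
/*
 * Usage sketch (illustrative only; the real callers live outside this
 * file, e.g. in dm-log-userspace-base.c).  A userspace-log implementation
 * would typically issue a request along these lines, where 'lc' is a
 * hypothetical per-log context holding the uuid/luid:
 *
 *	uint64_t region_size;
 *	size_t rdata_size = sizeof(region_size);
 *
 *	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
 *				 NULL, 0,
 *				 (char *)&region_size, &rdata_size);
 *
 * and rely on this function to retry on timeout (or on a userspace
 * -EAGAIN ack) and to return the server's error code on completion.
 */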

int dm_ulog_tfr_init(void)
{
	int r;
	void *prealloced;

	INIT_LIST_HEAD(&receiving_list);

	prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL);
	if (!prealloced)
		return -ENOMEM;

	prealloced_cn_msg = prealloced;
	prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg);

	r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
	if (r) {
		cn_del_callback(&ulog_cn_id);
		/* Don't leak the preallocated buffer on failure. */
		kfree(prealloced);
		return r;
	}

	return 0;
}

void dm_ulog_tfr_exit(void)
{
	cn_del_callback(&ulog_cn_id);
	kfree(prealloced_cn_msg);
}