/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/workqueue.h>
#include <linux/connector.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>

#include "dm-log-userspace-transfer.h"

static uint32_t dm_ulog_seq;

/*
 * Netlink/Connector is an unreliable protocol.  How long should
 * we wait for a response before assuming it was lost and retrying?
 * (If we do receive a response after this time, it will be discarded
 * and we will instead wait for the response to the resent request.)
 */
#define DM_ULOG_RETRY_TIMEOUT (15 * HZ)

/*
 * Pre-allocated space for speed
 */
#define DM_ULOG_PREALLOCED_SIZE 512
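/*
 * Both pointers below alias a single DM_ULOG_PREALLOCED_SIZE buffer
 * allocated in dm_ulog_tfr_init(): the 'struct cn_msg' header sits at the
 * start and the 'struct dm_ulog_request' payload follows immediately after.
 */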
static struct cn_msg *prealloced_cn_msg;
static struct dm_ulog_request *prealloced_ulog_tfr;

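/*
 * Connector id this module sends requests on and receives replies for;
 * cn_ulog_callback() is registered for this idx/val pair in
 * dm_ulog_tfr_init().
 */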
static struct cb_id ulog_cn_id = {
        .idx = CN_IDX_DM,
        .val = CN_VAL_DM_USERSPACE_LOG
};

static DEFINE_MUTEX(dm_ulog_lock);

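/*
 * Tracks one in-flight request: dm_consult_userspace() adds an entry to
 * 'receiving_list' and sleeps on 'complete' until fill_pkg() matches
 * 'seq' against an incoming reply and copies the result into 'data'.
 */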
struct receiving_pkg {
        struct list_head list;
        struct completion complete;

        uint32_t seq;

        int error;
        size_t *data_size;
        char *data;
};

static DEFINE_SPINLOCK(receiving_list_lock);
static struct list_head receiving_list;

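/*
 * Wrap the preallocated request in a 'struct cn_msg' header and hand it
 * to the connector layer; msg->len covers the 'struct dm_ulog_request'
 * plus its trailing payload, so userspace receives one contiguous packet.
 */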
static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
{
        int r;
        struct cn_msg *msg = prealloced_cn_msg;

        memset(msg, 0, sizeof(struct cn_msg));

        msg->id.idx = ulog_cn_id.idx;
        msg->id.val = ulog_cn_id.val;
        msg->ack = 0;
        msg->seq = tfr->seq;
        msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;

        r = cn_netlink_send(msg, 0, 0, gfp_any());

        return r;
}

/*
 * Parameters for this function can be either msg or tfr, but not
 * both.  This function fills in the reply for a waiting request.
 * If just msg is given, then the reply is simply an ACK from userspace
 * that the request was received.
 *
 * Returns: 0 on success, -ENOENT on failure
 */
static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
{
        uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0;
        struct receiving_pkg *pkg;

        /*
         * The 'receiving_pkg' entries in this list are statically
         * allocated on the stack in 'dm_consult_userspace'.
         * Each process that is waiting for a reply from the user
         * space server will have an entry in this list.
         *
         * We are safe to do it this way because the stack space
         * is unique to each process, but still addressable by
         * other processes.
         */
        list_for_each_entry(pkg, &receiving_list, list) {
                if (rtn_seq != pkg->seq)
                        continue;

                if (msg) {
                        pkg->error = -msg->ack;
                        /*
                         * If we are trying again, we will need to know our
                         * storage capacity.  Otherwise, along with the
                         * error code, we make explicit that we have no data.
                         */
                        if (pkg->error != -EAGAIN)
                                *(pkg->data_size) = 0;
                } else if (tfr->data_size > *(pkg->data_size)) {
                        DMERR("Insufficient space to receive package [%u] "
                              "(%u vs %zu)", tfr->request_type,
                              tfr->data_size, *(pkg->data_size));

                        *(pkg->data_size) = 0;
                        pkg->error = -ENOSPC;
                } else {
                        pkg->error = tfr->error;
                        memcpy(pkg->data, tfr->data, tfr->data_size);
                        *(pkg->data_size) = tfr->data_size;
                }
                complete(&pkg->complete);
                return 0;
        }

        return -ENOENT;
}

/*
 * This is the connector callback that delivers data
 * that was sent from userspace.
 */
static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
        struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);

        if (!capable(CAP_SYS_ADMIN))
                return;

        spin_lock(&receiving_list_lock);
        if (msg->len == 0)
                fill_pkg(msg, NULL);
        else if (msg->len < sizeof(*tfr))
                DMERR("Incomplete message received (expected %u, got %u): [%u]",
                      (unsigned)sizeof(*tfr), msg->len, msg->seq);
        else
                fill_pkg(NULL, tfr);
        spin_unlock(&receiving_list_lock);
}

/**
 * dm_consult_userspace - send a request to the userspace log server and wait
 * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
 * @luid: log's local unique identifier
 * @request_type: found in include/linux/dm-log-userspace.h
 * @data: data to tx to the server
 * @data_size: size of data in bytes
 * @rdata: place to put return data from server
 * @rdata_size: value-result (amount of space given/amount of space used)
 *
 * rdata_size is undefined on failure.
 *
 * Memory used to communicate with userspace is zeroed
 * before populating to ensure that no unwanted bits leak
 * from kernel space to user-space.  All userspace log communications
 * between kernel and user space go through this function.
 *
 * Returns: 0 on success, -EXXX on failure
 **/
int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
                         char *data, size_t data_size,
                         char *rdata, size_t *rdata_size)
{
        int r = 0;
        size_t dummy = 0;
        int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
        struct dm_ulog_request *tfr = prealloced_ulog_tfr;
        struct receiving_pkg pkg;

        /*
         * Given the space needed to hold the 'struct cn_msg' and
         * 'struct dm_ulog_request' - do we have enough payload
         * space remaining?
         */
        if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
                DMINFO("Size of tfr exceeds preallocated size");
                return -EINVAL;
        }

        if (!rdata_size)
                rdata_size = &dummy;
resend:
        /*
         * We serialize the sending of requests so we can
         * use the preallocated space.
         */
        mutex_lock(&dm_ulog_lock);

        memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
        memcpy(tfr->uuid, uuid, DM_UUID_LEN);
        tfr->version = DM_ULOG_REQUEST_VERSION;
        tfr->luid = luid;
        tfr->seq = dm_ulog_seq++;

        /*
         * Must be valid request type (all other bits set to
         * zero).  This reserves other bits for possible future
         * use.
         */
        tfr->request_type = request_type & DM_ULOG_REQUEST_MASK;

        tfr->data_size = data_size;
        if (data && data_size)
                memcpy(tfr->data, data, data_size);

        memset(&pkg, 0, sizeof(pkg));
        init_completion(&pkg.complete);
        pkg.seq = tfr->seq;
        pkg.data_size = rdata_size;
        pkg.data = rdata;
        spin_lock(&receiving_list_lock);
        list_add(&(pkg.list), &receiving_list);
        spin_unlock(&receiving_list_lock);

        r = dm_ulog_sendto_server(tfr);

        mutex_unlock(&dm_ulog_lock);

        if (r) {
                DMERR("Unable to send log request [%u] to userspace: %d",
                      request_type, r);
                spin_lock(&receiving_list_lock);
                list_del_init(&(pkg.list));
                spin_unlock(&receiving_list_lock);

                goto out;
        }

        r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
        spin_lock(&receiving_list_lock);
        list_del_init(&(pkg.list));
        spin_unlock(&receiving_list_lock);
        if (!r) {
                DMWARN("[%s] Request timed out: [%u/%u] - retrying",
                       (strlen(uuid) > 8) ?
                       (uuid + (strlen(uuid) - 8)) : (uuid),
                       request_type, pkg.seq);
                goto resend;
        }

        r = pkg.error;
        if (r == -EAGAIN)
                goto resend;

out:
        return r;
}
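
/*
 * Example (sketch only, not taken from an in-tree caller): a userspace-log
 * client could fetch its sync count roughly like this, where 'lc' is a
 * hypothetical per-log context holding the uuid/luid handed out at
 * constructor time and DM_ULOG_GET_SYNC_COUNT comes from
 * include/linux/dm-log-userspace.h:
 *
 *      uint64_t sync_count;
 *      size_t rdata_size = sizeof(sync_count);
 *      int r = dm_consult_userspace(lc->uuid, lc->luid,
 *                                   DM_ULOG_GET_SYNC_COUNT, NULL, 0,
 *                                   (char *)&sync_count, &rdata_size);
 *
 * On success, sync_count holds the server's reply.  A timed-out request is
 * resent transparently, and an -EAGAIN reply from the server is likewise
 * retried before dm_consult_userspace() returns.
 */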

int dm_ulog_tfr_init(void)
{
        int r;
        void *prealloced;

        INIT_LIST_HEAD(&receiving_list);

        prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL);
        if (!prealloced)
                return -ENOMEM;

        prealloced_cn_msg = prealloced;
        prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg);

        r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
        if (r) {
                kfree(prealloced);
                return r;
        }

        return 0;
}

void dm_ulog_tfr_exit(void)
{
        cn_del_callback(&ulog_cn_id);
        kfree(prealloced_cn_msg);
}