
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG       /* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT    9
#define SECTOR_SIZE     (1ULL << SECTOR_SHIFT)

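/*
 * Illustrative sketch, not part of the driver: how a byte count is
 * turned into the 512-byte sectors the block layer expects, e.g. when
 * a mapping's size is handed to set_capacity().  Hypothetical helper,
 * for exposition only.
 */
#if 0   /* example only */
static void example_capacity(struct gendisk *disk, u64 mapping_size)
{
        /* a 1 GiB mapping exposes 1 GiB / 512 = 2097152 sectors */
        set_capacity(disk, mapping_size / SECTOR_SIZE);
}
#endif
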
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without it being updated.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}

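/*
 * Illustrative sketch, not part of the driver: the intended pairing of
 * the two helpers above when guarding a reference count (e.g. the
 * parent_ref counter used for layered images).  A failed increment
 * must not be followed by a decrement.  Hypothetical code, for
 * exposition only.
 */
#if 0   /* example only */
static void example_counted_use(atomic_t *refcnt)
{
        int counter = atomic_inc_return_safe(refcnt);

        if (counter <= 0)
                return;         /* 0: count already dead; -EINVAL: saturated */

        /* ... use the counted object ... */

        if (atomic_dec_return_safe(refcnt) < 0)
                pr_warn("unbalanced reference count\n");
}
#endif
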
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR            256
#define RBD_SINGLE_MAJOR_PART_SHIFT     4

#define RBD_MAX_PARENT_CHAIN_LEN        16

#define RBD_SNAP_DEV_NAME_PREFIX        "snap_"
#define RBD_MAX_SNAP_NAME_LEN   \
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT      510     /* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME      "-"

#define BAD_SNAP_INDEX  U32_MAX         /* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX  (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX    64

#define RBD_OBJ_PREFIX_LEN_MAX  64

#define RBD_NOTIFY_TIMEOUT      5       /* seconds */
#define RBD_RETRY_DELAY         msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING            (1ULL<<0)
#define RBD_FEATURE_STRIPINGV2          (1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK      (1ULL<<2)
#define RBD_FEATURE_DATA_POOL           (1ULL<<7)

#define RBD_FEATURES_ALL        (RBD_FEATURE_LAYERING |         \
                                 RBD_FEATURE_STRIPINGV2 |       \
                                 RBD_FEATURE_EXCLUSIVE_LOCK |   \
                                 RBD_FEATURE_DATA_POOL)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED  (RBD_FEATURES_ALL)

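/*
 * Illustrative sketch, not part of the driver: how one of the feature
 * bits above is typically tested on a format 2 image.  (Images whose
 * feature bits fall outside RBD_FEATURES_SUPPORTED are rejected at
 * probe time elsewhere in this file.)  Hypothetical helper, for
 * exposition only.
 */
#if 0   /* example only */
static bool example_image_is_layered(struct rbd_device *rbd_dev)
{
        return (rbd_dev->header.features & RBD_FEATURE_LAYERING) != 0;
}
#endif
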
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN            32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        u64 stripe_unit;
        u64 stripe_count;
        s64 data_pool_id;
        u64 features;           /* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;       /* format 1 only */
        u64 *snap_sizes;        /* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH       U32_MAX         /* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
        OBJ_OP_WRITE,
        OBJ_OP_READ,
        OBJ_OP_DISCARD,
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
        u64                     object_no;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;  /* position in image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};

enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
        IMG_REQ_DISCARD,        /* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;
        spinlock_t              completion_lock; /* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred; /* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};

#define for_each_obj_request(ireq, oreq) \
        list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
        list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
        list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

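/*
 * Illustrative sketch, not part of the driver: walking the object
 * requests that make up an image request with the iterator macros
 * above.  Hypothetical helper, for exposition only.
 */
#if 0   /* example only */
static u32 example_count_done(struct rbd_img_request *img_request)
{
        struct rbd_obj_request *obj_request;
        u32 done = 0;

        for_each_obj_request(img_request, obj_request)
                if (test_bit(OBJ_REQ_DONE, &obj_request->flags))
                        done++;

        return done;    /* at most img_request->obj_request_count */
}
#endif
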
enum rbd_watch_state {
        RBD_WATCH_STATE_UNREGISTERED,
        RBD_WATCH_STATE_REGISTERED,
        RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
        RBD_LOCK_STATE_UNLOCKED,
        RBD_LOCK_STATE_LOCKED,
        RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
        u64 gid;
        u64 handle;
};

struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        int                     minor;
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;
        struct rbd_options      *opts;
        char                    *config_info;   /* add{,_single_major} string */

        struct ceph_object_id   header_oid;
        struct ceph_object_locator header_oloc;

        struct ceph_file_layout layout;         /* used for all rbd requests */

        struct mutex            watch_mutex;
        enum rbd_watch_state    watch_state;
        struct ceph_osd_linger_request *watch_handle;
        u64                     watch_cookie;
        struct delayed_work     watch_dwork;

        struct rw_semaphore     lock_rwsem;
        enum rbd_lock_state     lock_state;
        struct rbd_client_id    owner_cid;
        struct work_struct      acquired_lock_work;
        struct work_struct      released_lock_work;
        struct delayed_work     lock_dwork;
        struct work_struct      unlock_work;
        wait_queue_head_t       lock_waitq;

        struct workqueue_struct *task_wq;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        atomic_t                parent_ref;
        struct rbd_device       *parent;

        /* Block layer tags. */
        struct blk_mq_tag_set   tag_set;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* sysfs related */
        struct device           dev;
        unsigned long           open_count;     /* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
        RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);      /* Serialize client creation */

static LIST_HEAD(rbd_dev_list);         /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);      /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache        *rbd_img_request_cache;
static struct kmem_cache        *rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
                                    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
                                       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
        return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
        return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

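/*
 * Illustrative sketch, not part of the driver: with
 * RBD_SINGLE_MAJOR_PART_SHIFT == 4, each device owns a block of 16
 * minors in single-major mode.  Hypothetical checks, for exposition
 * only.
 */
#if 0   /* example only */
static void example_minor_math(void)
{
        /* dev_id 3 owns minors 48..63: /dev/rbd3 plus its partitions */
        BUG_ON(rbd_dev_id_to_minor(3) != 48);
        BUG_ON(minor_to_rbd_dev_id(48 + 5) != 3);       /* 5th partition */
}
#endif
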
static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
{
        return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
               rbd_dev->spec->snap_id == CEPH_NOSNAP &&
               !rbd_dev->mapping.read_only;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
        return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
               rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
        bool is_lock_owner;

        down_read(&rbd_dev->lock_rwsem);
        is_lock_owner = __rbd_is_lock_owner(rbd_dev);
        up_read(&rbd_dev->lock_rwsem);
        return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
        return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
        &bus_attr_add.attr,
        &bus_attr_remove.attr,
        &bus_attr_add_single_major.attr,
        &bus_attr_remove_single_major.attr,
        &bus_attr_supported_features.attr,
        NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
                                  struct attribute *attr, int index)
{
        if (!single_major &&
            (attr == &bus_attr_add_single_major.attr ||
             attr == &bus_attr_remove_single_major.attr))
                return 0;

        return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
        .attrs = rbd_bus_attrs,
        .is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
        .name           = "rbd",
        .bus_groups     = rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}

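/*
 * Illustrative sketch, not part of the driver: rbd_warn() takes a
 * printf-style format and prefixes the most specific identity known
 * for the device (disk name, image name, image id, or the bare
 * pointer).  Hypothetical call site, for exposition only.
 */
#if 0   /* example only */
static void example_warn(struct rbd_device *rbd_dev, int ret)
{
        if (ret < 0)
                rbd_warn(rbd_dev, "failed to refresh header: %d", ret);
}
#endif
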
#ifdef RBD_DEBUG
#define rbd_assert(expr)                                                \
                if (unlikely(!(expr))) {                                \
                        printk(KERN_ERR "\nAssertion failure in %s() "  \
                                                "at line %d:\n\n"       \
                                        "\trbd_assert(%s);\n\n",        \
                                        __func__, __LINE__, #expr);     \
                        BUG();                                          \
                }
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)      ((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        (void) get_device(&rbd_dev->dev);

        return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
        int ret = 0;
        int val;
        bool ro;
        bool ro_changed = false;

        /* get_user() may sleep, so call it before taking rbd_dev->lock */
        if (get_user(val, (int __user *)(arg)))
                return -EFAULT;

        ro = val ? true : false;
        /* Snapshots can't be written to */
        if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        /* prevent others from opening this device */
        if (rbd_dev->open_count > 1) {
                ret = -EBUSY;
                goto out;
        }

        if (rbd_dev->mapping.read_only != ro) {
                rbd_dev->mapping.read_only = ro;
                ro_changed = true;
        }

out:
        spin_unlock_irq(&rbd_dev->lock);
        /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
        if (ret == 0 && ro_changed)
                set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

        return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        int ret = 0;

        switch (cmd) {
        case BLKROSET:
                ret = rbd_ioctl_set_ro(rbd_dev, arg);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
                                unsigned int cmd, unsigned long arg)
{
        return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
        .owner                  = THIS_MODULE,
        .open                   = rbd_open,
        .release                = rbd_release,
        .ioctl                  = rbd_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_rbdc;
        ceph_opts = NULL;       /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_client;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;
out_client:
        ceph_destroy_client(rbdc->client);
out_rbdc:
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
        Opt_queue_depth,
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        Opt_lock_on_read,
        Opt_err
};

static match_table_t rbd_opts_tokens = {
        {Opt_queue_depth, "queue_depth=%d"},
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        {Opt_lock_on_read, "lock_on_read"},
        {Opt_err, NULL}
};

struct rbd_options {
        int     queue_depth;
        bool    read_only;
        bool    lock_on_read;
};

#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT   false
#define RBD_LOCK_ON_READ_DEFAULT false

static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token, argstr[0].from);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_queue_depth:
                if (intval < 1) {
                        pr_err("queue_depth out of range\n");
                        return -EINVAL;
                }
                rbd_opts->queue_depth = intval;
                break;
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        case Opt_lock_on_read:
                rbd_opts->lock_on_read = true;
                break;
        default:
                /* libceph prints "bad option" msg */
                return -EINVAL;
        }

        return 0;
}

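/*
 * Illustrative sketch, not part of the driver: each comma-separated
 * token from the options field of the sysfs add string that libceph
 * does not recognize itself is handed to parse_rbd_opts_token().
 * Hypothetical values, for exposition only.
 */
#if 0   /* example only */
static void example_parse(void)
{
        struct rbd_options opts = {
                .queue_depth = RBD_QUEUE_DEPTH_DEFAULT,
                .read_only = RBD_READ_ONLY_DEFAULT,
                .lock_on_read = RBD_LOCK_ON_READ_DEFAULT,
        };

        parse_rbd_opts_token("queue_depth=128", &opts); /* -> 128 */
        parse_rbd_opts_token("ro", &opts);              /* -> read_only */
}
#endif
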
static char *obj_op_name(enum obj_operation_type op_type)
{
        switch (op_type) {
        case OBJ_OP_READ:
                return "read";
        case OBJ_OP_WRITE:
                return "write";
        case OBJ_OP_DISCARD:
                return "discard";
        default:
                return "???";
        }
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);
        mutex_unlock(&client_mutex);

        return rbdc;
}

/*
 * Destroy ceph client.  (Called via kref_put(); takes
 * rbd_client_list_lock itself, so the caller must not hold it.)
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1U << header->obj_order;
}

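/*
 * Illustrative note, not part of the driver: obj_order is the log2 of
 * the object size, so the common default order of 22 yields 4 MiB
 * objects.  Hypothetical check, for exposition only.
 */
#if 0   /* example only */
static void example_object_size(void)
{
        struct rbd_image_header hdr = { .obj_order = 22 };

        BUG_ON(rbd_obj_bytes(&hdr) != 4U << 20);        /* 4 MiB */
}
#endif
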
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
        if (rbd_dev->header.stripe_unit == 0 ||
            rbd_dev->header.stripe_count == 0) {
                rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
                rbd_dev->header.stripe_count = 1;
        }

        rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
        rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
        rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
        rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
                          rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
        RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                object_prefix = kstrndup(ondisk->object_prefix,
                                         sizeof(ondisk->object_prefix),
                                         GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */
                snap_sizes = kmalloc_array(snap_count,
                                           sizeof(*header->snap_sizes),
                                           GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                rbd_init_layout(rbd_dev);
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

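/*
 * Illustrative sketch, not part of the driver: because the snapshot
 * array is kept highest-id-first, the reversed comparison above lets
 * bsearch() work on it unchanged.  Hypothetical values, for
 * exposition only.
 */
#if 0   /* example only */
static void example_descending_bsearch(void)
{
        u64 snaps[] = { 40, 30, 20, 10 };       /* highest id first */
        u64 key = 20;
        u64 *found;

        found = bsearch(&key, snaps, ARRAY_SIZE(snaps), sizeof(key),
                        snapid_compare_reverse);
        BUG_ON(found != &snaps[2]);             /* index 2 */
}
#endif
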
Alex Elder2ad3d712013-04-30 00:44:33 -05001158static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1159 u64 snap_id)
Alex Elder54cac612013-04-30 00:44:33 -05001160{
1161 u32 which;
Josh Durginda6a6b62013-09-04 17:57:31 -07001162 const char *snap_name;
Alex Elder54cac612013-04-30 00:44:33 -05001163
1164 which = rbd_dev_snap_index(rbd_dev, snap_id);
1165 if (which == BAD_SNAP_INDEX)
Josh Durginda6a6b62013-09-04 17:57:31 -07001166 return ERR_PTR(-ENOENT);
Alex Elder54cac612013-04-30 00:44:33 -05001167
	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
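
/*
 * A worked example of the two segment helpers above (illustrative
 * numbers only, not taken from a real image): with the default
 * 4 MiB object size, rbd_obj_bytes() returns 0x400000, so for an
 * image offset of 0x500000 rbd_segment_offset() masks off the high
 * bits and yields 0x100000 (1 MiB into the second object), and
 * rbd_segment_length(..., 0x500000, 0x400000) clips the 4 MiB
 * request to 0x300000 bytes, the room left in that object.  Both
 * helpers rely on the object size being a power of two.
 */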

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * Zeros a bio chain, starting at a specific byte offset.
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * Similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
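
/*
 * Illustration of the zero_pages() arithmetic (hypothetical values,
 * assuming 4 KiB pages): zeroing offset 0x1800 up to end 0x2800
 * starts at pages[1] (0x1800 >> PAGE_SHIFT), clears the final 2 KiB
 * of that page (page_offset 0x800, length 0x800), then clears the
 * first 2 KiB of pages[2] on the next loop iteration.
 */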

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
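
/*
 * Typical use of bio_chain_clone_range() (a sketch mirroring the
 * split loop in rbd_img_request_fill() below, not a verbatim call
 * site): a caller splitting an image request walks the source chain
 * once, carving off consecutive segment-sized pieces.  Because
 * bio_src and offset are updated in place, each call resumes exactly
 * where the previous clone ended:
 *
 *	struct bio *bio_list = ...;	// head of the request's chain
 *	unsigned int bio_offset = 0;
 *
 *	while (resid) {
 *		clone = bio_chain_clone_range(&bio_list, &bio_offset,
 *					      length, GFP_NOIO);
 *		...
 *	}
 */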

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, so that
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the response that arrives
 * later.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
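
/*
 * The KNOWN/EXISTS pair is meant to be read in that order: a caller
 * first checks obj_request_known_test() to learn whether an existence
 * answer has been recorded at all, and only then consults
 * obj_request_exists_test().  A sketch of the consumer side (the
 * actions are paraphrased from the layered-write path, not copied
 * verbatim):
 *
 *	if (!obj_request_known_test(obj_request))
 *		... issue a stat to find out ...
 *	else if (obj_request_exists_test(obj_request))
 *		... target object exists, write directly ...
 *	else
 *		... copy up parent data first ...
 */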

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
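
/*
 * Example of the round_up() above (made-up numbers): with 4 MiB
 * objects and a parent overlap of 6 MiB, the overlap is rounded up
 * to 8 MiB, so an object request at img_offset 7 MiB still counts
 * as overlapping the parent -- its object straddles the 6 MiB
 * boundary, and part of it may be backed by parent data.
 */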

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->object_no, obj_request->offset,
	     obj_request->length, osd_req);
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it is not clear offhand which way is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
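
/*
 * Concrete illustration of the zero-fill rules above (hypothetical
 * values): for an 8192-byte read that returns -ENOENT the whole
 * 8192 bytes are zeroed; for one that succeeds but transfers only
 * 4096 bytes, bytes 4096..8191 are zeroed.  Either way xferred is
 * reported as 8192, so the block layer sees the request as fully
 * satisfied.
 */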

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err)
{
	obj_request->result = err;
	obj_request->xferred = 0;
	/*
	 * kludge - mirror rbd_obj_request_submit() to match a put in
	 * rbd_img_obj_callback()
	 */
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	obj_request_done_set(obj_request);
	rbd_obj_request_complete(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "unexpected OSD op: object_no %016llx opcode %d",
			 obj_request->object_no, opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
}

static struct ceph_osd_request *
__rbd_osd_req_create(struct rbd_device *rbd_dev,
		     struct ceph_snap_context *snapc,
		     int num_ops, unsigned int flags,
		     struct rbd_obj_request *obj_request)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_flags = flags;
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_request;

	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_request->object_no))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}
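
/*
 * Example of the resulting object name (illustrative values; the
 * exact format strings live in rbd_types.h): for a format 2 image
 * whose object prefix is "rbd_data.101741e2eb141f", object number 1
 * is named with the prefix plus the zero-padded hex object number,
 * e.g. "rbd_data.101741e2eb141f.0000000000000001".  Format 1 images
 * use the same scheme with their "rb.0.*" style prefix and a
 * shorter pad width.
 */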

/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	return __rbd_osd_req_create(rbd_dev, snapc, num_ops,
	    (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ?
	    CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request);
}

/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	return __rbd_osd_req_create(img_request->rbd_dev,
				    img_request->snapc, num_osd_ops,
				    CEPH_OSD_FLAG_WRITE, obj_request);
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *
rbd_obj_request_create(enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(obj_request_type_valid(type));

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		/* img_data requests don't own their page array */
		if (obj_request->pages &&
		    !obj_request_img_data_test(obj_request))
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
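
/*
 * Sketch of the intended get/put pairing (a description of usage,
 * not a call site copied from elsewhere): every layered image
 * request takes a parent reference at creation via
 * rbd_dev_parent_get() and drops it on destruction via
 * rbd_dev_parent_put().  The "safe" counter helpers pin the count
 * at 0 once it gets there, so a get that races with the final put
 * simply fails (returns false) instead of reviving torn-down
 * parent state.
 */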

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request's worth
		 * of bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
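
/*
 * Why next_completion exists, by example (hypothetical completion
 * order): suppose an image request has object requests 0, 1 and 2,
 * and request 2 completes first.  Its callback finds which (2) !=
 * next_completion (0) and bails out, leaving only its done flag
 * set.  When request 0 later completes, the loop above ends
 * request 0, then 1 and 2 if they are already done, so the block
 * layer always observes completions in submission order.
 */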

/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission.  num_ops is the number of osd operations
 * already added to the given osd request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
			opcode = CEPH_OSD_OP_DELETE;
		} else if (offset + length == object_size) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
		if (!offset && length == object_size)
			opcode = CEPH_OSD_OP_WRITEFULL;
		else
			opcode = CEPH_OSD_OP_WRITE;
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

	if (opcode == CEPH_OSD_OP_DELETE)
		osd_req_op_init(osd_request, num_ops, opcode, 0);
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
				       offset, length, 0, 0);

	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}
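
/*
 * Discard opcode selection above, by example (illustrative numbers,
 * 4 MiB objects): discarding a whole object that no parent data
 * backs becomes DELETE; a discard covering the tail of an object
 * (say offset 1 MiB, length 3 MiB) becomes TRUNCATE; a discard in
 * the middle (offset 1 MiB, length 1 MiB) becomes ZERO, since the
 * bytes after it must remain readable.
 */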
2411
2412/*
Alex Elderf1a47392013-04-19 15:34:50 -05002413 * Split up an image request into one or more object requests, each
2414 * to a different object. The "type" parameter indicates whether
2415 * "data_desc" is the pointer to the head of a list of bio
2416 * structures, or the base of a page array. In either case this
2417 * function assumes data_desc describes memory sufficient to hold
2418 * all data described by the image request.
2419 */
2420static int rbd_img_request_fill(struct rbd_img_request *img_request,
2421 enum obj_request_type type,
2422 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002423{
2424 struct rbd_device *rbd_dev = img_request->rbd_dev;
2425 struct rbd_obj_request *obj_request = NULL;
2426 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002427 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002428 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002429 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002430 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002431 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002432 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002433
Alex Elderf1a47392013-04-19 15:34:50 -05002434 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2435 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002436
Alex Elder7da22d22013-01-24 16:13:36 -06002437 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002438 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002439 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002440 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002441
2442 if (type == OBJ_REQUEST_BIO) {
2443 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002444 rbd_assert(img_offset ==
2445 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002446 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002447 pages = data_desc;
2448 }
2449
Alex Elderbf0d5f502012-11-22 00:00:08 -06002450 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002451 struct ceph_osd_request *osd_req;
Ilya Dryomova90bb0c2017-01-25 18:16:23 +01002452 u64 object_no = img_offset >> rbd_dev->header.obj_order;
Ilya Dryomov67e2b652017-01-25 18:16:22 +01002453 u64 offset = rbd_segment_offset(rbd_dev, img_offset);
2454 u64 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002455
Ilya Dryomov6c696d82017-01-25 18:16:23 +01002456 obj_request = rbd_obj_request_create(type);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002457 if (!obj_request)
2458 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002459
Ilya Dryomova90bb0c2017-01-25 18:16:23 +01002460 obj_request->object_no = object_no;
Ilya Dryomov67e2b652017-01-25 18:16:22 +01002461 obj_request->offset = offset;
2462 obj_request->length = length;
2463
Josh Durgin03507db2013-08-27 14:45:46 -07002464 /*
2465 * set obj_request->img_request before creating the
2466 * osd_request so that it gets the right snapc
2467 */
2468 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002469
Alex Elderf1a47392013-04-19 15:34:50 -05002470 if (type == OBJ_REQUEST_BIO) {
2471 unsigned int clone_size;
2472
2473 rbd_assert(length <= (u64)UINT_MAX);
2474 clone_size = (unsigned int)length;
2475 obj_request->bio_list =
2476 bio_chain_clone_range(&bio_list,
2477 &bio_offset,
2478 clone_size,
David Disseldorp2224d872016-04-05 11:13:39 +02002479 GFP_NOIO);
Alex Elderf1a47392013-04-19 15:34:50 -05002480 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002481 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002482 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002483 unsigned int page_count;
2484
2485 obj_request->pages = pages;
2486 page_count = (u32)calc_pages_for(offset, length);
2487 obj_request->page_count = page_count;
2488 if ((offset + length) & ~PAGE_MASK)
2489 page_count--; /* more on last page */
2490 pages += page_count;
2491 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002492
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002493 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2494 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2495 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002496 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002497 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002498
Alex Elder2fa12322013-04-05 01:27:12 -05002499 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002500 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002501 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002502
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002503 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2504
Alex Elder7da22d22013-01-24 16:13:36 -06002505 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002506 resid -= length;
2507 }
2508
2509 return 0;
2510
Alex Elderbf0d5f502012-11-22 00:00:08 -06002511out_unwind:
2512 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002513 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002514
2515 return -ENOMEM;
2516}
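/*
 * Illustrative note (not part of the original source): the loop
 * above walks the image extent in object-sized segments.  Assuming
 * the default object order of 22 (4 MiB objects), a 6 MiB request
 * at image offset 3 MiB becomes three object requests: 1 MiB at
 * offset 3 MiB in object 0, 4 MiB at offset 0 in object 1, and
 * 1 MiB at offset 0 in object 2.
 */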
2517
Alex Elder3d7efd12013-04-19 15:34:50 -05002518static void
Ilya Dryomov27617132015-07-16 17:36:11 +03002519rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
Alex Elder0eefd472013-04-19 15:34:50 -05002520{
2521 struct rbd_img_request *img_request;
2522 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002523 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002524 u32 page_count;
2525
Ilya Dryomov27617132015-07-16 17:36:11 +03002526 dout("%s: obj %p\n", __func__, obj_request);
2527
Josh Durgind3246fb2014-04-07 16:49:21 -07002528 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2529 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002530 rbd_assert(obj_request_img_data_test(obj_request));
2531 img_request = obj_request->img_request;
2532 rbd_assert(img_request);
2533
2534 rbd_dev = img_request->rbd_dev;
2535 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002536
Alex Elderebda6402013-05-10 16:29:22 -05002537 pages = obj_request->copyup_pages;
2538 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002539 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002540 page_count = obj_request->copyup_page_count;
2541 rbd_assert(page_count);
2542 obj_request->copyup_page_count = 0;
2543 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002544
2545 /*
2546 * We want the transfer count to reflect the size of the
2547 * original write request. There is no such thing as a
2548 * successful short write, so if the request was successful
2549 * we can just set it to the originally-requested length.
2550 */
2551 if (!obj_request->result)
2552 obj_request->xferred = obj_request->length;
2553
Ilya Dryomov27617132015-07-16 17:36:11 +03002554 obj_request_done_set(obj_request);
Alex Elder0eefd472013-04-19 15:34:50 -05002555}
2556
2557static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002558rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2559{
2560 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002561 struct ceph_osd_request *osd_req;
Alex Elder0eefd472013-04-19 15:34:50 -05002562 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002563 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002564 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002565 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002566 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002567 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002568
2569 rbd_assert(img_request_child_test(img_request));
2570
2571 /* First get what we need from the image request */
2572
2573 pages = img_request->copyup_pages;
2574 rbd_assert(pages != NULL);
2575 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002576 page_count = img_request->copyup_page_count;
2577 rbd_assert(page_count);
2578 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002579
2580 orig_request = img_request->obj_request;
2581 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002582 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002583 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002584 parent_length = img_request->length;
Ilya Dryomovfa355112016-09-16 15:20:42 +02002585 rbd_assert(img_result || parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002586 rbd_img_request_put(img_request);
2587
Alex Elder91c6feb2013-05-06 17:40:32 -05002588 rbd_assert(orig_request->img_request);
2589 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002590 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002591
Alex Elderbbea1c12013-05-06 17:40:33 -05002592 /*
2593 * If the overlap has become 0 (most likely because the
2594 * image has been flattened) we need to free the pages
2595 * and re-submit the original write request.
2596 */
2597 if (!rbd_dev->parent_overlap) {
Alex Elderbbea1c12013-05-06 17:40:33 -05002598 ceph_release_page_vector(pages, page_count);
Ilya Dryomov980917f2016-09-12 18:59:42 +02002599 rbd_obj_request_submit(orig_request);
2600 return;
Alex Elderbbea1c12013-05-06 17:40:33 -05002601 }
2602
2603 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002604 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002605
Alex Elder8785b1d2013-05-09 10:08:49 -05002606 /*
2607	 * The original osd request is of no use to us any more.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002608 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002609 * request. Allocate the new copyup osd request for the
2610 * original request, and release the old one.
2611 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002612 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002613 osd_req = rbd_osd_req_create_copyup(orig_request);
2614 if (!osd_req)
2615 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002616 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002617 orig_request->osd_req = osd_req;
2618 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002619 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002620
Alex Elder0eefd472013-04-19 15:34:50 -05002621 /* Initialize the copyup op */
2622
2623 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002624 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002625 false, false);
2626
Josh Durgind3246fb2014-04-07 16:49:21 -07002627 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002628
Josh Durgind3246fb2014-04-07 16:49:21 -07002629 op_type = rbd_img_request_op_type(orig_request->img_request);
2630 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
Alex Elder0eefd472013-04-19 15:34:50 -05002631
2632 /* All set, send it off. */
2633
Ilya Dryomov980917f2016-09-12 18:59:42 +02002634 rbd_obj_request_submit(orig_request);
2635 return;
Alex Elder0eefd472013-04-19 15:34:50 -05002636
Alex Elder0eefd472013-04-19 15:34:50 -05002637out_err:
Ilya Dryomovfa355112016-09-16 15:20:42 +02002638 ceph_release_page_vector(pages, page_count);
Ilya Dryomov0dcc6852016-09-26 15:43:52 +02002639 rbd_obj_request_error(orig_request, img_result);
Alex Elder3d7efd12013-04-19 15:34:50 -05002640}
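/*
 * Note on the request built above: the "copyup" class method call
 * occupies op index 0 and the op(s) for the original write/discard
 * are filled in starting at index 1 (the last argument to
 * rbd_img_obj_request_fill()).  The class method is expected to
 * populate the target object with the parent data only if it does
 * not already contain data, after which the guest ops apply on top.
 */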
2641
2642/*
2643 * Read from the parent image the range of data that covers the
2644 * entire target of the given object request. This is used for
2645 * satisfying a layered image write request when the target of an
2646 * object request from the image request does not exist.
2647 *
2648 * A page array big enough to hold the returned data is allocated
2649 * and supplied to rbd_img_request_fill() as the "data descriptor."
2650 * When the read completes, this page array will be transferred to
2651 * the original object request for the copyup operation.
2652 *
Ilya Dryomovc2e82412016-09-13 20:18:01 +02002653 * If an error occurs, it is recorded as the result of the original
2654 * object request in rbd_img_obj_exists_callback().
Alex Elder3d7efd12013-04-19 15:34:50 -05002655 */
2656static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2657{
Ilya Dryomov058aa992016-09-12 14:44:45 +02002658 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002659 struct rbd_img_request *parent_request = NULL;
Alex Elder3d7efd12013-04-19 15:34:50 -05002660 u64 img_offset;
2661 u64 length;
2662 struct page **pages = NULL;
2663 u32 page_count;
2664 int result;
2665
Alex Elder3d7efd12013-04-19 15:34:50 -05002666 rbd_assert(rbd_dev->parent != NULL);
2667
2668 /*
2669 * Determine the byte range covered by the object in the
2670 * child image to which the original request was to be sent.
2671 */
2672 img_offset = obj_request->img_offset - obj_request->offset;
Ilya Dryomov5bc3fb12017-01-25 18:16:22 +01002673 length = rbd_obj_bytes(&rbd_dev->header);
Alex Elder3d7efd12013-04-19 15:34:50 -05002674
2675 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002676 * There is no defined parent data beyond the parent
2677 * overlap, so limit what we read at that boundary if
2678 * necessary.
2679 */
2680 if (img_offset + length > rbd_dev->parent_overlap) {
2681 rbd_assert(img_offset < rbd_dev->parent_overlap);
2682 length = rbd_dev->parent_overlap - img_offset;
2683 }
2684
2685 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002686 * Allocate a page array big enough to receive the data read
2687 * from the parent.
2688 */
2689 page_count = (u32)calc_pages_for(0, length);
2690 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2691 if (IS_ERR(pages)) {
2692 result = PTR_ERR(pages);
2693 pages = NULL;
2694 goto out_err;
2695 }
2696
2697 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002698 parent_request = rbd_parent_request_create(obj_request,
2699 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002700 if (!parent_request)
2701 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002702
2703 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2704 if (result)
2705 goto out_err;
Ilya Dryomov058aa992016-09-12 14:44:45 +02002706
Alex Elder3d7efd12013-04-19 15:34:50 -05002707 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002708 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002709 parent_request->callback = rbd_img_obj_parent_read_full_callback;
Ilya Dryomov058aa992016-09-12 14:44:45 +02002710
Alex Elder3d7efd12013-04-19 15:34:50 -05002711 result = rbd_img_request_submit(parent_request);
2712 if (!result)
2713 return 0;
2714
2715 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002716 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002717 parent_request->obj_request = NULL;
2718 rbd_obj_request_put(obj_request);
2719out_err:
2720 if (pages)
2721 ceph_release_page_vector(pages, page_count);
2722 if (parent_request)
2723 rbd_img_request_put(parent_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002724 return result;
2725}
2726
Alex Elderc5b5ef62013-02-11 12:33:24 -06002727static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2728{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002729 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002730 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002731 int result;
2732
2733 rbd_assert(!obj_request_img_data_test(obj_request));
2734
2735 /*
2736 * All we need from the object request is the original
2737 * request and the result of the STAT op. Grab those, then
2738 * we're done with the request.
2739 */
2740 orig_request = obj_request->obj_request;
2741 obj_request->obj_request = NULL;
Alex Elder912c3172013-05-13 20:35:38 -05002742 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002743 rbd_assert(orig_request);
2744 rbd_assert(orig_request->img_request);
2745
2746 result = obj_request->result;
2747 obj_request->result = 0;
2748
2749 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2750 obj_request, orig_request, result,
2751 obj_request->xferred, obj_request->length);
2752 rbd_obj_request_put(obj_request);
2753
Alex Elder638f5ab2013-05-06 17:40:33 -05002754 /*
2755 * If the overlap has become 0 (most likely because the
Ilya Dryomov980917f2016-09-12 18:59:42 +02002756 * image has been flattened) we need to re-submit the
2757 * original request.
Alex Elder638f5ab2013-05-06 17:40:33 -05002758 */
2759 rbd_dev = orig_request->img_request->rbd_dev;
2760 if (!rbd_dev->parent_overlap) {
Ilya Dryomov980917f2016-09-12 18:59:42 +02002761 rbd_obj_request_submit(orig_request);
2762 return;
Alex Elder638f5ab2013-05-06 17:40:33 -05002763 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002764
2765 /*
2766 * Our only purpose here is to determine whether the object
2767 * exists, and we don't want to treat the non-existence as
2768 * an error. If something else comes back, transfer the
2769 * error to the original request and complete it now.
2770 */
2771 if (!result) {
2772 obj_request_existence_set(orig_request, true);
2773 } else if (result == -ENOENT) {
2774 obj_request_existence_set(orig_request, false);
Ilya Dryomovc2e82412016-09-13 20:18:01 +02002775 } else {
2776 goto fail_orig_request;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002777 }
2778
2779 /*
2780 * Resubmit the original request now that we have recorded
2781 * whether the target object exists.
2782 */
Ilya Dryomovc2e82412016-09-13 20:18:01 +02002783 result = rbd_img_obj_request_submit(orig_request);
2784 if (result)
2785 goto fail_orig_request;
2786
2787 return;
2788
2789fail_orig_request:
Ilya Dryomov0dcc6852016-09-26 15:43:52 +02002790 rbd_obj_request_error(orig_request, result);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002791}
2792
2793static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2794{
Ilya Dryomov058aa992016-09-12 14:44:45 +02002795 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002796 struct rbd_obj_request *stat_request;
Ilya Dryomov710214e2016-09-15 17:53:32 +02002797 struct page **pages;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002798 u32 page_count;
2799 size_t size;
2800 int ret;
2801
Ilya Dryomov6c696d82017-01-25 18:16:23 +01002802 stat_request = rbd_obj_request_create(OBJ_REQUEST_PAGES);
Ilya Dryomov710214e2016-09-15 17:53:32 +02002803 if (!stat_request)
2804 return -ENOMEM;
2805
Ilya Dryomova90bb0c2017-01-25 18:16:23 +01002806 stat_request->object_no = obj_request->object_no;
2807
Ilya Dryomov710214e2016-09-15 17:53:32 +02002808 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2809 stat_request);
2810 if (!stat_request->osd_req) {
2811 ret = -ENOMEM;
2812 goto fail_stat_request;
2813 }
2814
Alex Elderc5b5ef62013-02-11 12:33:24 -06002815 /*
2816 * The response data for a STAT call consists of:
2817 * le64 length;
2818 * struct {
2819 * le32 tv_sec;
2820 * le32 tv_nsec;
2821 * } mtime;
2822 */
2823 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2824 page_count = (u32)calc_pages_for(0, size);
2825 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
Ilya Dryomov710214e2016-09-15 17:53:32 +02002826 if (IS_ERR(pages)) {
2827 ret = PTR_ERR(pages);
2828 goto fail_stat_request;
2829 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002830
Ilya Dryomov710214e2016-09-15 17:53:32 +02002831 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2832 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2833 false, false);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002834
2835 rbd_obj_request_get(obj_request);
2836 stat_request->obj_request = obj_request;
2837 stat_request->pages = pages;
2838 stat_request->page_count = page_count;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002839 stat_request->callback = rbd_img_obj_exists_callback;
2840
Ilya Dryomov980917f2016-09-12 18:59:42 +02002841 rbd_obj_request_submit(stat_request);
2842 return 0;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002843
Ilya Dryomov710214e2016-09-15 17:53:32 +02002844fail_stat_request:
2845 rbd_obj_request_put(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002846 return ret;
2847}
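/*
 * Clarifying note: a page vector is allocated above to receive the
 * STAT reply (length + mtime), but the decoded payload is never
 * examined -- rbd_img_obj_exists_callback() acts only on the op
 * result (0 vs -ENOENT) to record whether the object exists.
 */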
2848
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002849static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002850{
Ilya Dryomov058aa992016-09-12 14:44:45 +02002851 struct rbd_img_request *img_request = obj_request->img_request;
2852 struct rbd_device *rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002853
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002854 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002855 if (!img_request_write_test(img_request) &&
2856 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002857 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002858
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002859 /* Non-layered writes */
2860 if (!img_request_layered_test(img_request))
2861 return true;
2862
2863 /*
2864 * Layered writes outside of the parent overlap range don't
2865 * share any data with the parent.
2866 */
2867 if (!obj_request_overlaps_parent(obj_request))
2868 return true;
2869
2870 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002871 * Entire-object layered writes - we will overwrite whatever
2872 * parent data there is anyway.
2873 */
2874 if (!obj_request->offset &&
2875 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2876 return true;
2877
2878 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002879 * If the object is known to already exist, its parent data has
2880 * already been copied.
2881 */
2882 if (obj_request_known_test(obj_request) &&
2883 obj_request_exists_test(obj_request))
2884 return true;
2885
2886 return false;
2887}
2888
2889static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2890{
Ilya Dryomov058aa992016-09-12 14:44:45 +02002891 rbd_assert(obj_request_img_data_test(obj_request));
2892 rbd_assert(obj_request_type_valid(obj_request->type));
2893 rbd_assert(obj_request->img_request);
2894
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002895 if (img_obj_request_simple(obj_request)) {
Ilya Dryomov980917f2016-09-12 18:59:42 +02002896 rbd_obj_request_submit(obj_request);
2897 return 0;
Alex Elderb454e362013-04-19 15:34:50 -05002898 }
2899
2900 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002901 * It's a layered write. The target object might exist but
2902 * we may not know that yet. If we know it doesn't exist,
2903 * start by reading the data for the full target object from
2904 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002905 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002906 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002907 return rbd_img_obj_parent_read_full(obj_request);
2908
2909 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002910
2911 return rbd_img_obj_exists_submit(obj_request);
2912}
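/*
 * Summary of the dispatch above for a layered image:
 *
 *   simple case (read, non-layered write, write outside the parent
 *   overlap, whole-object write, or known-existing object)
 *	-> submit as is
 *   existence known (object absent)
 *	-> rbd_img_obj_parent_read_full(), whose callback issues
 *	   the copyup + original op
 *   existence unknown
 *	-> rbd_img_obj_exists_submit() (STAT), whose callback
 *	   records existence and resubmits the original request
 */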
2913
Alex Elderbf0d5f502012-11-22 00:00:08 -06002914static int rbd_img_request_submit(struct rbd_img_request *img_request)
2915{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002916 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002917 struct rbd_obj_request *next_obj_request;
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002918 int ret = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002919
Alex Elder37206ee2013-02-20 17:32:08 -06002920 dout("%s: img %p\n", __func__, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002921
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002922 rbd_img_request_get(img_request);
2923 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderb454e362013-04-19 15:34:50 -05002924 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002925 if (ret)
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002926 goto out_put_ireq;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002927 }
2928
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002929out_put_ireq:
2930 rbd_img_request_put(img_request);
2931 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002932}
2933
Alex Elder8b3e1a52013-01-24 16:13:36 -06002934static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2935{
2936 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002937 struct rbd_device *rbd_dev;
2938 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05002939 u64 img_xferred;
2940 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002941
2942 rbd_assert(img_request_child_test(img_request));
2943
Alex Elder02c74fb2013-05-06 17:40:33 -05002944 /* First get what we need from the image request and release it */
2945
Alex Elder8b3e1a52013-01-24 16:13:36 -06002946 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05002947 img_xferred = img_request->xferred;
2948 img_result = img_request->result;
2949 rbd_img_request_put(img_request);
2950
2951 /*
2952 * If the overlap has become 0 (most likely because the
2953 * image has been flattened) we need to re-submit the
2954 * original request.
2955 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002956 rbd_assert(obj_request);
2957 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05002958 rbd_dev = obj_request->img_request->rbd_dev;
2959 if (!rbd_dev->parent_overlap) {
Ilya Dryomov980917f2016-09-12 18:59:42 +02002960 rbd_obj_request_submit(obj_request);
2961 return;
Alex Elder02c74fb2013-05-06 17:40:33 -05002962 }
2963
2964 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002965 if (obj_request->result)
2966 goto out;
2967
2968 /*
2969 * We need to zero anything beyond the parent overlap
2970 * boundary. Since rbd_img_obj_request_read_callback()
2971 * will zero anything beyond the end of a short read, an
2972 * easy way to do this is to pretend the data from the
2973 * parent came up short--ending at the overlap boundary.
2974 */
2975 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2976 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002977 if (obj_end > rbd_dev->parent_overlap) {
2978 u64 xferred = 0;
2979
2980 if (obj_request->img_offset < rbd_dev->parent_overlap)
2981 xferred = rbd_dev->parent_overlap -
2982 obj_request->img_offset;
2983
Alex Elder02c74fb2013-05-06 17:40:33 -05002984 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002985 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05002986 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002987 }
2988out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06002989 rbd_img_obj_request_read_callback(obj_request);
2990 rbd_obj_request_complete(obj_request);
2991}
2992
2993static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2994{
Alex Elder8b3e1a52013-01-24 16:13:36 -06002995 struct rbd_img_request *img_request;
2996 int result;
2997
2998 rbd_assert(obj_request_img_data_test(obj_request));
2999 rbd_assert(obj_request->img_request != NULL);
3000 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003001 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003002
Alex Elder8b3e1a52013-01-24 16:13:36 -06003003 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003004 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003005 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003006 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003007 result = -ENOMEM;
3008 if (!img_request)
3009 goto out_err;
3010
Alex Elder5b2ab722013-05-06 17:40:33 -05003011 if (obj_request->type == OBJ_REQUEST_BIO)
3012 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3013 obj_request->bio_list);
3014 else
3015 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3016 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003017 if (result)
3018 goto out_err;
3019
3020 img_request->callback = rbd_img_parent_read_callback;
3021 result = rbd_img_request_submit(img_request);
3022 if (result)
3023 goto out_err;
3024
3025 return;
3026out_err:
3027 if (img_request)
3028 rbd_img_request_put(img_request);
3029 obj_request->result = result;
3030 obj_request->xferred = 0;
3031 obj_request_done_set(obj_request);
3032}
3033
Ilya Dryomoved95b212016-08-12 16:40:02 +02003034static const struct rbd_client_id rbd_empty_cid;
3035
3036static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3037 const struct rbd_client_id *rhs)
3038{
3039 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3040}
3041
3042static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3043{
3044 struct rbd_client_id cid;
3045
3046 mutex_lock(&rbd_dev->watch_mutex);
3047 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3048 cid.handle = rbd_dev->watch_cookie;
3049 mutex_unlock(&rbd_dev->watch_mutex);
3050 return cid;
3051}
3052
3053/*
3054 * lock_rwsem must be held for write
3055 */
3056static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3057 const struct rbd_client_id *cid)
3058{
3059 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3060 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3061 cid->gid, cid->handle);
3062 rbd_dev->owner_cid = *cid; /* struct */
3063}
3064
3065static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3066{
3067 mutex_lock(&rbd_dev->watch_mutex);
3068 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3069 mutex_unlock(&rbd_dev->watch_mutex);
3070}
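/*
 * Example (assuming RBD_LOCK_COOKIE_PREFIX is "auto"): a watch
 * cookie of 12345 yields the lock cookie "auto 12345".  Embedding
 * the watch cookie ties the exclusive lock to this client's watch,
 * which is what lets find_watcher() detect a dead lock holder.
 */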
3071
3072/*
3073 * lock_rwsem must be held for write
3074 */
3075static int rbd_lock(struct rbd_device *rbd_dev)
3076{
3077 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3078 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3079 char cookie[32];
3080 int ret;
3081
3082 WARN_ON(__rbd_is_lock_owner(rbd_dev));
3083
3084 format_lock_cookie(rbd_dev, cookie);
3085 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3086 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3087 RBD_LOCK_TAG, "", 0);
3088 if (ret)
3089 return ret;
3090
3091 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3092 rbd_set_owner_cid(rbd_dev, &cid);
3093 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3094 return 0;
3095}
3096
3097/*
3098 * lock_rwsem must be held for write
3099 */
3100static int rbd_unlock(struct rbd_device *rbd_dev)
3101{
3102 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3103 char cookie[32];
3104 int ret;
3105
3106 WARN_ON(!__rbd_is_lock_owner(rbd_dev));
3107
3108 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3109
3110 format_lock_cookie(rbd_dev, cookie);
3111 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3112 RBD_LOCK_NAME, cookie);
3113 if (ret && ret != -ENOENT) {
3114 rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
3115 return ret;
3116 }
3117
3118 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3119 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3120 return 0;
3121}
3122
3123static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3124 enum rbd_notify_op notify_op,
3125 struct page ***preply_pages,
3126 size_t *preply_len)
3127{
3128 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3129 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3130 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
3131 char buf[buf_size];
3132 void *p = buf;
3133
3134 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3135
3136 /* encode *LockPayload NotifyMessage (op + ClientId) */
3137 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3138 ceph_encode_32(&p, notify_op);
3139 ceph_encode_64(&p, cid.gid);
3140 ceph_encode_64(&p, cid.handle);
3141
3142 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3143 &rbd_dev->header_oloc, buf, buf_size,
3144 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3145}
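/*
 * Rough wire layout of the NotifyMessage encoded above, assuming
 * CEPH_ENCODING_START_BLK_LEN is 6 (u8 struct_v, u8 struct_compat,
 * le32 struct_len):
 *
 *	6 bytes  encoding header (v = 2, compat = 1, len)
 *	le32     notify_op
 *	le64     cid.gid
 *	le64     cid.handle
 *
 * for a total of 26 bytes, matching buf_size.
 */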
3146
3147static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3148 enum rbd_notify_op notify_op)
3149{
3150 struct page **reply_pages;
3151 size_t reply_len;
3152
3153 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3154 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3155}
3156
3157static void rbd_notify_acquired_lock(struct work_struct *work)
3158{
3159 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3160 acquired_lock_work);
3161
3162 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3163}
3164
3165static void rbd_notify_released_lock(struct work_struct *work)
3166{
3167 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3168 released_lock_work);
3169
3170 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3171}
3172
3173static int rbd_request_lock(struct rbd_device *rbd_dev)
3174{
3175 struct page **reply_pages;
3176 size_t reply_len;
3177 bool lock_owner_responded = false;
3178 int ret;
3179
3180 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3181
3182 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3183 &reply_pages, &reply_len);
3184 if (ret && ret != -ETIMEDOUT) {
3185 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3186 goto out;
3187 }
3188
3189 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3190 void *p = page_address(reply_pages[0]);
3191 void *const end = p + reply_len;
3192 u32 n;
3193
3194 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3195 while (n--) {
3196 u8 struct_v;
3197 u32 len;
3198
3199 ceph_decode_need(&p, end, 8 + 8, e_inval);
3200 p += 8 + 8; /* skip gid and cookie */
3201
3202 ceph_decode_32_safe(&p, end, len, e_inval);
3203 if (!len)
3204 continue;
3205
3206 if (lock_owner_responded) {
3207 rbd_warn(rbd_dev,
3208 "duplicate lock owners detected");
3209 ret = -EIO;
3210 goto out;
3211 }
3212
3213 lock_owner_responded = true;
3214 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3215 &struct_v, &len);
3216 if (ret) {
3217 rbd_warn(rbd_dev,
3218 "failed to decode ResponseMessage: %d",
3219 ret);
3220 goto e_inval;
3221 }
3222
3223 ret = ceph_decode_32(&p);
3224 }
3225 }
3226
3227 if (!lock_owner_responded) {
3228 rbd_warn(rbd_dev, "no lock owners detected");
3229 ret = -ETIMEDOUT;
3230 }
3231
3232out:
3233 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3234 return ret;
3235
3236e_inval:
3237 ret = -EINVAL;
3238 goto out;
3239}
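/*
 * Note on the reply decoded above: it is a list of acks, each
 * carrying the acker's (gid, cookie) pair plus an optional payload.
 * Only the lock owner is expected to answer with a non-empty
 * payload (a ResponseMessage holding a result code), so a second
 * non-empty payload is treated as duplicate lock owners (-EIO).
 */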
3240
3241static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
3242{
3243 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
3244
3245 cancel_delayed_work(&rbd_dev->lock_dwork);
3246 if (wake_all)
3247 wake_up_all(&rbd_dev->lock_waitq);
3248 else
3249 wake_up(&rbd_dev->lock_waitq);
3250}
3251
3252static int get_lock_owner_info(struct rbd_device *rbd_dev,
3253 struct ceph_locker **lockers, u32 *num_lockers)
3254{
3255 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3256 u8 lock_type;
3257 char *lock_tag;
3258 int ret;
3259
3260 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3261
3262 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3263 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3264 &lock_type, &lock_tag, lockers, num_lockers);
3265 if (ret)
3266 return ret;
3267
3268 if (*num_lockers == 0) {
3269 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3270 goto out;
3271 }
3272
3273 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3274 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3275 lock_tag);
3276 ret = -EBUSY;
3277 goto out;
3278 }
3279
3280 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3281 rbd_warn(rbd_dev, "shared lock type detected");
3282 ret = -EBUSY;
3283 goto out;
3284 }
3285
3286 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3287 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3288 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3289 (*lockers)[0].id.cookie);
3290 ret = -EBUSY;
3291 goto out;
3292 }
3293
3294out:
3295 kfree(lock_tag);
3296 return ret;
3297}
3298
3299static int find_watcher(struct rbd_device *rbd_dev,
3300 const struct ceph_locker *locker)
3301{
3302 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3303 struct ceph_watch_item *watchers;
3304 u32 num_watchers;
3305 u64 cookie;
3306 int i;
3307 int ret;
3308
3309 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3310 &rbd_dev->header_oloc, &watchers,
3311 &num_watchers);
3312 if (ret)
3313 return ret;
3314
3315 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3316 for (i = 0; i < num_watchers; i++) {
3317 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3318 sizeof(locker->info.addr)) &&
3319 watchers[i].cookie == cookie) {
3320 struct rbd_client_id cid = {
3321 .gid = le64_to_cpu(watchers[i].name.num),
3322 .handle = cookie,
3323 };
3324
3325 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3326 rbd_dev, cid.gid, cid.handle);
3327 rbd_set_owner_cid(rbd_dev, &cid);
3328 ret = 1;
3329 goto out;
3330 }
3331 }
3332
3333 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3334 ret = 0;
3335out:
3336 kfree(watchers);
3337 return ret;
3338}
3339
3340/*
3341 * lock_rwsem must be held for write
3342 */
3343static int rbd_try_lock(struct rbd_device *rbd_dev)
3344{
3345 struct ceph_client *client = rbd_dev->rbd_client->client;
3346 struct ceph_locker *lockers;
3347 u32 num_lockers;
3348 int ret;
3349
3350 for (;;) {
3351 ret = rbd_lock(rbd_dev);
3352 if (ret != -EBUSY)
3353 return ret;
3354
3355 /* determine if the current lock holder is still alive */
3356 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3357 if (ret)
3358 return ret;
3359
3360 if (num_lockers == 0)
3361 goto again;
3362
3363 ret = find_watcher(rbd_dev, lockers);
3364 if (ret) {
3365 if (ret > 0)
3366 ret = 0; /* have to request lock */
3367 goto out;
3368 }
3369
3370 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
3371 ENTITY_NAME(lockers[0].id.name));
3372
3373 ret = ceph_monc_blacklist_add(&client->monc,
3374 &lockers[0].info.addr);
3375 if (ret) {
3376 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
3377 ENTITY_NAME(lockers[0].id.name), ret);
3378 goto out;
3379 }
3380
3381 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3382 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3383 lockers[0].id.cookie,
3384 &lockers[0].id.name);
3385 if (ret && ret != -ENOENT)
3386 goto out;
3387
3388again:
3389 ceph_free_lockers(lockers, num_lockers);
3390 }
3391
3392out:
3393 ceph_free_lockers(lockers, num_lockers);
3394 return ret;
3395}
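/*
 * The loop above implements lock takeover: try to lock; on -EBUSY
 * look up the current holder and search for its watch.  A holder
 * with a live watch wins and we fall back to requesting the lock;
 * a holder with no watch is presumed dead, so it is blacklisted
 * and its lock broken before retrying.
 */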
3396
3397/*
3398 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
3399 */
3400static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
3401 int *pret)
3402{
3403 enum rbd_lock_state lock_state;
3404
3405 down_read(&rbd_dev->lock_rwsem);
3406 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3407 rbd_dev->lock_state);
3408 if (__rbd_is_lock_owner(rbd_dev)) {
3409 lock_state = rbd_dev->lock_state;
3410 up_read(&rbd_dev->lock_rwsem);
3411 return lock_state;
3412 }
3413
3414 up_read(&rbd_dev->lock_rwsem);
3415 down_write(&rbd_dev->lock_rwsem);
3416 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3417 rbd_dev->lock_state);
3418 if (!__rbd_is_lock_owner(rbd_dev)) {
3419 *pret = rbd_try_lock(rbd_dev);
3420 if (*pret)
3421 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3422 }
3423
3424 lock_state = rbd_dev->lock_state;
3425 up_write(&rbd_dev->lock_rwsem);
3426 return lock_state;
3427}
3428
3429static void rbd_acquire_lock(struct work_struct *work)
3430{
3431 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3432 struct rbd_device, lock_dwork);
3433 enum rbd_lock_state lock_state;
3434 int ret;
3435
3436 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3437again:
3438 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3439 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3440 if (lock_state == RBD_LOCK_STATE_LOCKED)
3441 wake_requests(rbd_dev, true);
3442 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3443 rbd_dev, lock_state, ret);
3444 return;
3445 }
3446
3447 ret = rbd_request_lock(rbd_dev);
3448 if (ret == -ETIMEDOUT) {
3449 goto again; /* treat this as a dead client */
3450 } else if (ret < 0) {
3451 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3452 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3453 RBD_RETRY_DELAY);
3454 } else {
3455 /*
3456 * lock owner acked, but resend if we don't see them
3457 * release the lock
3458 */
3459 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3460 rbd_dev);
3461 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3462 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3463 }
3464}
3465
3466/*
3467 * lock_rwsem must be held for write
3468 */
3469static bool rbd_release_lock(struct rbd_device *rbd_dev)
3470{
3471 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3472 rbd_dev->lock_state);
3473 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3474 return false;
3475
3476 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3477 downgrade_write(&rbd_dev->lock_rwsem);
3478 /*
3479 * Ensure that all in-flight IO is flushed.
3480 *
3481 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3482 * may be shared with other devices.
3483 */
3484 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3485 up_read(&rbd_dev->lock_rwsem);
3486
3487 down_write(&rbd_dev->lock_rwsem);
3488 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3489 rbd_dev->lock_state);
3490 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3491 return false;
3492
3493 if (!rbd_unlock(rbd_dev))
3494 /*
3495 * Give others a chance to grab the lock - we would re-acquire
3496 * almost immediately if we got new IO during ceph_osdc_sync()
3497 * otherwise. We need to ack our own notifications, so this
3498 * lock_dwork will be requeued from rbd_wait_state_locked()
3499 * after wake_requests() in rbd_handle_released_lock().
3500 */
3501 cancel_delayed_work(&rbd_dev->lock_dwork);
3502
3503 return true;
3504}
3505
3506static void rbd_release_lock_work(struct work_struct *work)
3507{
3508 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3509 unlock_work);
3510
3511 down_write(&rbd_dev->lock_rwsem);
3512 rbd_release_lock(rbd_dev);
3513 up_write(&rbd_dev->lock_rwsem);
3514}
3515
3516static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3517 void **p)
3518{
3519 struct rbd_client_id cid = { 0 };
3520
3521 if (struct_v >= 2) {
3522 cid.gid = ceph_decode_64(p);
3523 cid.handle = ceph_decode_64(p);
3524 }
3525
3526 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3527 cid.handle);
3528 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3529 down_write(&rbd_dev->lock_rwsem);
3530 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3531 /*
3532 * we already know that the remote client is
3533 * the owner
3534 */
3535 up_write(&rbd_dev->lock_rwsem);
3536 return;
3537 }
3538
3539 rbd_set_owner_cid(rbd_dev, &cid);
3540 downgrade_write(&rbd_dev->lock_rwsem);
3541 } else {
3542 down_read(&rbd_dev->lock_rwsem);
3543 }
3544
3545 if (!__rbd_is_lock_owner(rbd_dev))
3546 wake_requests(rbd_dev, false);
3547 up_read(&rbd_dev->lock_rwsem);
3548}
3549
3550static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3551 void **p)
3552{
3553 struct rbd_client_id cid = { 0 };
3554
3555 if (struct_v >= 2) {
3556 cid.gid = ceph_decode_64(p);
3557 cid.handle = ceph_decode_64(p);
3558 }
3559
3560 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3561 cid.handle);
3562 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3563 down_write(&rbd_dev->lock_rwsem);
3564 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3565 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3566 __func__, rbd_dev, cid.gid, cid.handle,
3567 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3568 up_write(&rbd_dev->lock_rwsem);
3569 return;
3570 }
3571
3572 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3573 downgrade_write(&rbd_dev->lock_rwsem);
3574 } else {
3575 down_read(&rbd_dev->lock_rwsem);
3576 }
3577
3578 if (!__rbd_is_lock_owner(rbd_dev))
3579 wake_requests(rbd_dev, false);
3580 up_read(&rbd_dev->lock_rwsem);
3581}
3582
3583static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3584 void **p)
3585{
3586 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3587 struct rbd_client_id cid = { 0 };
3588 bool need_to_send;
3589
3590 if (struct_v >= 2) {
3591 cid.gid = ceph_decode_64(p);
3592 cid.handle = ceph_decode_64(p);
3593 }
3594
3595 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3596 cid.handle);
3597 if (rbd_cid_equal(&cid, &my_cid))
3598 return false;
3599
3600 down_read(&rbd_dev->lock_rwsem);
3601 need_to_send = __rbd_is_lock_owner(rbd_dev);
3602 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3603 if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
3604 dout("%s rbd_dev %p queueing unlock_work\n", __func__,
3605 rbd_dev);
3606 queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
3607 }
3608 }
3609 up_read(&rbd_dev->lock_rwsem);
3610 return need_to_send;
3611}
3612
3613static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3614 u64 notify_id, u64 cookie, s32 *result)
3615{
3616 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3617 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
3618 char buf[buf_size];
3619 int ret;
3620
3621 if (result) {
3622 void *p = buf;
3623
3624 /* encode ResponseMessage */
3625 ceph_start_encoding(&p, 1, 1,
3626 buf_size - CEPH_ENCODING_START_BLK_LEN);
3627 ceph_encode_32(&p, *result);
3628 } else {
3629 buf_size = 0;
3630 }
3631
3632 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3633 &rbd_dev->header_oloc, notify_id, cookie,
3634 buf, buf_size);
3635 if (ret)
3636 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3637}
3638
3639static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3640 u64 cookie)
3641{
3642 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3643 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3644}
3645
3646static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3647 u64 notify_id, u64 cookie, s32 result)
3648{
3649 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3650 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3651}
Ilya Dryomov922dab62016-05-26 01:15:02 +02003652
3653static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3654 u64 notifier_id, void *data, size_t data_len)
Alex Elderb8d70032012-11-30 17:53:04 -06003655{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003656 struct rbd_device *rbd_dev = arg;
Ilya Dryomoved95b212016-08-12 16:40:02 +02003657 void *p = data;
3658 void *const end = p + data_len;
Ilya Dryomovd4c22692016-09-06 11:15:48 +02003659 u8 struct_v = 0;
Ilya Dryomoved95b212016-08-12 16:40:02 +02003660 u32 len;
3661 u32 notify_op;
Alex Elderb8d70032012-11-30 17:53:04 -06003662 int ret;
3663
Ilya Dryomoved95b212016-08-12 16:40:02 +02003664 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3665 __func__, rbd_dev, cookie, notify_id, data_len);
3666 if (data_len) {
3667 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3668 &struct_v, &len);
3669 if (ret) {
3670 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3671 ret);
3672 return;
3673 }
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003674
Ilya Dryomoved95b212016-08-12 16:40:02 +02003675 notify_op = ceph_decode_32(&p);
3676 } else {
3677 /* legacy notification for header updates */
3678 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3679 len = 0;
3680 }
Alex Elderb8d70032012-11-30 17:53:04 -06003681
Ilya Dryomoved95b212016-08-12 16:40:02 +02003682 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3683 switch (notify_op) {
3684 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3685 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3686 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3687 break;
3688 case RBD_NOTIFY_OP_RELEASED_LOCK:
3689 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3690 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3691 break;
3692 case RBD_NOTIFY_OP_REQUEST_LOCK:
3693 if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
3694 /*
3695 * send ResponseMessage(0) back so the client
3696 * can detect a missing owner
3697 */
3698 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3699 cookie, 0);
3700 else
3701 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3702 break;
3703 case RBD_NOTIFY_OP_HEADER_UPDATE:
3704 ret = rbd_dev_refresh(rbd_dev);
3705 if (ret)
3706 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3707
3708 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3709 break;
3710 default:
3711 if (rbd_is_lock_owner(rbd_dev))
3712 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3713 cookie, -EOPNOTSUPP);
3714 else
3715 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3716 break;
3717 }
Alex Elderb8d70032012-11-30 17:53:04 -06003718}
3719
Ilya Dryomov99d16942016-08-12 16:11:41 +02003720static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3721
Ilya Dryomov922dab62016-05-26 01:15:02 +02003722static void rbd_watch_errcb(void *arg, u64 cookie, int err)
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003723{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003724 struct rbd_device *rbd_dev = arg;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003725
Ilya Dryomov922dab62016-05-26 01:15:02 +02003726 rbd_warn(rbd_dev, "encountered watch error: %d", err);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003727
Ilya Dryomoved95b212016-08-12 16:40:02 +02003728 down_write(&rbd_dev->lock_rwsem);
3729 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3730 up_write(&rbd_dev->lock_rwsem);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003731
Ilya Dryomov99d16942016-08-12 16:11:41 +02003732 mutex_lock(&rbd_dev->watch_mutex);
3733 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3734 __rbd_unregister_watch(rbd_dev);
3735 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003736
Ilya Dryomov99d16942016-08-12 16:11:41 +02003737 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003738 }
Ilya Dryomov99d16942016-08-12 16:11:41 +02003739 mutex_unlock(&rbd_dev->watch_mutex);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003740}
3741
3742/*
Ilya Dryomov99d16942016-08-12 16:11:41 +02003743 * watch_mutex must be locked
Alex Elder9969ebc2013-01-18 12:31:10 -06003744 */
Ilya Dryomov99d16942016-08-12 16:11:41 +02003745static int __rbd_register_watch(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003746{
3747 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov922dab62016-05-26 01:15:02 +02003748 struct ceph_osd_linger_request *handle;
Alex Elder9969ebc2013-01-18 12:31:10 -06003749
Ilya Dryomov922dab62016-05-26 01:15:02 +02003750 rbd_assert(!rbd_dev->watch_handle);
Ilya Dryomov99d16942016-08-12 16:11:41 +02003751 dout("%s rbd_dev %p\n", __func__, rbd_dev);
Alex Elder9969ebc2013-01-18 12:31:10 -06003752
Ilya Dryomov922dab62016-05-26 01:15:02 +02003753 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3754 &rbd_dev->header_oloc, rbd_watch_cb,
3755 rbd_watch_errcb, rbd_dev);
3756 if (IS_ERR(handle))
3757 return PTR_ERR(handle);
Alex Elder9969ebc2013-01-18 12:31:10 -06003758
Ilya Dryomov922dab62016-05-26 01:15:02 +02003759 rbd_dev->watch_handle = handle;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003760 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003761}
3762
Ilya Dryomov99d16942016-08-12 16:11:41 +02003763/*
3764 * watch_mutex must be locked
3765 */
3766static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003767{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003768 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3769 int ret;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003770
Ilya Dryomov99d16942016-08-12 16:11:41 +02003771 rbd_assert(rbd_dev->watch_handle);
3772 dout("%s rbd_dev %p\n", __func__, rbd_dev);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003773
Ilya Dryomov922dab62016-05-26 01:15:02 +02003774 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3775 if (ret)
3776 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003777
Ilya Dryomov922dab62016-05-26 01:15:02 +02003778 rbd_dev->watch_handle = NULL;
Ilya Dryomovc525f032016-04-28 16:07:26 +02003779}
3780
Ilya Dryomov99d16942016-08-12 16:11:41 +02003781static int rbd_register_watch(struct rbd_device *rbd_dev)
Ilya Dryomovc525f032016-04-28 16:07:26 +02003782{
Ilya Dryomov99d16942016-08-12 16:11:41 +02003783 int ret;
Ilya Dryomov811c6682016-04-15 16:22:16 +02003784
Ilya Dryomov99d16942016-08-12 16:11:41 +02003785 mutex_lock(&rbd_dev->watch_mutex);
3786 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3787 ret = __rbd_register_watch(rbd_dev);
3788 if (ret)
3789 goto out;
3790
3791 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3792 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3793
3794out:
3795 mutex_unlock(&rbd_dev->watch_mutex);
3796 return ret;
3797}
3798
3799static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3800{
3801 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3802
3803 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003804 cancel_work_sync(&rbd_dev->acquired_lock_work);
3805 cancel_work_sync(&rbd_dev->released_lock_work);
3806 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3807 cancel_work_sync(&rbd_dev->unlock_work);
Ilya Dryomov99d16942016-08-12 16:11:41 +02003808}
3809
3810static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3811{
Ilya Dryomoved95b212016-08-12 16:40:02 +02003812 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
Ilya Dryomov99d16942016-08-12 16:11:41 +02003813 cancel_tasks_sync(rbd_dev);
3814
3815 mutex_lock(&rbd_dev->watch_mutex);
3816 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3817 __rbd_unregister_watch(rbd_dev);
3818 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3819 mutex_unlock(&rbd_dev->watch_mutex);
3820
Ilya Dryomov811c6682016-04-15 16:22:16 +02003821 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02003822}
3823
Ilya Dryomov99d16942016-08-12 16:11:41 +02003824static void rbd_reregister_watch(struct work_struct *work)
3825{
3826 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3827 struct rbd_device, watch_dwork);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003828 bool was_lock_owner = false;
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02003829 bool need_to_wake = false;
Ilya Dryomov99d16942016-08-12 16:11:41 +02003830 int ret;
3831
3832 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3833
Ilya Dryomoved95b212016-08-12 16:40:02 +02003834 down_write(&rbd_dev->lock_rwsem);
3835 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3836 was_lock_owner = rbd_release_lock(rbd_dev);
3837
Ilya Dryomov99d16942016-08-12 16:11:41 +02003838 mutex_lock(&rbd_dev->watch_mutex);
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02003839 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3840 mutex_unlock(&rbd_dev->watch_mutex);
3841 goto out;
3842 }
Ilya Dryomov99d16942016-08-12 16:11:41 +02003843
3844 ret = __rbd_register_watch(rbd_dev);
3845 if (ret) {
3846 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
Ilya Dryomov4d736442016-09-29 14:23:12 +02003847 if (ret == -EBLACKLISTED || ret == -ENOENT) {
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02003848 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3849 need_to_wake = true;
3850 } else {
Ilya Dryomov99d16942016-08-12 16:11:41 +02003851 queue_delayed_work(rbd_dev->task_wq,
3852 &rbd_dev->watch_dwork,
3853 RBD_RETRY_DELAY);
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02003854 }
3855 mutex_unlock(&rbd_dev->watch_mutex);
3856 goto out;
Ilya Dryomov99d16942016-08-12 16:11:41 +02003857 }
3858
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02003859 need_to_wake = true;
Ilya Dryomov99d16942016-08-12 16:11:41 +02003860 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3861 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3862 mutex_unlock(&rbd_dev->watch_mutex);
3863
3864 ret = rbd_dev_refresh(rbd_dev);
3865 if (ret)
3866		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3867
Ilya Dryomoved95b212016-08-12 16:40:02 +02003868 if (was_lock_owner) {
3869 ret = rbd_try_lock(rbd_dev);
3870 if (ret)
3871 rbd_warn(rbd_dev, "reregisteration lock failed: %d",
3872 ret);
3873 }
3874
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02003875out:
Ilya Dryomoved95b212016-08-12 16:40:02 +02003876 up_write(&rbd_dev->lock_rwsem);
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02003877 if (need_to_wake)
3878 wake_requests(rbd_dev, true);
Ilya Dryomov99d16942016-08-12 16:11:41 +02003879}
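/*
 * Recovery sequence implemented above: release the exclusive lock
 * if held, re-establish the watch (on -EBLACKLISTED/-ENOENT the
 * device is flagged blacklisted instead), refresh the header, and
 * re-take the lock if we owned it -- finally waking any requests
 * stalled waiting on lock state.
 */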
3880
Alex Elder36be9a72013-01-19 00:30:28 -06003881/*
Alex Elderf40eb342013-04-25 15:09:42 -05003882 * Synchronous osd object method call. Returns the number of bytes
3883 * returned in the outbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003884 */
3885static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003886 struct ceph_object_id *oid,
3887 struct ceph_object_locator *oloc,
Alex Elder36be9a72013-01-19 00:30:28 -06003888 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003889 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003890 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003891 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003892 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003893{
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003894 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3895 struct page *req_page = NULL;
3896 struct page *reply_page;
Alex Elder36be9a72013-01-19 00:30:28 -06003897 int ret;
3898
3899 /*
Alex Elder6010a452013-04-05 01:27:11 -05003900 * Method calls are ultimately read operations. The result
3901	 * should be placed into the inbound buffer provided. They
3902 * also supply outbound data--parameters for the object
3903 * method. Currently if this is present it will be a
3904 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003905 */
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003906 if (outbound) {
3907 if (outbound_size > PAGE_SIZE)
3908 return -E2BIG;
Alex Elder36be9a72013-01-19 00:30:28 -06003909
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003910 req_page = alloc_page(GFP_KERNEL);
3911 if (!req_page)
3912 return -ENOMEM;
Alex Elder36be9a72013-01-19 00:30:28 -06003913
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003914 memcpy(page_address(req_page), outbound, outbound_size);
Alex Elder04017e22013-04-05 14:46:02 -05003915 }
Alex Elder430c28c2013-04-03 21:32:51 -05003916
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003917 reply_page = alloc_page(GFP_KERNEL);
3918 if (!reply_page) {
3919 if (req_page)
3920 __free_page(req_page);
3921 return -ENOMEM;
3922 }
Alex Elder36be9a72013-01-19 00:30:28 -06003923
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003924 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3925 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3926 reply_page, &inbound_size);
3927 if (!ret) {
3928 memcpy(inbound, page_address(reply_page), inbound_size);
3929 ret = inbound_size;
3930 }
Alex Elder57385b52013-04-21 12:14:45 -05003931
Ilya Dryomovecd4a682017-01-25 18:16:21 +01003932 if (req_page)
3933 __free_page(req_page);
3934 __free_page(reply_page);
Alex Elder36be9a72013-01-19 00:30:28 -06003935 return ret;
3936}
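/*
 * Hypothetical usage sketch (mirroring calls made elsewhere in this
 * driver): fetching the image size for a snapshot might look like
 *
 *	__le64 snapid = cpu_to_le64(snap_id);
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 *
 * where a non-negative return is the byte count placed in size_buf.
 */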

/*
 * lock_rwsem must be held for read
 */
static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
{
	DEFINE_WAIT(wait);

	do {
		/*
		 * Note the use of mod_delayed_work() in rbd_acquire_lock()
		 * and cancel_delayed_work() in wake_requests().
		 */
		dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
					  TASK_UNINTERRUPTIBLE);
		up_read(&rbd_dev->lock_rwsem);
		schedule();
		down_read(&rbd_dev->lock_rwsem);
	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
		 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));

	finish_wait(&rbd_dev->lock_waitq, &wait);
}
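
/*
 * Explanatory note (not from the original source): the waiter above
 * drops lock_rwsem before scheduling so that the acquisition path can
 * take it for write; rbd_acquire_lock() runs from lock_dwork, and once
 * the lock state changes (or the client is blacklisted) wake_requests()
 * wakes lock_waitq and the loop re-checks with the rwsem re-held.
 */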

static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	bool must_be_locked;
	int result;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	/* Only reads are allowed to a read-only device */

	if (op_type != OBJ_OP_READ) {
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
		}
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
		must_be_locked = rbd_is_lock_supported(rbd_dev);
	} else {
		must_be_locked = rbd_dev->opts->lock_on_read &&
					rbd_is_lock_supported(rbd_dev);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	if (must_be_locked) {
		down_read(&rbd_dev->lock_rwsem);
		if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
		    !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
			rbd_wait_state_locked(rbd_dev);

		WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
			!test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
			result = -EBLACKLISTED;
			goto err_unlock;
		}
	}

	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
					     snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_unlock;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
	if (result)
		goto err_img_request;

	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;

	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_unlock:
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, result);
}

static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
}
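
/*
 * Explanatory note (not from the original source): the work_struct
 * lives in the per-request PDU that blk-mq allocates for us (see
 * tag_set.cmd_size in rbd_init_disk() below), so dispatching a request
 * is just a blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() conversion plus a
 * punt to the rbd_wq workqueue; the actual I/O is built in
 * rbd_queue_workfn() above.
 */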

static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&rbd_dev->tag_set);
	}
	put_disk(disk);
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages = calc_pages_for(0, buf_len);
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}

	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
					 true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0)
		ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
	ceph_osdc_put_request(req);
	return ret;
}
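
/*
 * Explanatory note (not from the original source): own_pages is true
 * in the osd_req_op_extent_osd_data_pages() call above, so the page
 * vector is freed together with the request by ceph_osdc_put_request().
 * A non-negative return is the number of bytes actually read, which
 * may be less than buf_len.
 */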

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				 size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
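
/*
 * Explanatory note (not from the original source): rbd_dev_refresh()
 * is reached both from watch notifications (see the re-registration
 * path above) and from a write to the sysfs "refresh" attribute
 * (rbd_image_refresh() below); the new size is propagated to the
 * gendisk only after header_rwsem has been dropped.
 */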

static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static const struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.init_request	= rbd_init_request,
};

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
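
/*
 * Worked example (explanatory, assuming the default 4 MiB object size,
 * i.e. obj_order 22): segment_size is 4194304 bytes, so the queue
 * above advertises max_hw_sectors = 8192 512-byte sectors, a 4 MiB
 * maximum segment size, and discard granularity/alignment of one full
 * object.
 */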

/*
   sysfs
*/

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}
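
/*
 * Illustrative "parent" attribute output (hypothetical values, one
 * block per ancestor in the chain, blocks separated by empty lines):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1029b6b3d1f5
 *	image_name parent-image
 *	snap_id 4
 *	snap_name base
 *	overlap 10737418240
 */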
4575
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004576static ssize_t rbd_image_refresh(struct device *dev,
4577 struct device_attribute *attr,
4578 const char *buf,
4579 size_t size)
4580{
Alex Elder593a9e72012-02-07 12:03:37 -06004581 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05004582 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004583
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004584 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05004585 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04004586 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05004587
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04004588 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004589}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004590
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004591static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
Alex Elder34b13182012-07-13 20:35:12 -05004592static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004593static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02004594static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
Ilya Dryomov005a07bf2016-08-18 18:38:43 +02004595static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004596static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
Mike Christie267fb902016-08-18 18:38:43 +02004597static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02004598static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004599static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
Alex Elder9bb2f332012-07-12 10:46:35 -05004600static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004601static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
Alex Elder589d30e2012-07-10 20:30:11 -05004602static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004603static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
4604static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
Mike Christie92a58672016-08-18 18:38:44 +02004605static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
Alex Elder86b00e02012-10-25 23:34:42 -05004606static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004607
4608static struct attribute *rbd_attrs[] = {
4609 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05004610 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004611 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02004612 &dev_attr_minor.attr,
Ilya Dryomov005a07bf2016-08-18 18:38:43 +02004613 &dev_attr_client_addr.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004614 &dev_attr_client_id.attr,
Mike Christie267fb902016-08-18 18:38:43 +02004615 &dev_attr_cluster_fsid.attr,
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02004616 &dev_attr_config_info.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004617 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05004618 &dev_attr_pool_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004619 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05004620 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004621 &dev_attr_current_snap.attr,
Mike Christie92a58672016-08-18 18:38:44 +02004622 &dev_attr_snap_id.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05004623 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004624 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004625 NULL
4626};
4627
4628static struct attribute_group rbd_attr_group = {
4629 .attrs = rbd_attrs,
4630};
4631
4632static const struct attribute_group *rbd_attr_groups[] = {
4633 &rbd_attr_group,
4634 NULL
4635};
4636
Ilya Dryomov6cac4692015-10-16 20:11:25 +02004637static void rbd_dev_release(struct device *dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004638
Bhumika Goyalb9942bc2017-02-11 12:14:38 +05304639static const struct device_type rbd_device_type = {
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004640 .name = "rbd",
4641 .groups = rbd_attr_groups,
Ilya Dryomov6cac4692015-10-16 20:11:25 +02004642 .release = rbd_dev_release,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004643};
4644
Alex Elder8b8fb992012-10-26 17:25:24 -05004645static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4646{
4647 kref_get(&spec->kref);
4648
4649 return spec;
4650}
4651
4652static void rbd_spec_free(struct kref *kref);
4653static void rbd_spec_put(struct rbd_spec *spec)
4654{
4655 if (spec)
4656 kref_put(&spec->kref, rbd_spec_free);
4657}
4658
4659static struct rbd_spec *rbd_spec_alloc(void)
4660{
4661 struct rbd_spec *spec;
4662
4663 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4664 if (!spec)
4665 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04004666
4667 spec->pool_id = CEPH_NOPOOL;
4668 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05004669 kref_init(&spec->kref);
4670
Alex Elder8b8fb992012-10-26 17:25:24 -05004671 return spec;
4672}
4673
4674static void rbd_spec_free(struct kref *kref)
4675{
4676 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4677
4678 kfree(spec->pool_name);
4679 kfree(spec->image_id);
4680 kfree(spec->image_name);
4681 kfree(spec->snap_name);
4682 kfree(spec);
4683}
4684
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02004685static void rbd_dev_free(struct rbd_device *rbd_dev)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004686{
Ilya Dryomov99d16942016-08-12 16:11:41 +02004687 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004688 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004689
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004690 ceph_oid_destroy(&rbd_dev->header_oid);
Ilya Dryomov6b6dddb2016-08-05 16:15:38 +02004691 ceph_oloc_destroy(&rbd_dev->header_oloc);
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02004692 kfree(rbd_dev->config_info);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004693
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004694 rbd_put_client(rbd_dev->rbd_client);
4695 rbd_spec_put(rbd_dev->spec);
4696 kfree(rbd_dev->opts);
4697 kfree(rbd_dev);
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02004698}
4699
4700static void rbd_dev_release(struct device *dev)
4701{
4702 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4703 bool need_put = !!rbd_dev->opts;
4704
4705 if (need_put) {
4706 destroy_workqueue(rbd_dev->task_wq);
4707 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4708 }
4709
4710 rbd_dev_free(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004711
4712 /*
4713 * This is racy, but way better than putting module outside of
4714 * the release callback. The race window is pretty small, so
4715 * doing something similar to dm (dm-builtin.c) is overkill.
4716 */
4717 if (need_put)
4718 module_put(THIS_MODULE);
4719}

static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
	ceph_oid_init(&rbd_dev->header_oid);
	rbd_dev->header_oloc.pool = spec->pool_id;

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	init_waitqueue_head(&rbd_dev->lock_waitq);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	return rbd_dev;
}

/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}
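
/*
 * Explanatory note (not from the original source): __rbd_dev_create()
 * is also used when probing parent images, which never get ->opts.
 * That is why rbd_dev_release() above keys need_put off rbd_dev->opts:
 * only mapping devices created by rbd_dev_create() own a dev_id, a
 * task workqueue and a module reference.
 */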

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_size",
				  &snapid, sizeof(snapid),
				  &size_buf, sizeof(size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx snap_size = %llu\n",
	     (unsigned long long)snap_id,
	     (unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
				     &rbd_dev->header.obj_order,
				     &rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_object_prefix",
				  NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				     u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_features",
				  &snapid, sizeof(snapid),
				  &features_buf, sizeof(features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
	     (unsigned long long)snap_id,
	     (unsigned long long)*snap_features,
	     (unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
					 &rbd_dev->header.features);
}
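
/*
 * Explanatory note (not from the original source): the "features" word
 * lists what is enabled on the image, while "incompat" lists what a
 * client must understand in order to use it.  Mapping fails only when
 * incompat bits fall outside RBD_FEATURES_SUPPORTED.
 */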

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_parent",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have a
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			 (unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
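
/*
 * Reply layout sketch for the "get_parent" call decoded above
 * (explanatory, following the decode sequence): an le64 pool_id, a
 * length-prefixed (le32) image_id string, an le64 snap_id and an le64
 * overlap in bytes.  A pool_id of CEPH_NOPOOL marks a parentless or
 * flattened image.
 */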

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_stripe_unit_count",
				  NULL, 0, &striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = rbd_obj_bytes(&rbd_dev->header);
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
			 "(got %llu want %llu)",
			 stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
			 "(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
	__le64 data_pool_id;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_data_pool",
				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
	if (ret < 0)
		return ret;
	if (ret < sizeof(data_pool_id))
		return -EBADMSG;

	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	CEPH_DEFINE_OID_ONSTACK(oid);
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "dir_get_name", image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
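
/*
 * Explanatory note (not from the original source): format 1 keeps the
 * snapshot names inside the on-disk header as a packed string table,
 * so the v1 lookup is an in-memory scan, while format 2 must fetch
 * each candidate name from the OSD via rbd_dev_v2_snap_name().
 */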
5222
Alex Elder9e15b772012-10-30 19:40:33 -05005223/*
Ilya Dryomov04077592014-07-23 17:11:20 +04005224 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05005225 */
Ilya Dryomov04077592014-07-23 17:11:20 +04005226static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5227{
5228 struct rbd_spec *spec = rbd_dev->spec;
5229
5230 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5231 rbd_assert(spec->image_id && spec->image_name);
5232 rbd_assert(spec->snap_name);
5233
5234 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5235 u64 snap_id;
5236
5237 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5238 if (snap_id == CEPH_NOSNAP)
5239 return -ENOENT;
5240
5241 spec->snap_id = snap_id;
5242 } else {
5243 spec->snap_id = CEPH_NOSNAP;
5244 }
5245
5246 return 0;
5247}
5248
5249/*
5250 * A parent image will have all ids but none of the names.
5251 *
5252 * All names in an rbd spec are dynamically allocated. It's OK if we
5253 * can't figure out the name for an image id.
5254 */
5255static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05005256{
Alex Elder2e9f7f12013-04-26 09:43:48 -05005257 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5258 struct rbd_spec *spec = rbd_dev->spec;
5259 const char *pool_name;
5260 const char *image_name;
5261 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05005262 int ret;
5263
Ilya Dryomov04077592014-07-23 17:11:20 +04005264 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5265 rbd_assert(spec->image_id);
5266 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05005267
Alex Elder2e9f7f12013-04-26 09:43:48 -05005268 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05005269
Alex Elder2e9f7f12013-04-26 09:43:48 -05005270 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5271 if (!pool_name) {
5272 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05005273 return -EIO;
5274 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05005275 pool_name = kstrdup(pool_name, GFP_KERNEL);
5276 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05005277 return -ENOMEM;
5278
5279 /* Fetch the image name; tolerate failure here */
5280
Alex Elder2e9f7f12013-04-26 09:43:48 -05005281 image_name = rbd_dev_image_name(rbd_dev);
5282 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05005283 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05005284
Ilya Dryomov04077592014-07-23 17:11:20 +04005285 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05005286
Alex Elder2e9f7f12013-04-26 09:43:48 -05005287 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07005288 if (IS_ERR(snap_name)) {
5289 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05005290 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05005291 }
5292
5293 spec->pool_name = pool_name;
5294 spec->image_name = image_name;
5295 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05005296
5297 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04005298
Alex Elder9e15b772012-10-30 19:40:33 -05005299out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05005300 kfree(image_name);
5301 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05005302 return ret;
5303}
5304
Alex Eldercc4a38bd2013-04-30 00:44:33 -05005305static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05005306{
5307 size_t size;
5308 int ret;
5309 void *reply_buf;
5310 void *p;
5311 void *end;
5312 u64 seq;
5313 u32 snap_count;
5314 struct ceph_snap_context *snapc;
5315 u32 i;
5316
5317 /*
5318 * We'll need room for the seq value (maximum snapshot id),
5319 * snapshot count, and array of that many snapshot ids.
5320 * For now we have a fixed upper limit on the number we're
5321 * prepared to receive.
5322 */
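	/*
	 * Sketch of the expected reply encoding, as implied by the
	 * decode calls below (illustrative, not a wire-format spec):
	 *   __le64 seq; __le32 snap_count; __le64 snaps[snap_count];
	 */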
5323 size = sizeof (__le64) + sizeof (__le32) +
5324 RBD_MAX_SNAP_COUNT * sizeof (__le64);
5325 reply_buf = kzalloc(size, GFP_KERNEL);
5326 if (!reply_buf)
5327 return -ENOMEM;
5328
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005329 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5330 &rbd_dev->header_oloc, "get_snapcontext",
5331 NULL, 0, reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06005332 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05005333 if (ret < 0)
5334 goto out;
5335
Alex Elder35d489f2012-07-03 16:01:19 -05005336 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05005337 end = reply_buf + ret;
5338 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05005339 ceph_decode_64_safe(&p, end, seq, out);
5340 ceph_decode_32_safe(&p, end, snap_count, out);
5341
5342 /*
5343 * Make sure the reported number of snapshot ids wouldn't go
5344 * beyond the end of our buffer. But before checking that,
5345 * make sure the computed size of the snapshot context we
5346 * allocate is representable in a size_t.
5347 */
5348 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
5349 / sizeof (u64)) {
5350 ret = -EINVAL;
5351 goto out;
5352 }
5353 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
5354 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05005355 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05005356
Alex Elder812164f82013-04-30 00:44:32 -05005357 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05005358 if (!snapc) {
5359 ret = -ENOMEM;
5360 goto out;
5361 }
Alex Elder35d489f2012-07-03 16:01:19 -05005362 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05005363 for (i = 0; i < snap_count; i++)
5364 snapc->snaps[i] = ceph_decode_64(&p);
5365
Alex Elder49ece552013-05-06 08:37:00 -05005366 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05005367 rbd_dev->header.snapc = snapc;
5368
5369 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05005370 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05005371out:
5372 kfree(reply_buf);
5373
Alex Elder57385b52013-04-21 12:14:45 -05005374 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05005375}
5376
Alex Elder54cac612013-04-30 00:44:33 -05005377static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5378 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005379{
5380 size_t size;
5381 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05005382 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005383 int ret;
5384 void *p;
5385 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005386 char *snap_name;
5387
5388 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5389 reply_buf = kmalloc(size, GFP_KERNEL);
5390 if (!reply_buf)
5391 return ERR_PTR(-ENOMEM);
5392
Alex Elder54cac612013-04-30 00:44:33 -05005393 snapid = cpu_to_le64(snap_id);
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005394 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5395 &rbd_dev->header_oloc, "get_snapshot_name",
5396 &snapid, sizeof(snapid), reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06005397 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05005398 if (ret < 0) {
5399 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005400 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05005401 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005402
5403 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05005404 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05005405 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05005406 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005407 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005408
Alex Elderf40eb342013-04-25 15:09:42 -05005409 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05005410 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005411out:
5412 kfree(reply_buf);
5413
Alex Elderf40eb342013-04-25 15:09:42 -05005414 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05005415}
5416
Alex Elder2df3fac2013-05-06 09:51:30 -05005417static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05005418{
Alex Elder2df3fac2013-05-06 09:51:30 -05005419 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05005420 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05005421
Josh Durgin1617e402013-06-12 14:43:10 -07005422 ret = rbd_dev_v2_image_size(rbd_dev);
5423 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05005424 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07005425
Alex Elder2df3fac2013-05-06 09:51:30 -05005426 if (first_time) {
5427 ret = rbd_dev_v2_header_onetime(rbd_dev);
5428 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05005429 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05005430 }
5431
Alex Eldercc4a38bd2013-04-30 00:44:33 -05005432 ret = rbd_dev_v2_snap_context(rbd_dev);
Ilya Dryomovd194cd12015-08-31 18:22:10 +03005433 if (ret && first_time) {
5434 kfree(rbd_dev->header.object_prefix);
5435 rbd_dev->header.object_prefix = NULL;
5436 }
Alex Elder117973f2012-08-31 17:29:55 -05005437
5438 return ret;
5439}
5440
Ilya Dryomova720ae02014-07-23 17:11:19 +04005441static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5442{
5443 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5444
5445 if (rbd_dev->image_format == 1)
5446 return rbd_dev_v1_header_info(rbd_dev);
5447
5448 return rbd_dev_v2_header_info(rbd_dev);
5449}
5450
Alex Elder1ddbe942012-01-29 13:57:44 -06005451/*
Alex Eldere28fff262012-02-02 08:13:30 -06005452 * Skips over white space at *buf, and updates *buf to point to the
5453 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06005454 * the token (string of non-whitespace characters) found. Note
5455 * that *buf must be terminated with '\0'.
Alex Eldere28fff262012-02-02 08:13:30 -06005456 */
5457static inline size_t next_token(const char **buf)
5458{
5459 /*
5460 * These are the characters that produce nonzero for
5461 * isspace() in the "C" and "POSIX" locales.
5462 */
5463 const char *spaces = " \f\n\r\t\v";
5464
5465 *buf += strspn(*buf, spaces); /* Find start of token */
5466
5467 return strcspn(*buf, spaces); /* Return token length */
5468}
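/*
 * Example (illustrative): with *buf = "  pool image", next_token(&buf)
 * advances *buf to "pool image" and returns 4, the length of "pool".
 */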
5469
5470/*
Alex Elderea3352f2012-07-09 21:04:23 -05005471 * Finds the next token in *buf, dynamically allocates a buffer big
5472 * enough to hold a copy of it, and copies the token into the new
5473 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5474 * that a duplicate buffer is created even for a zero-length token.
5475 *
5476 * Returns a pointer to the newly-allocated duplicate, or a null
5477 * pointer if memory for the duplicate was not available. If
5478 * the lenp argument is a non-null pointer, the length of the token
5479 * (not including the '\0') is returned in *lenp.
5480 *
5481 * If successful, the *buf pointer will be updated to point beyond
5482 * the end of the found token.
5483 *
5484 * Note: uses GFP_KERNEL for allocation.
5485 */
5486static inline char *dup_token(const char **buf, size_t *lenp)
5487{
5488 char *dup;
5489 size_t len;
5490
5491 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05005492 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05005493 if (!dup)
5494 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05005495 *(dup + len) = '\0';
5496 *buf += len;
5497
5498 if (lenp)
5499 *lenp = len;
5500
5501 return dup;
5502}
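/*
 * Usage sketch (illustrative): with *buf = "pool image",
 * dup_token(&buf, &len) returns a kmalloc'd copy "pool" with len == 4
 * and leaves *buf pointing at " image"; the caller must kfree() the
 * result when done.
 */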
5503
5504/*
Alex Elder859c31d2012-10-25 23:34:42 -05005505 * Parse the options provided for an "rbd add" (i.e., rbd image
5506 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5507 * and the data written is passed here via a NUL-terminated buffer.
5508 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05005509 *
Alex Elder859c31d2012-10-25 23:34:42 -05005510 * The information extracted from these options is recorded in
5511 * the other parameters which return dynamically-allocated
5512 * structures:
5513 * ceph_opts
5514 * The address of a pointer that will refer to a ceph options
5515 * structure. Caller must release the returned pointer using
5516 * ceph_destroy_options() when it is no longer needed.
5517 * rbd_opts
5518 * Address of an rbd options pointer. Fully initialized by
5519 * this function; caller must release with kfree().
5520 * spec
5521 * Address of an rbd image specification pointer. Fully
5522 * initialized by this function based on parsed options.
5523 * Caller must release with rbd_spec_put().
5524 *
5525 * The options passed take this form:
5526 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
5527 * where:
5528 * <mon_addrs>
5529 * A comma-separated list of one or more monitor addresses.
5530 * A monitor address is an ip address, optionally followed
5531 * by a port number (separated by a colon).
5532 * I.e.: ip1[:port1][,ip2[:port2]...]
5533 * <options>
5534 * A comma-separated list of ceph and/or rbd options.
5535 * <pool_name>
5536 * The name of the rados pool containing the rbd image.
5537 * <image_name>
5538 * The name of the image in that pool to map.
5539 * <snap_id>
5540 * An optional snapshot id. If provided, the mapping will
5541 * present data from the image at the time that snapshot was
5542 * created. The image head is used if no snapshot id is
5543 * provided. Snapshot mappings are always read-only.
Alex Eldera725f65e2012-02-02 08:13:30 -06005544 */
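/*
 * For example (monitor address, credentials and names are illustrative
 * only), a mapping request written to /sys/bus/rbd/add might be:
 *
 *   1.2.3.4:6789 name=admin,secret=<key> mypool myimage -
 *
 * which maps the head (no snapshot, "-") of image "myimage" in pool
 * "mypool" via the monitor at 1.2.3.4:6789.
 */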
Alex Elder859c31d2012-10-25 23:34:42 -05005545static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05005546 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05005547 struct rbd_options **opts,
5548 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06005549{
Alex Elderd22f76e2012-07-12 10:46:35 -05005550 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05005551 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05005552 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05005553 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05005554 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05005555 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005556 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05005557 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05005558 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06005559
5560 /* The first four tokens are required */
5561
Alex Elder7ef32142012-02-02 08:13:30 -06005562 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05005563 if (!len) {
5564 rbd_warn(NULL, "no monitor address(es) provided");
5565 return -EINVAL;
5566 }
Alex Elder0ddebc02012-10-25 23:34:41 -05005567 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05005568 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06005569 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06005570
Alex Elderdc79b112012-10-25 23:34:41 -05005571 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05005572 options = dup_token(&buf, NULL);
5573 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05005574 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05005575 if (!*options) {
5576 rbd_warn(NULL, "no options provided");
5577 goto out_err;
5578 }
Alex Eldera725f65e2012-02-02 08:13:30 -06005579
Alex Elder859c31d2012-10-25 23:34:42 -05005580 spec = rbd_spec_alloc();
5581 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05005582 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05005583
5584 spec->pool_name = dup_token(&buf, NULL);
5585 if (!spec->pool_name)
5586 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05005587 if (!*spec->pool_name) {
5588 rbd_warn(NULL, "no pool name provided");
5589 goto out_err;
5590 }
Alex Eldere28fff262012-02-02 08:13:30 -06005591
Alex Elder69e7a022012-11-01 08:39:26 -05005592 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05005593 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05005594 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05005595 if (!*spec->image_name) {
5596 rbd_warn(NULL, "no image name provided");
5597 goto out_err;
5598 }
Alex Eldere28fff262012-02-02 08:13:30 -06005599
Alex Elderf28e5652012-10-25 23:34:41 -05005600 /*
5601 * Snapshot name is optional; default is to use "-"
5602 * (indicating the head/no snapshot).
5603 */
Alex Elder3feeb8942012-08-31 17:29:52 -05005604 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05005605 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05005606 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5607 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05005608 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05005609 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05005610 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05005611 }
Alex Elderecb4dc22013-04-26 09:43:47 -05005612 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5613 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05005614 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05005615 *(snap_name + len) = '\0';
5616 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05005617
Alex Elder0ddebc02012-10-25 23:34:41 -05005618 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06005619
Alex Elder4e9afeb2012-10-25 23:34:41 -05005620 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
5621 if (!rbd_opts)
5622 goto out_mem;
5623
5624 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03005625 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Ilya Dryomov80de1912016-09-20 14:23:17 +02005626 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05005627
Alex Elder859c31d2012-10-25 23:34:42 -05005628 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05005629 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05005630 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05005631 if (IS_ERR(copts)) {
5632 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05005633 goto out_err;
5634 }
Alex Elder859c31d2012-10-25 23:34:42 -05005635 kfree(options);
5636
5637 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005638 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05005639 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05005640
Alex Elderdc79b112012-10-25 23:34:41 -05005641 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05005642out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05005643 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05005644out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05005645 kfree(rbd_opts);
5646 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05005647 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05005648
Alex Elderdc79b112012-10-25 23:34:41 -05005649 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06005650}
5651
Alex Elder589d30e2012-07-10 20:30:11 -05005652/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005653 * Return pool id (>= 0) or a negative error code.
5654 */
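/*
 * The pool may have been created after our cached osdmap was built,
 * so on -ENOENT we wait for an osdmap at least as new as the
 * monitor's latest epoch and retry the lookup once.
 */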
5655static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
5656{
Ilya Dryomova319bf52015-05-15 12:02:17 +03005657 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005658 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005659 int tries = 0;
5660 int ret;
5661
5662again:
5663 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5664 if (ret == -ENOENT && tries++ < 1) {
Ilya Dryomovd0b19702016-04-28 16:07:27 +02005665 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
5666 &newest_epoch);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005667 if (ret < 0)
5668 return ret;
5669
5670 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
Ilya Dryomov7cca78c2016-04-28 16:07:28 +02005671 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005672 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03005673 newest_epoch,
5674 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005675 goto again;
5676 } else {
5677 /* the osdmap we have is new enough */
5678 return -ENOENT;
5679 }
5680 }
5681
5682 return ret;
5683}
5684
5685/*
Alex Elder589d30e2012-07-10 20:30:11 -05005686 * An rbd format 2 image has a unique identifier, distinct from the
5687 * name given to it by the user. Internally, that identifier is
5688 * what's used to specify the names of objects related to the image.
5689 *
5690 * A special "rbd id" object is used to map an rbd image name to its
5691 * id. If that object doesn't exist, then there is no v2 rbd image
5692 * with the supplied name.
5693 *
5694 * This function will record the given rbd_dev's image_id field if
5695 * it can be determined, and in that case will return 0. If any
5696 * errors occur a negative errno will be returned and the rbd_dev's
5697 * image_id field will be unchanged (and should be NULL).
5698 */
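/*
 * Illustrative example: for an image named "myimage" the id object
 * built below is RBD_ID_PREFIX followed by the image name, i.e.
 * "rbd_id.myimage" assuming RBD_ID_PREFIX is "rbd_id." (rbd_types.h).
 */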
5699static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5700{
5701 int ret;
5702 size_t size;
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005703 CEPH_DEFINE_OID_ONSTACK(oid);
Alex Elder589d30e2012-07-10 20:30:11 -05005704 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05005705 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05005706
Alex Elder589d30e2012-07-10 20:30:11 -05005707 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05005708 * When probing a parent image, the image id is already
5709 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05005710 * need to fetch the image id again in this case. We
5711 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05005712 */
Alex Elderc0fba362013-04-25 23:15:08 -05005713 if (rbd_dev->spec->image_id) {
5714 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5715
Alex Elder2c0d0a12012-10-30 19:40:33 -05005716 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05005717 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05005718
5719 /*
Alex Elder589d30e2012-07-10 20:30:11 -05005720 * First, see if the format 2 image id file exists, and if
5721 * so, get the image's persistent id from it.
5722 */
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005723 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5724 rbd_dev->spec->image_name);
5725 if (ret)
5726 return ret;
5727
5728 dout("rbd id object name is %s\n", oid.name);
Alex Elder589d30e2012-07-10 20:30:11 -05005729
5730 /* Response will be an encoded string, which includes a length */
5731
5732 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5733 response = kzalloc(size, GFP_NOIO);
5734 if (!response) {
5735 ret = -ENOMEM;
5736 goto out;
5737 }
5738
Alex Elderc0fba362013-04-25 23:15:08 -05005739 /* If it doesn't exist we'll assume it's a format 1 image */
5740
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005741 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5742 "get_id", NULL, 0,
5743 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06005744 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05005745 if (ret == -ENOENT) {
5746 image_id = kstrdup("", GFP_KERNEL);
5747 ret = image_id ? 0 : -ENOMEM;
5748 if (!ret)
5749 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04005750 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05005751 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05005752
Alex Elderc0fba362013-04-25 23:15:08 -05005753 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05005754 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08005755 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05005756 if (!ret)
5757 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05005758 }
5759
5760 if (!ret) {
5761 rbd_dev->spec->image_id = image_id;
5762 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05005763 }
5764out:
5765 kfree(response);
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005766 ceph_oid_destroy(&oid);
Alex Elder589d30e2012-07-10 20:30:11 -05005767 return ret;
5768}
5769
Alex Elder3abef3b2013-05-13 20:35:37 -05005770/*
5771 * Undo whatever state changes are made by v1 or v2 header info
5772 * call.
5773 */
Alex Elder6fd48b32013-04-28 23:32:34 -05005774static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5775{
5776 struct rbd_image_header *header;
5777
Ilya Dryomove69b8d42015-01-19 12:06:14 +03005778 rbd_dev_parent_put(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005779
5780 /* Free dynamic fields from the header, then zero it out */
5781
5782 header = &rbd_dev->header;
Alex Elder812164f82013-04-30 00:44:32 -05005783 ceph_put_snap_context(header->snapc);
Alex Elder6fd48b32013-04-28 23:32:34 -05005784 kfree(header->snap_sizes);
5785 kfree(header->snap_names);
5786 kfree(header->object_prefix);
5787 memset(header, 0, sizeof (*header));
5788}
5789
Alex Elder2df3fac2013-05-06 09:51:30 -05005790static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
Alex Eldera30b71b2012-07-10 20:30:11 -05005791{
5792 int ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005793
Alex Elder1e130192012-07-03 16:01:19 -05005794 ret = rbd_dev_v2_object_prefix(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005795 if (ret)
Alex Elder1e130192012-07-03 16:01:19 -05005796 goto out_err;
Alex Elderb1b54022012-07-03 16:01:19 -05005797
Alex Elder2df3fac2013-05-06 09:51:30 -05005798 /*
 5799 * Get and check the features for the image. Currently the
5800 * features are assumed to never change.
5801 */
Alex Elderb1b54022012-07-03 16:01:19 -05005802 ret = rbd_dev_v2_features(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005803 if (ret)
Alex Elderb1b54022012-07-03 16:01:19 -05005804 goto out_err;
Alex Elder35d489f2012-07-03 16:01:19 -05005805
Alex Eldercc070d52013-04-21 12:14:45 -05005806 /* If the image supports fancy striping, get its parameters */
5807
5808 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5809 ret = rbd_dev_v2_striping_info(rbd_dev);
5810 if (ret < 0)
5811 goto out_err;
5812 }
Alex Eldera30b71b2012-07-10 20:30:11 -05005813
Ilya Dryomov7e973322017-01-25 18:16:22 +01005814 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5815 ret = rbd_dev_v2_data_pool(rbd_dev);
5816 if (ret)
5817 goto out_err;
5818 }
5819
Ilya Dryomov263423f2017-01-25 18:16:22 +01005820 rbd_init_layout(rbd_dev);
Alex Elder35152972012-08-31 17:29:55 -05005821 return 0;
Ilya Dryomov263423f2017-01-25 18:16:22 +01005822
Alex Elder9d475de2012-07-03 16:01:19 -05005823out_err:
Alex Elder642a2532013-05-06 17:40:33 -05005824 rbd_dev->header.features = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005825 kfree(rbd_dev->header.object_prefix);
5826 rbd_dev->header.object_prefix = NULL;
Alex Elder9d475de2012-07-03 16:01:19 -05005827 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005828}
5829
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005830/*
5831 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5832 * rbd_dev_image_probe() recursion depth, which means it's also the
5833 * length of the already discovered part of the parent chain.
5834 */
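/*
 * Illustrative walk: the mapped image is probed at depth 0, its
 * parent at depth 1, the grandparent at depth 2, and so on; the
 * chain is rejected once ++depth exceeds RBD_MAX_PARENT_CHAIN_LEN.
 */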
5835static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
Alex Elder83a06262012-10-30 15:47:17 -05005836{
Alex Elder2f82ee52012-10-30 19:40:33 -05005837 struct rbd_device *parent = NULL;
Alex Elder124afba2013-04-26 15:44:36 -05005838 int ret;
5839
5840 if (!rbd_dev->parent_spec)
5841 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005842
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005843 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5844 pr_info("parent chain is too long (%d)\n", depth);
5845 ret = -EINVAL;
5846 goto out_err;
5847 }
5848
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005849 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005850 if (!parent) {
5851 ret = -ENOMEM;
Alex Elder124afba2013-04-26 15:44:36 -05005852 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005853 }
5854
5855 /*
5856 * Images related by parent/child relationships always share
5857 * rbd_client and spec/parent_spec, so bump their refcounts.
5858 */
5859 __rbd_get_client(rbd_dev->rbd_client);
5860 rbd_spec_get(rbd_dev->parent_spec);
Alex Elder124afba2013-04-26 15:44:36 -05005861
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005862 ret = rbd_dev_image_probe(parent, depth);
Alex Elder124afba2013-04-26 15:44:36 -05005863 if (ret < 0)
5864 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005865
Alex Elder124afba2013-04-26 15:44:36 -05005866 rbd_dev->parent = parent;
Alex Eldera2acd002013-05-08 22:50:04 -05005867 atomic_set(&rbd_dev->parent_ref, 1);
Alex Elder124afba2013-04-26 15:44:36 -05005868 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005869
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005870out_err:
5871 rbd_dev_unparent(rbd_dev);
Markus Elfring1761b222015-11-23 20:16:45 +01005872 rbd_dev_destroy(parent);
Alex Elder124afba2013-04-26 15:44:36 -05005873 return ret;
5874}
5875
Ilya Dryomov811c6682016-04-15 16:22:16 +02005876/*
5877 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5878 * upon return.
5879 */
Alex Elder200a6a82013-04-28 23:32:34 -05005880static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
Alex Elder124afba2013-04-26 15:44:36 -05005881{
Alex Elder83a06262012-10-30 15:47:17 -05005882 int ret;
Alex Elder83a06262012-10-30 15:47:17 -05005883
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005884 /* Record our major and minor device numbers. */
Alex Elder83a06262012-10-30 15:47:17 -05005885
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005886 if (!single_major) {
5887 ret = register_blkdev(0, rbd_dev->name);
5888 if (ret < 0)
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005889 goto err_out_unlock;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005890
5891 rbd_dev->major = ret;
5892 rbd_dev->minor = 0;
5893 } else {
5894 rbd_dev->major = rbd_major;
5895 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5896 }
Alex Elder83a06262012-10-30 15:47:17 -05005897
5898 /* Set up the blkdev mapping. */
5899
5900 ret = rbd_init_disk(rbd_dev);
5901 if (ret)
5902 goto err_out_blkdev;
5903
Alex Elderf35a4de2013-05-06 09:51:29 -05005904 ret = rbd_dev_mapping_set(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005905 if (ret)
5906 goto err_out_disk;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04005907
Alex Elderf35a4de2013-05-06 09:51:29 -05005908 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
Josh Durgin22001f62013-09-30 20:10:04 -07005909 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
Alex Elderf35a4de2013-05-06 09:51:29 -05005910
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005911 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5912 ret = device_add(&rbd_dev->dev);
Alex Elderf35a4de2013-05-06 09:51:29 -05005913 if (ret)
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005914 goto err_out_mapping;
Alex Elder83a06262012-10-30 15:47:17 -05005915
Alex Elder83a06262012-10-30 15:47:17 -05005916 /* Everything's ready. Announce the disk to the world. */
5917
Alex Elder129b79d2013-04-26 15:44:36 -05005918 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005919 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005920
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005921 spin_lock(&rbd_dev_list_lock);
5922 list_add_tail(&rbd_dev->node, &rbd_dev_list);
5923 spin_unlock(&rbd_dev_list_lock);
5924
Ilya Dryomov811c6682016-04-15 16:22:16 +02005925 add_disk(rbd_dev->disk);
Ilya Dryomovca7909e2016-08-18 18:38:41 +02005926 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
5927 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
5928 rbd_dev->header.features);
Alex Elder83a06262012-10-30 15:47:17 -05005929
5930 return ret;
Alex Elder2f82ee52012-10-30 19:40:33 -05005931
Alex Elderf35a4de2013-05-06 09:51:29 -05005932err_out_mapping:
5933 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005934err_out_disk:
5935 rbd_free_disk(rbd_dev);
5936err_out_blkdev:
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005937 if (!single_major)
5938 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005939err_out_unlock:
5940 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005941 return ret;
5942}
5943
Alex Elder332bb122013-04-27 09:59:30 -05005944static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5945{
5946 struct rbd_spec *spec = rbd_dev->spec;
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005947 int ret;
Alex Elder332bb122013-04-27 09:59:30 -05005948
5949 /* Record the header object name for this rbd image. */
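	/*
	 * Illustrative names, assuming the usual macro values from
	 * rbd_types.h: a format 1 image "foo" gets header object
	 * "foo.rbd" (RBD_SUFFIX), while a format 2 image gets
	 * "rbd_header.<image_id>" (RBD_HEADER_PREFIX).
	 */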
5950
5951 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
Alex Elder332bb122013-04-27 09:59:30 -05005952 if (rbd_dev->image_format == 1)
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005953 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5954 spec->image_name, RBD_SUFFIX);
Alex Elder332bb122013-04-27 09:59:30 -05005955 else
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005956 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5957 RBD_HEADER_PREFIX, spec->image_id);
Alex Elder332bb122013-04-27 09:59:30 -05005958
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005959 return ret;
Alex Elder332bb122013-04-27 09:59:30 -05005960}
5961
Alex Elder200a6a82013-04-28 23:32:34 -05005962static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5963{
Alex Elder6fd48b32013-04-28 23:32:34 -05005964 rbd_dev_unprobe(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005965 rbd_dev->image_format = 0;
5966 kfree(rbd_dev->spec->image_id);
5967 rbd_dev->spec->image_id = NULL;
5968
Alex Elder200a6a82013-04-28 23:32:34 -05005969 rbd_dev_destroy(rbd_dev);
5970}
5971
Alex Eldera30b71b2012-07-10 20:30:11 -05005972/*
5973 * Probe for the existence of the header object for the given rbd
Alex Elder1f3ef782013-05-06 17:40:33 -05005974 * device. If this image is the one being mapped (i.e., not a
5975 * parent), initiate a watch on its header object before using that
5976 * object to get detailed information about the rbd image.
Alex Eldera30b71b2012-07-10 20:30:11 -05005977 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005978static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
Alex Eldera30b71b2012-07-10 20:30:11 -05005979{
5980 int ret;
5981
5982 /*
Alex Elder3abef3b2013-05-13 20:35:37 -05005983 * Get the id from the image id object. Unless there's an
5984 * error, rbd_dev->spec->image_id will be filled in with
5985 * a dynamically-allocated string, and rbd_dev->image_format
5986 * will be set to either 1 or 2.
Alex Eldera30b71b2012-07-10 20:30:11 -05005987 */
5988 ret = rbd_dev_image_id(rbd_dev);
5989 if (ret)
Alex Elderc0fba362013-04-25 23:15:08 -05005990 return ret;
Alex Elderc0fba362013-04-25 23:15:08 -05005991
Alex Elder332bb122013-04-27 09:59:30 -05005992 ret = rbd_dev_header_name(rbd_dev);
5993 if (ret)
5994 goto err_out_format;
5995
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005996 if (!depth) {
Ilya Dryomov99d16942016-08-12 16:11:41 +02005997 ret = rbd_register_watch(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005998 if (ret) {
5999 if (ret == -ENOENT)
6000 pr_info("image %s/%s does not exist\n",
6001 rbd_dev->spec->pool_name,
6002 rbd_dev->spec->image_name);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02006003 goto err_out_format;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03006004 }
Alex Elder1f3ef782013-05-06 17:40:33 -05006005 }
Alex Elderb644de22013-04-27 09:59:31 -05006006
Ilya Dryomova720ae02014-07-23 17:11:19 +04006007 ret = rbd_dev_header_info(rbd_dev);
Alex Elder5655c4d2013-04-25 23:15:08 -05006008 if (ret)
Alex Elderb644de22013-04-27 09:59:31 -05006009 goto err_out_watch;
Alex Elder83a06262012-10-30 15:47:17 -05006010
Ilya Dryomov04077592014-07-23 17:11:20 +04006011 /*
6012 * If this image is the one being mapped, we have pool name and
6013 * id, image name and id, and snap name - need to fill snap id.
6014 * Otherwise this is a parent image, identified by pool, image
6015 * and snap ids - need to fill in names for those ids.
6016 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02006017 if (!depth)
Ilya Dryomov04077592014-07-23 17:11:20 +04006018 ret = rbd_spec_fill_snap_id(rbd_dev);
6019 else
6020 ret = rbd_spec_fill_names(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03006021 if (ret) {
6022 if (ret == -ENOENT)
6023 pr_info("snap %s/%s@%s does not exist\n",
6024 rbd_dev->spec->pool_name,
6025 rbd_dev->spec->image_name,
6026 rbd_dev->spec->snap_name);
Alex Elder33dca392013-04-30 00:44:33 -05006027 goto err_out_probe;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03006028 }
Alex Elder9bb81c92013-04-27 09:59:30 -05006029
Ilya Dryomove8f59b52014-07-24 10:42:13 +04006030 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6031 ret = rbd_dev_v2_parent_info(rbd_dev);
6032 if (ret)
6033 goto err_out_probe;
6034
6035 /*
6036 * Need to warn users if this image is the one being
6037 * mapped and has a parent.
6038 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02006039 if (!depth && rbd_dev->parent_spec)
Ilya Dryomove8f59b52014-07-24 10:42:13 +04006040 rbd_warn(rbd_dev,
6041 "WARNING: kernel layering is EXPERIMENTAL!");
6042 }
6043
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02006044 ret = rbd_dev_probe_parent(rbd_dev, depth);
Alex Elder30d60ba2013-05-06 09:51:30 -05006045 if (ret)
6046 goto err_out_probe;
Alex Elder83a06262012-10-30 15:47:17 -05006047
Alex Elder30d60ba2013-05-06 09:51:30 -05006048 dout("discovered format %u image, header name is %s\n",
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02006049 rbd_dev->image_format, rbd_dev->header_oid.name);
Alex Elder30d60ba2013-05-06 09:51:30 -05006050 return 0;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04006051
Alex Elder6fd48b32013-04-28 23:32:34 -05006052err_out_probe:
6053 rbd_dev_unprobe(rbd_dev);
Alex Elderb644de22013-04-27 09:59:31 -05006054err_out_watch:
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02006055 if (!depth)
Ilya Dryomov99d16942016-08-12 16:11:41 +02006056 rbd_unregister_watch(rbd_dev);
Alex Elder332bb122013-04-27 09:59:30 -05006057err_out_format:
6058 rbd_dev->image_format = 0;
Alex Elder5655c4d2013-04-25 23:15:08 -05006059 kfree(rbd_dev->spec->image_id);
6060 rbd_dev->spec->image_id = NULL;
Alex Elder5655c4d2013-04-25 23:15:08 -05006061 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05006062}
6063
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006064static ssize_t do_rbd_add(struct bus_type *bus,
6065 const char *buf,
6066 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006067{
Alex Eldercb8627c2012-07-09 21:04:23 -05006068 struct rbd_device *rbd_dev = NULL;
Alex Elderdc79b112012-10-25 23:34:41 -05006069 struct ceph_options *ceph_opts = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05006070 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05006071 struct rbd_spec *spec = NULL;
Alex Elder9d3997f2012-10-25 23:34:42 -05006072 struct rbd_client *rbdc;
Alex Elder51344a32013-05-06 07:40:30 -05006073 bool read_only;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02006074 int rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006075
6076 if (!try_module_get(THIS_MODULE))
6077 return -ENODEV;
6078
Alex Eldera725f65e2012-02-02 08:13:30 -06006079 /* parse add command */
Alex Elder859c31d2012-10-25 23:34:42 -05006080 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
Alex Elderdc79b112012-10-25 23:34:41 -05006081 if (rc < 0)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02006082 goto out;
Alex Eldera725f65e2012-02-02 08:13:30 -06006083
Alex Elder9d3997f2012-10-25 23:34:42 -05006084 rbdc = rbd_get_client(ceph_opts);
6085 if (IS_ERR(rbdc)) {
6086 rc = PTR_ERR(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05006087 goto err_out_args;
Alex Elder9d3997f2012-10-25 23:34:42 -05006088 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006089
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006090 /* pick the pool */
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04006091 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03006092 if (rc < 0) {
6093 if (rc == -ENOENT)
6094 pr_info("pool %s does not exist\n", spec->pool_name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006095 goto err_out_client;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03006096 }
Alex Elderc0cd10db2013-04-26 09:43:47 -05006097 spec->pool_id = (u64)rc;
Alex Elder859c31d2012-10-25 23:34:42 -05006098
Ilya Dryomovd1475432015-06-22 13:24:48 +03006099 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02006100 if (!rbd_dev) {
6101 rc = -ENOMEM;
Alex Elderbd4ba652012-10-25 23:34:42 -05006102 goto err_out_client;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02006103 }
Alex Elderc53d5892012-10-25 23:34:42 -05006104 rbdc = NULL; /* rbd_dev now owns this */
6105 spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovd1475432015-06-22 13:24:48 +03006106 rbd_opts = NULL; /* rbd_dev now owns this */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006107
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02006108 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
6109 if (!rbd_dev->config_info) {
6110 rc = -ENOMEM;
6111 goto err_out_rbd_dev;
6112 }
6113
Ilya Dryomov811c6682016-04-15 16:22:16 +02006114 down_write(&rbd_dev->header_rwsem);
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02006115 rc = rbd_dev_image_probe(rbd_dev, 0);
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02006116 if (rc < 0) {
6117 up_write(&rbd_dev->header_rwsem);
Alex Elderc53d5892012-10-25 23:34:42 -05006118 goto err_out_rbd_dev;
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02006119 }
Alex Elder05fd6f62012-08-29 17:11:07 -05006120
Alex Elder7ce4eef2013-05-06 17:40:33 -05006121 /* If we are mapping a snapshot it must be marked read-only */
6122
Ilya Dryomovd1475432015-06-22 13:24:48 +03006123 read_only = rbd_dev->opts->read_only;
Alex Elder7ce4eef2013-05-06 17:40:33 -05006124 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
6125 read_only = true;
6126 rbd_dev->mapping.read_only = read_only;
6127
Alex Elderb536f692013-04-28 23:32:34 -05006128 rc = rbd_dev_device_setup(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05006129 if (rc) {
Ilya Dryomove37180c2013-12-16 18:02:41 +02006130 /*
Ilya Dryomov99d16942016-08-12 16:11:41 +02006131 * rbd_unregister_watch() can't be moved into
Ilya Dryomove37180c2013-12-16 18:02:41 +02006132 * rbd_dev_image_release() without refactoring, see
6133 * commit 1f3ef78861ac.
6134 */
Ilya Dryomov99d16942016-08-12 16:11:41 +02006135 rbd_unregister_watch(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05006136 rbd_dev_image_release(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02006137 goto out;
Alex Elder3abef3b2013-05-13 20:35:37 -05006138 }
Alex Elderb536f692013-04-28 23:32:34 -05006139
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02006140 rc = count;
6141out:
6142 module_put(THIS_MODULE);
6143 return rc;
Alex Elder3abef3b2013-05-13 20:35:37 -05006144
Alex Elderc53d5892012-10-25 23:34:42 -05006145err_out_rbd_dev:
6146 rbd_dev_destroy(rbd_dev);
Alex Elderbd4ba652012-10-25 23:34:42 -05006147err_out_client:
Alex Elder9d3997f2012-10-25 23:34:42 -05006148 rbd_put_client(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05006149err_out_args:
Alex Elder859c31d2012-10-25 23:34:42 -05006150 rbd_spec_put(spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03006151 kfree(rbd_opts);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02006152 goto out;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006153}
6154
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006155static ssize_t rbd_add(struct bus_type *bus,
6156 const char *buf,
6157 size_t count)
6158{
6159 if (single_major)
6160 return -EINVAL;
6161
6162 return do_rbd_add(bus, buf, count);
6163}
6164
6165static ssize_t rbd_add_single_major(struct bus_type *bus,
6166 const char *buf,
6167 size_t count)
6168{
6169 return do_rbd_add(bus, buf, count);
6170}
6171
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02006172static void rbd_dev_device_release(struct rbd_device *rbd_dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006173{
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006174 rbd_free_disk(rbd_dev);
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02006175
6176 spin_lock(&rbd_dev_list_lock);
6177 list_del_init(&rbd_dev->node);
6178 spin_unlock(&rbd_dev_list_lock);
6179
Alex Elder200a6a82013-04-28 23:32:34 -05006180 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02006181 device_del(&rbd_dev->dev);
Alex Elder6d80b132013-05-06 07:40:30 -05006182 rbd_dev_mapping_clear(rbd_dev);
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006183 if (!single_major)
6184 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006185}
6186
Alex Elder05a46af2013-04-26 15:44:36 -05006187static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
6188{
Alex Elderad945fc2013-04-26 15:44:36 -05006189 while (rbd_dev->parent) {
Alex Elder05a46af2013-04-26 15:44:36 -05006190 struct rbd_device *first = rbd_dev;
6191 struct rbd_device *second = first->parent;
6192 struct rbd_device *third;
6193
6194 /*
 6195 * Walk down to the parent that has no grandparent
 6196 * and remove it.
6197 */
6198 while (second && (third = second->parent)) {
6199 first = second;
6200 second = third;
6201 }
Alex Elderad945fc2013-04-26 15:44:36 -05006202 rbd_assert(second);
Alex Elder8ad42cd2013-04-28 23:32:34 -05006203 rbd_dev_image_release(second);
Alex Elderad945fc2013-04-26 15:44:36 -05006204 first->parent = NULL;
6205 first->parent_overlap = 0;
6206
6207 rbd_assert(first->parent_spec);
Alex Elder05a46af2013-04-26 15:44:36 -05006208 rbd_spec_put(first->parent_spec);
6209 first->parent_spec = NULL;
Alex Elder05a46af2013-04-26 15:44:36 -05006210 }
6211}
6212
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006213static ssize_t do_rbd_remove(struct bus_type *bus,
6214 const char *buf,
6215 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006216{
6217 struct rbd_device *rbd_dev = NULL;
Alex Elder751cc0e2013-05-31 15:17:01 -05006218 struct list_head *tmp;
6219 int dev_id;
Mike Christie0276dca2016-08-18 18:38:45 +02006220 char opt_buf[6];
Alex Elder82a442d2013-05-31 17:40:44 -05006221 bool already = false;
Mike Christie0276dca2016-08-18 18:38:45 +02006222 bool force = false;
Alex Elder0d8189e2013-04-27 09:59:30 -05006223 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006224
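	/*
	 * The buffer holds "<dev-id> [force]"; e.g. (illustrative)
	 * writing "2 force" to /sys/bus/rbd/remove unmaps device id 2
	 * even while it is still open.
	 */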
Mike Christie0276dca2016-08-18 18:38:45 +02006225 dev_id = -1;
6226 opt_buf[0] = '\0';
6227 sscanf(buf, "%d %5s", &dev_id, opt_buf);
6228 if (dev_id < 0) {
6229 pr_err("dev_id out of range\n");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006230 return -EINVAL;
Mike Christie0276dca2016-08-18 18:38:45 +02006231 }
6232 if (opt_buf[0] != '\0') {
6233 if (!strcmp(opt_buf, "force")) {
6234 force = true;
6235 } else {
6236 pr_err("bad remove option at '%s'\n", opt_buf);
6237 return -EINVAL;
6238 }
6239 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006240
Alex Elder751cc0e2013-05-31 15:17:01 -05006241 ret = -ENOENT;
6242 spin_lock(&rbd_dev_list_lock);
6243 list_for_each(tmp, &rbd_dev_list) {
6244 rbd_dev = list_entry(tmp, struct rbd_device, node);
6245 if (rbd_dev->dev_id == dev_id) {
6246 ret = 0;
6247 break;
6248 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006249 }
Alex Elder751cc0e2013-05-31 15:17:01 -05006250 if (!ret) {
6251 spin_lock_irq(&rbd_dev->lock);
Mike Christie0276dca2016-08-18 18:38:45 +02006252 if (rbd_dev->open_count && !force)
Alex Elder751cc0e2013-05-31 15:17:01 -05006253 ret = -EBUSY;
6254 else
Alex Elder82a442d2013-05-31 17:40:44 -05006255 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6256 &rbd_dev->flags);
Alex Elder751cc0e2013-05-31 15:17:01 -05006257 spin_unlock_irq(&rbd_dev->lock);
6258 }
6259 spin_unlock(&rbd_dev_list_lock);
Alex Elder82a442d2013-05-31 17:40:44 -05006260 if (ret < 0 || already)
Alex Elder1ba0f1e2013-05-31 15:17:01 -05006261 return ret;
Alex Elder751cc0e2013-05-31 15:17:01 -05006262
Mike Christie0276dca2016-08-18 18:38:45 +02006263 if (force) {
6264 /*
6265 * Prevent new IO from being queued and wait for existing
6266 * IO to complete/fail.
6267 */
6268 blk_mq_freeze_queue(rbd_dev->disk->queue);
6269 blk_set_queue_dying(rbd_dev->disk->queue);
6270 }
6271
Ilya Dryomoved95b212016-08-12 16:40:02 +02006272 down_write(&rbd_dev->lock_rwsem);
6273 if (__rbd_is_lock_owner(rbd_dev))
6274 rbd_unlock(rbd_dev);
6275 up_write(&rbd_dev->lock_rwsem);
Ilya Dryomov99d16942016-08-12 16:11:41 +02006276 rbd_unregister_watch(rbd_dev);
Ilya Dryomovfca27062013-12-16 18:02:40 +02006277
Josh Durgin98752012013-08-29 17:26:31 -07006278 /*
6279 * Don't free anything from rbd_dev->disk until after all
6280 * notifies are completely processed. Otherwise
6281 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
6282 * in a potential use after free of rbd_dev->disk or rbd_dev.
6283 */
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02006284 rbd_dev_device_release(rbd_dev);
Alex Elder8ad42cd2013-04-28 23:32:34 -05006285 rbd_dev_image_release(rbd_dev);
Alex Elderaafb2302012-09-06 16:00:54 -05006286
Alex Elder1ba0f1e2013-05-31 15:17:01 -05006287 return count;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006288}
6289
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006290static ssize_t rbd_remove(struct bus_type *bus,
6291 const char *buf,
6292 size_t count)
6293{
6294 if (single_major)
6295 return -EINVAL;
6296
6297 return do_rbd_remove(bus, buf, count);
6298}
6299
6300static ssize_t rbd_remove_single_major(struct bus_type *bus,
6301 const char *buf,
6302 size_t count)
6303{
6304 return do_rbd_remove(bus, buf, count);
6305}
6306
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006307/*
6308 * create control files in sysfs
Yehuda Sadehdfc56062010-11-19 14:51:04 -08006309 * /sys/bus/rbd/...
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006310 */
6311static int rbd_sysfs_init(void)
6312{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08006313 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006314
Alex Elderfed4c142012-02-07 12:03:36 -06006315 ret = device_register(&rbd_root_dev);
Alex Elder21079782012-01-24 10:08:36 -06006316 if (ret < 0)
Yehuda Sadehdfc56062010-11-19 14:51:04 -08006317 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006318
Alex Elderfed4c142012-02-07 12:03:36 -06006319 ret = bus_register(&rbd_bus_type);
6320 if (ret < 0)
6321 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006322
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006323 return ret;
6324}
6325
6326static void rbd_sysfs_cleanup(void)
6327{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08006328 bus_unregister(&rbd_bus_type);
Alex Elderfed4c142012-02-07 12:03:36 -06006329 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006330}
6331
Alex Elder1c2a9df2013-05-01 12:43:03 -05006332static int rbd_slab_init(void)
6333{
6334 rbd_assert(!rbd_img_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08006335 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
Alex Elder868311b2013-05-01 12:43:03 -05006336 if (!rbd_img_request_cache)
6337 return -ENOMEM;
6338
6339 rbd_assert(!rbd_obj_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08006340 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
Alex Elder78c2a442013-05-01 12:43:04 -05006341 if (!rbd_obj_request_cache)
6342 goto out_err;
6343
Ilya Dryomov6c696d82017-01-25 18:16:23 +01006344 return 0;
Alex Elder1c2a9df2013-05-01 12:43:03 -05006345
Ilya Dryomov6c696d82017-01-25 18:16:23 +01006346out_err:
Alex Elder868311b2013-05-01 12:43:03 -05006347 kmem_cache_destroy(rbd_img_request_cache);
6348 rbd_img_request_cache = NULL;
Alex Elder1c2a9df2013-05-01 12:43:03 -05006349 return -ENOMEM;
6350}
6351
6352static void rbd_slab_exit(void)
6353{
Alex Elder868311b2013-05-01 12:43:03 -05006354 rbd_assert(rbd_obj_request_cache);
6355 kmem_cache_destroy(rbd_obj_request_cache);
6356 rbd_obj_request_cache = NULL;
6357
Alex Elder1c2a9df2013-05-01 12:43:03 -05006358 rbd_assert(rbd_img_request_cache);
6359 kmem_cache_destroy(rbd_img_request_cache);
6360 rbd_img_request_cache = NULL;
6361}
6362
Alex Eldercc344fa2013-02-19 12:25:56 -06006363static int __init rbd_init(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006364{
6365 int rc;
6366
Alex Elder1e32d342013-01-30 11:13:33 -06006367 if (!libceph_compatible(NULL)) {
6368 rbd_warn(NULL, "libceph incompatibility (quitting)");
Alex Elder1e32d342013-01-30 11:13:33 -06006369 return -EINVAL;
6370 }
Ilya Dryomove1b4d962013-12-13 15:28:57 +02006371
Alex Elder1c2a9df2013-05-01 12:43:03 -05006372 rc = rbd_slab_init();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006373 if (rc)
6374 return rc;
Ilya Dryomove1b4d962013-12-13 15:28:57 +02006375
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04006376 /*
6377 * The number of active work items is limited by the number of
Ilya Dryomovf77303b2015-04-22 18:28:13 +03006378 * rbd devices * queue depth, so leave @max_active at default.
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04006379 */
6380 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6381 if (!rbd_wq) {
6382 rc = -ENOMEM;
6383 goto err_out_slab;
6384 }
6385
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006386 if (single_major) {
6387 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6388 if (rbd_major < 0) {
6389 rc = rbd_major;
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04006390 goto err_out_wq;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006391 }
6392 }
6393
Alex Elder1c2a9df2013-05-01 12:43:03 -05006394 rc = rbd_sysfs_init();
6395 if (rc)
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006396 goto err_out_blkdev;
Alex Elder1c2a9df2013-05-01 12:43:03 -05006397
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006398 if (single_major)
6399 pr_info("loaded (major %d)\n", rbd_major);
6400 else
6401 pr_info("loaded\n");
6402
Ilya Dryomove1b4d962013-12-13 15:28:57 +02006403 return 0;
6404
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006405err_out_blkdev:
6406 if (single_major)
6407 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04006408err_out_wq:
6409 destroy_workqueue(rbd_wq);
Ilya Dryomove1b4d962013-12-13 15:28:57 +02006410err_out_slab:
6411 rbd_slab_exit();
Alex Elder1c2a9df2013-05-01 12:43:03 -05006412 return rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006413}
6414
Alex Eldercc344fa2013-02-19 12:25:56 -06006415static void __exit rbd_exit(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006416{
Ilya Dryomovffe312c2014-05-20 15:46:04 +04006417 ida_destroy(&rbd_dev_id_ida);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006418 rbd_sysfs_cleanup();
Ilya Dryomov9b60e702013-12-13 15:28:57 +02006419 if (single_major)
6420 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04006421 destroy_workqueue(rbd_wq);
Alex Elder1c2a9df2013-05-01 12:43:03 -05006422 rbd_slab_exit();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006423}
6424
6425module_init(rbd_init);
6426module_exit(rbd_exit);
6427
Alex Elderd552c612013-05-31 20:13:09 -05006428MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006429MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6430MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006431/* following authorship retained from original osdblk.c */
6432MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6433
Ilya Dryomov90da2582013-12-13 15:28:56 +02006434MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07006435MODULE_LICENSE("GPL");