Alex Eldere2a58ee2013-04-30 00:44:33 -05001
Yehuda Sadeh602adf42010-08-12 16:11:25 -07002/*
3 rbd.c -- Export ceph rados objects as a Linux block device
4
5
6 based on drivers/block/osdblk.c:
7
8 Copyright 2009 Red Hat, Inc.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22
23
24
Yehuda Sadehdfc56062010-11-19 14:51:04 -080025 For usage instructions, please refer to:
Yehuda Sadeh602adf42010-08-12 16:11:25 -070026
Yehuda Sadehdfc56062010-11-19 14:51:04 -080027 Documentation/ABI/testing/sysfs-bus-rbd
Yehuda Sadeh602adf42010-08-12 16:11:25 -070028
29 */
30
31#include <linux/ceph/libceph.h>
32#include <linux/ceph/osd_client.h>
33#include <linux/ceph/mon_client.h>
34#include <linux/ceph/decode.h>
Yehuda Sadeh59c2be12011-03-21 15:10:11 -070035#include <linux/parser.h>
Alex Elder30d1cff2013-05-01 12:43:03 -050036#include <linux/bsearch.h>
Yehuda Sadeh602adf42010-08-12 16:11:25 -070037
38#include <linux/kernel.h>
39#include <linux/device.h>
40#include <linux/module.h>
Christoph Hellwig7ad18af2015-01-13 17:20:04 +010041#include <linux/blk-mq.h>
Yehuda Sadeh602adf42010-08-12 16:11:25 -070042#include <linux/fs.h>
43#include <linux/blkdev.h>
Alex Elder1c2a9df2013-05-01 12:43:03 -050044#include <linux/slab.h>
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +020045#include <linux/idr.h>
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +040046#include <linux/workqueue.h>
Yehuda Sadeh602adf42010-08-12 16:11:25 -070047
48#include "rbd_types.h"
49
Alex Elderaafb2302012-09-06 16:00:54 -050050#define RBD_DEBUG /* Activate rbd_assert() calls */
51
Alex Elder593a9e72012-02-07 12:03:37 -060052/*
53 * The basic unit of block I/O is a sector. It is interpreted in a
54 * number of contexts in Linux (blk, bio, genhd), but the default is
55 * universally 512 bytes. These symbols are just slightly more
56 * meaningful than the bare numbers they represent.
57 */
58#define SECTOR_SHIFT 9
59#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
60
Alex Eldera2acd002013-05-08 22:50:04 -050061/*
62 * Increment the given counter and return its updated value.
 63 * If the counter is already 0 it is not incremented and 0 is
 64 * returned.  If the counter is already at its maximum value,
 65 * -EINVAL is returned without updating it.
66 */
67static int atomic_inc_return_safe(atomic_t *v)
68{
69 unsigned int counter;
70
71 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
72 if (counter <= (unsigned int)INT_MAX)
73 return (int)counter;
74
75 atomic_dec(v);
76
77 return -EINVAL;
78}
79
80/* Decrement the counter. Return the resulting value, or -EINVAL */
81static int atomic_dec_return_safe(atomic_t *v)
82{
83 int counter;
84
85 counter = atomic_dec_return(v);
86 if (counter >= 0)
87 return counter;
88
89 atomic_inc(v);
90
91 return -EINVAL;
92}
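/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above are meant to be used as a pair around a counter where 0 and
 * saturation act as sentinels -- for example the parent_ref counter in
 * struct rbd_device further below.  A hypothetical caller would look
 * roughly like:
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0) {
 *		... use the parent image safely ...
 *		atomic_dec_return_safe(&rbd_dev->parent_ref);
 *	}
 */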
93
Alex Elderf0f8cef2012-01-29 13:57:44 -060094#define RBD_DRV_NAME "rbd"
Yehuda Sadeh602adf42010-08-12 16:11:25 -070095
Ilya Dryomov7e513d42013-12-16 19:26:32 +020096#define RBD_MINORS_PER_MAJOR 256
97#define RBD_SINGLE_MAJOR_PART_SHIFT 4
Yehuda Sadeh602adf42010-08-12 16:11:25 -070098
Ilya Dryomov6d69bb532015-10-11 19:38:00 +020099#define RBD_MAX_PARENT_CHAIN_LEN 16
100
Alex Elderd4b125e2012-07-03 16:01:19 -0500101#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
102#define RBD_MAX_SNAP_NAME_LEN \
103 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
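/*
 * Worked example (assuming the usual Linux NAME_MAX of 255): the
 * "snap_" prefix is 5 bytes, so RBD_MAX_SNAP_NAME_LEN works out to
 * 255 - 5 = 250 bytes for the snapshot name itself.
 */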
104
Alex Elder35d489f2012-07-03 16:01:19 -0500105#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700106
107#define RBD_SNAP_HEAD_NAME "-"
108
Alex Elder9682fc62013-04-30 00:44:33 -0500109#define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
110
Alex Elder9e15b772012-10-30 19:40:33 -0500111/* This allows a single page to hold an image name sent by the OSD */
112#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
Alex Elder1e130192012-07-03 16:01:19 -0500113#define RBD_IMAGE_ID_LEN_MAX 64
Alex Elder9e15b772012-10-30 19:40:33 -0500114
Alex Elder1e130192012-07-03 16:01:19 -0500115#define RBD_OBJ_PREFIX_LEN_MAX 64
Alex Elder589d30e2012-07-10 20:30:11 -0500116
Alex Elderd8891402012-10-09 13:50:17 -0700117/* Feature bits */
118
Alex Elder5cbf6f122013-04-11 09:29:48 -0500119#define RBD_FEATURE_LAYERING (1<<0)
120#define RBD_FEATURE_STRIPINGV2 (1<<1)
121#define RBD_FEATURES_ALL \
122 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
Alex Elderd8891402012-10-09 13:50:17 -0700123
124/* Features supported by this (client software) implementation. */
125
Alex Elder770eba62012-10-25 23:34:40 -0500126#define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
Alex Elderd8891402012-10-09 13:50:17 -0700127
Alex Elder81a89792012-02-02 08:13:30 -0600128/*
129 * An RBD device name will be "rbd#", where the "rbd" comes from
130 * RBD_DRV_NAME above, and # is a unique integer identifier.
Alex Elder81a89792012-02-02 08:13:30 -0600131 */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700132#define DEV_NAME_LEN 32
133
134/*
135 * block device image metadata (in-memory version)
136 */
137struct rbd_image_header {
Alex Elderf35a4de2013-05-06 09:51:29 -0500138 /* These six fields never change for a given rbd image */
Alex Elder849b4262012-07-09 21:04:24 -0500139 char *object_prefix;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700140 __u8 obj_order;
141 __u8 crypt_type;
142 __u8 comp_type;
Alex Elderf35a4de2013-05-06 09:51:29 -0500143 u64 stripe_unit;
144 u64 stripe_count;
145 u64 features; /* Might be changeable someday? */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700146
Alex Elderf84344f2012-08-31 17:29:51 -0500147 /* The remaining fields need to be updated occasionally */
148 u64 image_size;
149 struct ceph_snap_context *snapc;
Alex Elderf35a4de2013-05-06 09:51:29 -0500150 char *snap_names; /* format 1 only */
151 u64 *snap_sizes; /* format 1 only */
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700152};
153
Alex Elder0d7dbfc2012-10-25 23:34:41 -0500154/*
155 * An rbd image specification.
156 *
157 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
Alex Elderc66c6e02012-11-01 08:39:26 -0500158 * identify an image. Each rbd_dev structure includes a pointer to
159 * an rbd_spec structure that encapsulates this identity.
160 *
 161 * Each of the ids in an rbd_spec has an associated name. For a
 162 * user-mapped image, the names are supplied and the ids associated
163 * with them are looked up. For a layered image, a parent image is
164 * defined by the tuple, and the names are looked up.
165 *
166 * An rbd_dev structure contains a parent_spec pointer which is
167 * non-null if the image it represents is a child in a layered
168 * image. This pointer will refer to the rbd_spec structure used
169 * by the parent rbd_dev for its own identity (i.e., the structure
170 * is shared between the parent and child).
171 *
172 * Since these structures are populated once, during the discovery
173 * phase of image construction, they are effectively immutable so
174 * we make no effort to synchronize access to them.
175 *
176 * Note that code herein does not assume the image name is known (it
177 * could be a null pointer).
Alex Elder0d7dbfc2012-10-25 23:34:41 -0500178 */
179struct rbd_spec {
180 u64 pool_id;
Alex Elderecb4dc22013-04-26 09:43:47 -0500181 const char *pool_name;
Alex Elder0d7dbfc2012-10-25 23:34:41 -0500182
Alex Elderecb4dc22013-04-26 09:43:47 -0500183 const char *image_id;
184 const char *image_name;
Alex Elder0d7dbfc2012-10-25 23:34:41 -0500185
186 u64 snap_id;
Alex Elderecb4dc22013-04-26 09:43:47 -0500187 const char *snap_name;
Alex Elder0d7dbfc2012-10-25 23:34:41 -0500188
189 struct kref kref;
190};
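/*
 * Example (illustrative values only): mapping the head of image "foo"
 * in pool "rbd" would yield a spec roughly like pool_id = 2,
 * pool_name = "rbd", image_id = "1028b43a9f55", image_name = "foo",
 * snap_id = CEPH_NOSNAP and snap_name = "-" (RBD_SNAP_HEAD_NAME).
 */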
191
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700192/*
Alex Elderf0f8cef2012-01-29 13:57:44 -0600193 * an instance of the client. multiple devices may share an rbd client.
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700194 */
195struct rbd_client {
196 struct ceph_client *client;
197 struct kref kref;
198 struct list_head node;
199};
200
Alex Elderbf0d5f502012-11-22 00:00:08 -0600201struct rbd_img_request;
202typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
203
204#define BAD_WHICH U32_MAX /* Good which or bad which, which? */
205
206struct rbd_obj_request;
207typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
208
Alex Elder9969ebc2013-01-18 12:31:10 -0600209enum obj_request_type {
210 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
211};
Alex Elderbf0d5f502012-11-22 00:00:08 -0600212
Guangliang Zhao6d2940c2014-03-13 11:21:35 +0800213enum obj_operation_type {
214 OBJ_OP_WRITE,
215 OBJ_OP_READ,
Guangliang Zhao90e98c52014-04-01 22:22:16 +0800216 OBJ_OP_DISCARD,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +0800217};
218
Alex Elder926f9b32013-02-11 12:33:24 -0600219enum obj_req_flags {
220 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
Alex Elder6365d332013-02-11 12:33:24 -0600221 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
Alex Elder5679c592013-02-11 12:33:24 -0600222 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
223 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
Alex Elder926f9b32013-02-11 12:33:24 -0600224};
225
Alex Elderbf0d5f502012-11-22 00:00:08 -0600226struct rbd_obj_request {
227 const char *object_name;
228 u64 offset; /* object start byte */
229 u64 length; /* bytes from offset */
Alex Elder926f9b32013-02-11 12:33:24 -0600230 unsigned long flags;
Alex Elderbf0d5f502012-11-22 00:00:08 -0600231
Alex Elderc5b5ef62013-02-11 12:33:24 -0600232 /*
233 * An object request associated with an image will have its
234 * img_data flag set; a standalone object request will not.
235 *
236 * A standalone object request will have which == BAD_WHICH
237 * and a null obj_request pointer.
238 *
239 * An object request initiated in support of a layered image
240 * object (to check for its existence before a write) will
241 * have which == BAD_WHICH and a non-null obj_request pointer.
242 *
243 * Finally, an object request for rbd image data will have
244 * which != BAD_WHICH, and will have a non-null img_request
245 * pointer. The value of which will be in the range
246 * 0..(img_request->obj_request_count-1).
247 */
248 union {
249 struct rbd_obj_request *obj_request; /* STAT op */
250 struct {
251 struct rbd_img_request *img_request;
252 u64 img_offset;
253 /* links for img_request->obj_requests list */
254 struct list_head links;
255 };
256 };
Alex Elderbf0d5f502012-11-22 00:00:08 -0600257 u32 which; /* posn image request list */
258
259 enum obj_request_type type;
Alex Elder788e2df2013-01-17 12:25:27 -0600260 union {
261 struct bio *bio_list;
262 struct {
263 struct page **pages;
264 u32 page_count;
265 };
266 };
Alex Elder0eefd472013-04-19 15:34:50 -0500267 struct page **copyup_pages;
Alex Elderebda6402013-05-10 16:29:22 -0500268 u32 copyup_page_count;
Alex Elderbf0d5f502012-11-22 00:00:08 -0600269
270 struct ceph_osd_request *osd_req;
271
272 u64 xferred; /* bytes transferred */
Sage Weil1b83bef2013-02-25 16:11:12 -0800273 int result;
Alex Elderbf0d5f502012-11-22 00:00:08 -0600274
275 rbd_obj_callback_t callback;
Alex Elder788e2df2013-01-17 12:25:27 -0600276 struct completion completion;
Alex Elderbf0d5f502012-11-22 00:00:08 -0600277
278 struct kref kref;
279};
280
Alex Elder0c425242013-02-08 09:55:49 -0600281enum img_req_flags {
Alex Elder9849e982013-01-24 16:13:36 -0600282 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
283 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
Alex Elderd0b2e942013-01-24 16:13:36 -0600284 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
Guangliang Zhao90e98c52014-04-01 22:22:16 +0800285 IMG_REQ_DISCARD, /* discard: normal = 0, discard request = 1 */
Alex Elder0c425242013-02-08 09:55:49 -0600286};
287
Alex Elderbf0d5f502012-11-22 00:00:08 -0600288struct rbd_img_request {
Alex Elderbf0d5f502012-11-22 00:00:08 -0600289 struct rbd_device *rbd_dev;
290 u64 offset; /* starting image byte offset */
291 u64 length; /* byte count from offset */
Alex Elder0c425242013-02-08 09:55:49 -0600292 unsigned long flags;
Alex Elderbf0d5f502012-11-22 00:00:08 -0600293 union {
Alex Elder9849e982013-01-24 16:13:36 -0600294 u64 snap_id; /* for reads */
Alex Elderbf0d5f502012-11-22 00:00:08 -0600295 struct ceph_snap_context *snapc; /* for writes */
Alex Elder9849e982013-01-24 16:13:36 -0600296 };
297 union {
298 struct request *rq; /* block request */
299 struct rbd_obj_request *obj_request; /* obj req initiator */
Alex Elderbf0d5f502012-11-22 00:00:08 -0600300 };
Alex Elder3d7efd12013-04-19 15:34:50 -0500301 struct page **copyup_pages;
Alex Elderebda6402013-05-10 16:29:22 -0500302 u32 copyup_page_count;
Alex Elderbf0d5f502012-11-22 00:00:08 -0600303 spinlock_t completion_lock;/* protects next_completion */
304 u32 next_completion;
305 rbd_img_callback_t callback;
Alex Elder55f27e02013-04-10 12:34:25 -0500306 u64 xferred;/* aggregate bytes transferred */
Alex Eldera5a337d2013-01-24 16:13:36 -0600307 int result; /* first nonzero obj_request result */
Alex Elderbf0d5f502012-11-22 00:00:08 -0600308
309 u32 obj_request_count;
310 struct list_head obj_requests; /* rbd_obj_request structs */
311
312 struct kref kref;
313};
314
315#define for_each_obj_request(ireq, oreq) \
Alex Elderef06f4d32013-02-08 09:55:48 -0600316 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
Alex Elderbf0d5f502012-11-22 00:00:08 -0600317#define for_each_obj_request_from(ireq, oreq) \
Alex Elderef06f4d32013-02-08 09:55:48 -0600318 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
Alex Elderbf0d5f502012-11-22 00:00:08 -0600319#define for_each_obj_request_safe(ireq, oreq, n) \
Alex Elderef06f4d32013-02-08 09:55:48 -0600320 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
Alex Elderbf0d5f502012-11-22 00:00:08 -0600321
Alex Elderf84344f2012-08-31 17:29:51 -0500322struct rbd_mapping {
Alex Elder99c1f082012-08-30 14:42:15 -0500323 u64 size;
Alex Elder34b13182012-07-13 20:35:12 -0500324 u64 features;
Alex Elderf84344f2012-08-31 17:29:51 -0500325 bool read_only;
326};
327
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700328/*
329 * a single device
330 */
331struct rbd_device {
Alex Elderde71a292012-07-03 16:01:19 -0500332 int dev_id; /* blkdev unique id */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700333
334 int major; /* blkdev assigned major */
Ilya Dryomovdd82fff2013-12-13 15:28:57 +0200335 int minor;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700336 struct gendisk *disk; /* blkdev's gendisk and rq */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700337
Alex Eldera30b71b2012-07-10 20:30:11 -0500338 u32 image_format; /* Either 1 or 2 */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700339 struct rbd_client *rbd_client;
340
341 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
342
Alex Elderb82d1672013-01-14 12:43:31 -0600343 spinlock_t lock; /* queue, flags, open_count */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700344
345 struct rbd_image_header header;
Alex Elderb82d1672013-01-14 12:43:31 -0600346 unsigned long flags; /* possibly lock protected */
Alex Elder0d7dbfc2012-10-25 23:34:41 -0500347 struct rbd_spec *spec;
Ilya Dryomovd1475432015-06-22 13:24:48 +0300348 struct rbd_options *opts;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700349
Ilya Dryomovc41d13a2016-04-29 20:01:25 +0200350 struct ceph_object_id header_oid;
Ilya Dryomov922dab62016-05-26 01:15:02 +0200351 struct ceph_object_locator header_oloc;
Alex Elder971f8392012-10-25 23:34:41 -0500352
Ilya Dryomov1643dfa2016-08-12 15:45:52 +0200353 struct ceph_file_layout layout; /* used for all rbd requests */
Alex Elder0903e872012-11-14 12:25:19 -0600354
Ilya Dryomov922dab62016-05-26 01:15:02 +0200355 struct ceph_osd_linger_request *watch_handle;
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700356
Ilya Dryomov1643dfa2016-08-12 15:45:52 +0200357 struct workqueue_struct *task_wq;
358
Alex Elder86b00e02012-10-25 23:34:42 -0500359 struct rbd_spec *parent_spec;
360 u64 parent_overlap;
Alex Eldera2acd002013-05-08 22:50:04 -0500361 atomic_t parent_ref;
Alex Elder2f82ee52012-10-30 19:40:33 -0500362 struct rbd_device *parent;
Alex Elder86b00e02012-10-25 23:34:42 -0500363
Christoph Hellwig7ad18af2015-01-13 17:20:04 +0100364 /* Block layer tags. */
365 struct blk_mq_tag_set tag_set;
366
Josh Durginc6666012011-11-21 17:11:12 -0800367 /* protects updating the header */
368 struct rw_semaphore header_rwsem;
Alex Elderf84344f2012-08-31 17:29:51 -0500369
370 struct rbd_mapping mapping;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700371
372 struct list_head node;
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800373
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800374 /* sysfs related */
375 struct device dev;
Alex Elderb82d1672013-01-14 12:43:31 -0600376 unsigned long open_count; /* protected by lock */
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800377};
378
Alex Elderb82d1672013-01-14 12:43:31 -0600379/*
380 * Flag bits for rbd_dev->flags. If atomicity is required,
381 * rbd_dev->lock is used to protect access.
382 *
383 * Currently, only the "removing" flag (which is coupled with the
384 * "open_count" field) requires atomic access.
385 */
Alex Elder6d292902013-01-14 12:43:31 -0600386enum rbd_dev_flags {
387 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
Alex Elderb82d1672013-01-14 12:43:31 -0600388 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
Alex Elder6d292902013-01-14 12:43:31 -0600389};
390
Alex Eldercfbf6372013-05-31 17:40:45 -0500391static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
Alex Eldere124a82f2012-01-29 13:57:44 -0600392
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700393static LIST_HEAD(rbd_dev_list); /* devices */
Alex Eldere124a82f2012-01-29 13:57:44 -0600394static DEFINE_SPINLOCK(rbd_dev_list_lock);
395
Alex Elder432b8582012-01-29 13:57:44 -0600396static LIST_HEAD(rbd_client_list); /* clients */
397static DEFINE_SPINLOCK(rbd_client_list_lock);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700398
Alex Elder78c2a442013-05-01 12:43:04 -0500399/* Slab caches for frequently-allocated structures */
400
Alex Elder1c2a9df2013-05-01 12:43:03 -0500401static struct kmem_cache *rbd_img_request_cache;
Alex Elder868311b2013-05-01 12:43:03 -0500402static struct kmem_cache *rbd_obj_request_cache;
Alex Elder78c2a442013-05-01 12:43:04 -0500403static struct kmem_cache *rbd_segment_name_cache;
Alex Elder1c2a9df2013-05-01 12:43:03 -0500404
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200405static int rbd_major;
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +0200406static DEFINE_IDA(rbd_dev_id_ida);
407
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +0400408static struct workqueue_struct *rbd_wq;
409
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200410/*
411 * Default to false for now, as single-major requires >= 0.75 version of
412 * userspace rbd utility.
413 */
414static bool single_major = false;
415module_param(single_major, bool, S_IRUGO);
416MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
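/*
 * Usage note (illustration): loading the module with
 * "modprobe rbd single_major=1" makes all images share the one rbd
 * major number; with the default, each mapped device registers its
 * own major.
 */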
417
Alex Elder3d7efd12013-04-19 15:34:50 -0500418static int rbd_img_request_submit(struct rbd_img_request *img_request);
419
Alex Elderf0f8cef2012-01-29 13:57:44 -0600420static ssize_t rbd_add(struct bus_type *bus, const char *buf,
421 size_t count);
422static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
423 size_t count);
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200424static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
425 size_t count);
426static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
427 size_t count);
Ilya Dryomov6d69bb532015-10-11 19:38:00 +0200428static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
Alex Eldera2acd002013-05-08 22:50:04 -0500429static void rbd_spec_put(struct rbd_spec *spec);
Alex Elderf0f8cef2012-01-29 13:57:44 -0600430
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200431static int rbd_dev_id_to_minor(int dev_id)
432{
Ilya Dryomov7e513d42013-12-16 19:26:32 +0200433 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200434}
435
436static int minor_to_rbd_dev_id(int minor)
437{
Ilya Dryomov7e513d42013-12-16 19:26:32 +0200438 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200439}
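/*
 * Example of the mapping above: with RBD_SINGLE_MAJOR_PART_SHIFT == 4
 * each device owns 16 minors, so dev_id 2 starts at minor 32, and any
 * minor in 32..47 (the partitions of that disk) maps back to dev_id 2.
 */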
440
Greg Kroah-Hartmanb15a21d2013-08-23 14:24:28 -0700441static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
442static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200443static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
444static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
Greg Kroah-Hartmanb15a21d2013-08-23 14:24:28 -0700445
446static struct attribute *rbd_bus_attrs[] = {
447 &bus_attr_add.attr,
448 &bus_attr_remove.attr,
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200449 &bus_attr_add_single_major.attr,
450 &bus_attr_remove_single_major.attr,
Greg Kroah-Hartmanb15a21d2013-08-23 14:24:28 -0700451 NULL,
Alex Elderf0f8cef2012-01-29 13:57:44 -0600452};
Ilya Dryomov92c76dc2013-12-13 15:28:57 +0200453
454static umode_t rbd_bus_is_visible(struct kobject *kobj,
455 struct attribute *attr, int index)
456{
Ilya Dryomov9b60e702013-12-13 15:28:57 +0200457 if (!single_major &&
458 (attr == &bus_attr_add_single_major.attr ||
459 attr == &bus_attr_remove_single_major.attr))
460 return 0;
461
Ilya Dryomov92c76dc2013-12-13 15:28:57 +0200462 return attr->mode;
463}
464
465static const struct attribute_group rbd_bus_group = {
466 .attrs = rbd_bus_attrs,
467 .is_visible = rbd_bus_is_visible,
468};
469__ATTRIBUTE_GROUPS(rbd_bus);
Alex Elderf0f8cef2012-01-29 13:57:44 -0600470
471static struct bus_type rbd_bus_type = {
472 .name = "rbd",
Greg Kroah-Hartmanb15a21d2013-08-23 14:24:28 -0700473 .bus_groups = rbd_bus_groups,
Alex Elderf0f8cef2012-01-29 13:57:44 -0600474};
475
476static void rbd_root_dev_release(struct device *dev)
477{
478}
479
480static struct device rbd_root_dev = {
481 .init_name = "rbd",
482 .release = rbd_root_dev_release,
483};
484
Alex Elder06ecc6c2012-11-01 10:17:15 -0500485static __printf(2, 3)
486void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
487{
488 struct va_format vaf;
489 va_list args;
490
491 va_start(args, fmt);
492 vaf.fmt = fmt;
493 vaf.va = &args;
494
495 if (!rbd_dev)
496 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
497 else if (rbd_dev->disk)
498 printk(KERN_WARNING "%s: %s: %pV\n",
499 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
500 else if (rbd_dev->spec && rbd_dev->spec->image_name)
501 printk(KERN_WARNING "%s: image %s: %pV\n",
502 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
503 else if (rbd_dev->spec && rbd_dev->spec->image_id)
504 printk(KERN_WARNING "%s: id %s: %pV\n",
505 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
506 else /* punt */
507 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
508 RBD_DRV_NAME, rbd_dev, &vaf);
509 va_end(args);
510}
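/*
 * Example output (illustrative): depending on how much of the device
 * is known, the formats above produce messages of the shape
 * "rbd: rbd0: <message>", "rbd: image foo: <message>",
 * "rbd: id <image id>: <message>", or "rbd: rbd_dev <ptr>: <message>".
 */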
511
Alex Elderaafb2302012-09-06 16:00:54 -0500512#ifdef RBD_DEBUG
513#define rbd_assert(expr) \
514 if (unlikely(!(expr))) { \
515 printk(KERN_ERR "\nAssertion failure in %s() " \
516 "at line %d:\n\n" \
517 "\trbd_assert(%s);\n\n", \
518 __func__, __LINE__, #expr); \
519 BUG(); \
520 }
521#else /* !RBD_DEBUG */
522# define rbd_assert(expr) ((void) 0)
523#endif /* !RBD_DEBUG */
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800524
Ilya Dryomov27617132015-07-16 17:36:11 +0300525static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
Alex Elderb454e362013-04-19 15:34:50 -0500526static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
Alex Elder05a46af2013-04-26 15:44:36 -0500527static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
528static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
Alex Elder8b3e1a52013-01-24 16:13:36 -0600529
Alex Eldercc4a38bd2013-04-30 00:44:33 -0500530static int rbd_dev_refresh(struct rbd_device *rbd_dev);
Alex Elder2df3fac2013-05-06 09:51:30 -0500531static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
Ilya Dryomova720ae02014-07-23 17:11:19 +0400532static int rbd_dev_header_info(struct rbd_device *rbd_dev);
Ilya Dryomove8f59b52014-07-24 10:42:13 +0400533static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
Alex Elder54cac612013-04-30 00:44:33 -0500534static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
535 u64 snap_id);
Alex Elder2ad3d712013-04-30 00:44:33 -0500536static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
537 u8 *order, u64 *snap_size);
538static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
539 u64 *snap_features);
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700540
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700541static int rbd_open(struct block_device *bdev, fmode_t mode)
542{
Alex Elderf0f8cef2012-01-29 13:57:44 -0600543 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
Alex Elderb82d1672013-01-14 12:43:31 -0600544 bool removing = false;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700545
Alex Elderf84344f2012-08-31 17:29:51 -0500546 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700547 return -EROFS;
548
Alex Eldera14ea262013-02-05 13:23:12 -0600549 spin_lock_irq(&rbd_dev->lock);
Alex Elderb82d1672013-01-14 12:43:31 -0600550 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
551 removing = true;
552 else
553 rbd_dev->open_count++;
Alex Eldera14ea262013-02-05 13:23:12 -0600554 spin_unlock_irq(&rbd_dev->lock);
Alex Elderb82d1672013-01-14 12:43:31 -0600555 if (removing)
556 return -ENOENT;
557
Alex Elderc3e946c2012-11-16 09:29:16 -0600558 (void) get_device(&rbd_dev->dev);
Alex Elder340c7a22012-08-10 13:12:07 -0700559
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700560 return 0;
561}
562
Al Virodb2a1442013-05-05 21:52:57 -0400563static void rbd_release(struct gendisk *disk, fmode_t mode)
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800564{
565 struct rbd_device *rbd_dev = disk->private_data;
Alex Elderb82d1672013-01-14 12:43:31 -0600566 unsigned long open_count_before;
567
Alex Eldera14ea262013-02-05 13:23:12 -0600568 spin_lock_irq(&rbd_dev->lock);
Alex Elderb82d1672013-01-14 12:43:31 -0600569 open_count_before = rbd_dev->open_count--;
Alex Eldera14ea262013-02-05 13:23:12 -0600570 spin_unlock_irq(&rbd_dev->lock);
Alex Elderb82d1672013-01-14 12:43:31 -0600571 rbd_assert(open_count_before > 0);
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800572
Alex Elderc3e946c2012-11-16 09:29:16 -0600573 put_device(&rbd_dev->dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800574}
575
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800576static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
577{
Josh Durgin77f33c02013-09-30 17:09:54 -0700578 int ret = 0;
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800579 int val;
580 bool ro;
Josh Durgin77f33c02013-09-30 17:09:54 -0700581 bool ro_changed = false;
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800582
Josh Durgin77f33c02013-09-30 17:09:54 -0700583 /* get_user() may sleep, so call it before taking rbd_dev->lock */
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800584 if (get_user(val, (int __user *)(arg)))
585 return -EFAULT;
586
587 ro = val ? true : false;
 588 /* Snapshots do not allow writes */
589 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
590 return -EROFS;
591
Josh Durgin77f33c02013-09-30 17:09:54 -0700592 spin_lock_irq(&rbd_dev->lock);
593 /* prevent others open this device */
594 if (rbd_dev->open_count > 1) {
595 ret = -EBUSY;
596 goto out;
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800597 }
598
Josh Durgin77f33c02013-09-30 17:09:54 -0700599 if (rbd_dev->mapping.read_only != ro) {
600 rbd_dev->mapping.read_only = ro;
601 ro_changed = true;
602 }
603
604out:
605 spin_unlock_irq(&rbd_dev->lock);
606 /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
607 if (ret == 0 && ro_changed)
608 set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
609
610 return ret;
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800611}
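/*
 * Usage note (not from the original source): BLKROSET is the ioctl
 * issued by e.g. "blockdev --setro /dev/rbd0", so the handler above is
 * what lets a mapped image be flipped between read-only and
 * read-write after mapping, provided no one else has it open.
 */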
612
613static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
614 unsigned int cmd, unsigned long arg)
615{
616 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
617 int ret = 0;
618
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800619 switch (cmd) {
620 case BLKROSET:
621 ret = rbd_ioctl_set_ro(rbd_dev, arg);
622 break;
623 default:
624 ret = -ENOTTY;
625 }
626
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800627 return ret;
628}
629
630#ifdef CONFIG_COMPAT
631static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
632 unsigned int cmd, unsigned long arg)
633{
634 return rbd_ioctl(bdev, mode, cmd, arg);
635}
636#endif /* CONFIG_COMPAT */
637
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700638static const struct block_device_operations rbd_bd_ops = {
639 .owner = THIS_MODULE,
640 .open = rbd_open,
Yehuda Sadehdfc56062010-11-19 14:51:04 -0800641 .release = rbd_release,
Guangliang Zhao131fd9f2013-09-24 11:25:36 +0800642 .ioctl = rbd_ioctl,
643#ifdef CONFIG_COMPAT
644 .compat_ioctl = rbd_compat_ioctl,
645#endif
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700646};
647
648/*
Alex Elder7262cfc2013-05-16 15:04:20 -0500649 * Initialize an rbd client instance. Success or not, this function
Alex Eldercfbf6372013-05-31 17:40:45 -0500650 * consumes ceph_opts. Caller holds client_mutex.
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700651 */
Alex Elderf8c38922012-08-10 13:12:07 -0700652static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700653{
654 struct rbd_client *rbdc;
655 int ret = -ENOMEM;
656
Alex Elder37206ee2013-02-20 17:32:08 -0600657 dout("%s:\n", __func__);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700658 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
659 if (!rbdc)
660 goto out_opt;
661
662 kref_init(&rbdc->kref);
663 INIT_LIST_HEAD(&rbdc->node);
664
Alex Elder43ae4702012-07-03 16:01:18 -0500665 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700666 if (IS_ERR(rbdc->client))
Alex Elder08f75462013-05-29 11:19:00 -0500667 goto out_rbdc;
Alex Elder43ae4702012-07-03 16:01:18 -0500668 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700669
670 ret = ceph_open_session(rbdc->client);
671 if (ret < 0)
Alex Elder08f75462013-05-29 11:19:00 -0500672 goto out_client;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700673
Alex Elder432b8582012-01-29 13:57:44 -0600674 spin_lock(&rbd_client_list_lock);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700675 list_add_tail(&rbdc->node, &rbd_client_list);
Alex Elder432b8582012-01-29 13:57:44 -0600676 spin_unlock(&rbd_client_list_lock);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700677
Alex Elder37206ee2013-02-20 17:32:08 -0600678 dout("%s: rbdc %p\n", __func__, rbdc);
Alex Elderbc534d82012-01-29 13:57:44 -0600679
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700680 return rbdc;
Alex Elder08f75462013-05-29 11:19:00 -0500681out_client:
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700682 ceph_destroy_client(rbdc->client);
Alex Elder08f75462013-05-29 11:19:00 -0500683out_rbdc:
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700684 kfree(rbdc);
685out_opt:
Alex Elder43ae4702012-07-03 16:01:18 -0500686 if (ceph_opts)
687 ceph_destroy_options(ceph_opts);
Alex Elder37206ee2013-02-20 17:32:08 -0600688 dout("%s: error %d\n", __func__, ret);
689
Vasiliy Kulikov28f259b2010-09-26 12:59:37 +0400690 return ERR_PTR(ret);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700691}
692
Alex Elder2f82ee52012-10-30 19:40:33 -0500693static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
694{
695 kref_get(&rbdc->kref);
696
697 return rbdc;
698}
699
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700700/*
Alex Elder1f7ba332012-08-10 13:12:07 -0700701 * Find a ceph client with specific addr and configuration. If
702 * found, bump its reference count.
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700703 */
Alex Elder1f7ba332012-08-10 13:12:07 -0700704static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700705{
706 struct rbd_client *client_node;
Alex Elder1f7ba332012-08-10 13:12:07 -0700707 bool found = false;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700708
Alex Elder43ae4702012-07-03 16:01:18 -0500709 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700710 return NULL;
711
Alex Elder1f7ba332012-08-10 13:12:07 -0700712 spin_lock(&rbd_client_list_lock);
713 list_for_each_entry(client_node, &rbd_client_list, node) {
714 if (!ceph_compare_options(ceph_opts, client_node->client)) {
Alex Elder2f82ee52012-10-30 19:40:33 -0500715 __rbd_get_client(client_node);
716
Alex Elder1f7ba332012-08-10 13:12:07 -0700717 found = true;
718 break;
719 }
720 }
721 spin_unlock(&rbd_client_list_lock);
722
723 return found ? client_node : NULL;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700724}
725
726/*
Ilya Dryomov210c1042015-06-22 13:24:48 +0300727 * (Per device) rbd map options
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700728 */
729enum {
Ilya Dryomovb5584182015-06-23 16:21:19 +0300730 Opt_queue_depth,
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700731 Opt_last_int,
732 /* int args above */
733 Opt_last_string,
734 /* string args above */
Alex Eldercc0538b2012-08-10 13:12:07 -0700735 Opt_read_only,
736 Opt_read_write,
Ilya Dryomov210c1042015-06-22 13:24:48 +0300737 Opt_err
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700738};
739
Alex Elder43ae4702012-07-03 16:01:18 -0500740static match_table_t rbd_opts_tokens = {
Ilya Dryomovb5584182015-06-23 16:21:19 +0300741 {Opt_queue_depth, "queue_depth=%d"},
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700742 /* int args above */
743 /* string args above */
Alex Elderbe466c12012-10-22 11:31:26 -0500744 {Opt_read_only, "read_only"},
Alex Eldercc0538b2012-08-10 13:12:07 -0700745 {Opt_read_only, "ro"}, /* Alternate spelling */
746 {Opt_read_write, "read_write"},
747 {Opt_read_write, "rw"}, /* Alternate spelling */
Ilya Dryomov210c1042015-06-22 13:24:48 +0300748 {Opt_err, NULL}
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700749};
750
Alex Elder98571b52013-01-20 14:44:42 -0600751struct rbd_options {
Ilya Dryomovb5584182015-06-23 16:21:19 +0300752 int queue_depth;
Alex Elder98571b52013-01-20 14:44:42 -0600753 bool read_only;
754};
755
Ilya Dryomovb5584182015-06-23 16:21:19 +0300756#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
Alex Elder98571b52013-01-20 14:44:42 -0600757#define RBD_READ_ONLY_DEFAULT false
758
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700759static int parse_rbd_opts_token(char *c, void *private)
760{
Alex Elder43ae4702012-07-03 16:01:18 -0500761 struct rbd_options *rbd_opts = private;
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700762 substring_t argstr[MAX_OPT_ARGS];
763 int token, intval, ret;
764
Alex Elder43ae4702012-07-03 16:01:18 -0500765 token = match_token(c, rbd_opts_tokens, argstr);
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700766 if (token < Opt_last_int) {
767 ret = match_int(&argstr[0], &intval);
768 if (ret < 0) {
Ilya Dryomov210c1042015-06-22 13:24:48 +0300769 pr_err("bad mount option arg (not int) at '%s'\n", c);
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700770 return ret;
771 }
772 dout("got int token %d val %d\n", token, intval);
773 } else if (token > Opt_last_int && token < Opt_last_string) {
Ilya Dryomov210c1042015-06-22 13:24:48 +0300774 dout("got string token %d val %s\n", token, argstr[0].from);
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700775 } else {
776 dout("got token %d\n", token);
777 }
778
779 switch (token) {
Ilya Dryomovb5584182015-06-23 16:21:19 +0300780 case Opt_queue_depth:
781 if (intval < 1) {
782 pr_err("queue_depth out of range\n");
783 return -EINVAL;
784 }
785 rbd_opts->queue_depth = intval;
786 break;
Alex Eldercc0538b2012-08-10 13:12:07 -0700787 case Opt_read_only:
788 rbd_opts->read_only = true;
789 break;
790 case Opt_read_write:
791 rbd_opts->read_only = false;
792 break;
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700793 default:
Ilya Dryomov210c1042015-06-22 13:24:48 +0300794 /* libceph prints "bad option" msg */
795 return -EINVAL;
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700796 }
Ilya Dryomov210c1042015-06-22 13:24:48 +0300797
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700798 return 0;
799}
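/*
 * Example (illustrative): an option string such as
 * "queue_depth=128,read_only" supplied at map time is split and fed
 * token by token to the parser above -- "queue_depth=128" takes the
 * integer branch and sets rbd_opts->queue_depth, while "read_only"
 * (or its alternate spelling "ro") sets rbd_opts->read_only to true.
 */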
800
Guangliang Zhao6d2940c2014-03-13 11:21:35 +0800801static char* obj_op_name(enum obj_operation_type op_type)
802{
803 switch (op_type) {
804 case OBJ_OP_READ:
805 return "read";
806 case OBJ_OP_WRITE:
807 return "write";
Guangliang Zhao90e98c52014-04-01 22:22:16 +0800808 case OBJ_OP_DISCARD:
809 return "discard";
Guangliang Zhao6d2940c2014-03-13 11:21:35 +0800810 default:
811 return "???";
812 }
813}
814
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700815/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700816 * Get a ceph client with specific addr and configuration, if one does
Alex Elder7262cfc2013-05-16 15:04:20 -0500817 * not exist create it. Either way, ceph_opts is consumed by this
818 * function.
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700819 */
Alex Elder9d3997f2012-10-25 23:34:42 -0500820static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700821{
Alex Elderf8c38922012-08-10 13:12:07 -0700822 struct rbd_client *rbdc;
Yehuda Sadeh59c2be12011-03-21 15:10:11 -0700823
Alex Eldercfbf6372013-05-31 17:40:45 -0500824 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
Alex Elder1f7ba332012-08-10 13:12:07 -0700825 rbdc = rbd_client_find(ceph_opts);
Alex Elder9d3997f2012-10-25 23:34:42 -0500826 if (rbdc) /* using an existing client */
Alex Elder43ae4702012-07-03 16:01:18 -0500827 ceph_destroy_options(ceph_opts);
Alex Elder9d3997f2012-10-25 23:34:42 -0500828 else
Alex Elderf8c38922012-08-10 13:12:07 -0700829 rbdc = rbd_client_create(ceph_opts);
Alex Eldercfbf6372013-05-31 17:40:45 -0500830 mutex_unlock(&client_mutex);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700831
Alex Elder9d3997f2012-10-25 23:34:42 -0500832 return rbdc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700833}
834
835/*
836 * Destroy ceph client
Alex Elderd23a4b32012-01-29 13:57:43 -0600837 *
Alex Elder432b8582012-01-29 13:57:44 -0600838 * Caller must hold rbd_client_list_lock.
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700839 */
840static void rbd_client_release(struct kref *kref)
841{
842 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
843
Alex Elder37206ee2013-02-20 17:32:08 -0600844 dout("%s: rbdc %p\n", __func__, rbdc);
Alex Eldercd9d9f52012-04-04 13:35:44 -0500845 spin_lock(&rbd_client_list_lock);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700846 list_del(&rbdc->node);
Alex Eldercd9d9f52012-04-04 13:35:44 -0500847 spin_unlock(&rbd_client_list_lock);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700848
849 ceph_destroy_client(rbdc->client);
850 kfree(rbdc);
851}
852
853/*
854 * Drop reference to ceph client node. If it's not referenced anymore, release
855 * it.
856 */
Alex Elder9d3997f2012-10-25 23:34:42 -0500857static void rbd_put_client(struct rbd_client *rbdc)
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700858{
Alex Elderc53d5892012-10-25 23:34:42 -0500859 if (rbdc)
860 kref_put(&rbdc->kref, rbd_client_release);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700861}
862
Alex Eldera30b71b2012-07-10 20:30:11 -0500863static bool rbd_image_format_valid(u32 image_format)
864{
865 return image_format == 1 || image_format == 2;
866}
867
Alex Elder8e94af82012-07-25 09:32:40 -0500868static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
869{
Alex Elder103a1502012-08-02 11:29:45 -0500870 size_t size;
871 u32 snap_count;
872
873 /* The header has to start with the magic rbd header text */
874 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
875 return false;
876
Alex Elderdb2388b2012-10-20 22:17:27 -0500877 /* The bio layer requires at least sector-sized I/O */
878
879 if (ondisk->options.order < SECTOR_SHIFT)
880 return false;
881
882 /* If we use u64 in a few spots we may be able to loosen this */
883
884 if (ondisk->options.order > 8 * sizeof (int) - 1)
885 return false;
886
Alex Elder103a1502012-08-02 11:29:45 -0500887 /*
888 * The size of a snapshot header has to fit in a size_t, and
889 * that limits the number of snapshots.
890 */
891 snap_count = le32_to_cpu(ondisk->snap_count);
892 size = SIZE_MAX - sizeof (struct ceph_snap_context);
893 if (snap_count > size / sizeof (__le64))
894 return false;
895
896 /*
 897 * Not only that, but the size of the entire snapshot
 898 * header must also be representable in a size_t.
899 */
900 size -= snap_count * sizeof (__le64);
901 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
902 return false;
903
904 return true;
Alex Elder8e94af82012-07-25 09:32:40 -0500905}
906
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700907/*
Alex Elderbb23e372013-05-06 09:51:29 -0500908 * Fill an rbd image header with information from the given format 1
909 * on-disk header.
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700910 */
Alex Elder662518b2013-05-06 09:51:29 -0500911static int rbd_header_from_disk(struct rbd_device *rbd_dev,
Alex Elder4156d992012-08-02 11:29:46 -0500912 struct rbd_image_header_ondisk *ondisk)
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700913{
Alex Elder662518b2013-05-06 09:51:29 -0500914 struct rbd_image_header *header = &rbd_dev->header;
Alex Elderbb23e372013-05-06 09:51:29 -0500915 bool first_time = header->object_prefix == NULL;
916 struct ceph_snap_context *snapc;
917 char *object_prefix = NULL;
918 char *snap_names = NULL;
919 u64 *snap_sizes = NULL;
Alex Elderccece232012-07-10 20:30:10 -0500920 u32 snap_count;
Alex Elderd2bb24e2012-07-26 23:37:14 -0500921 size_t size;
Alex Elderbb23e372013-05-06 09:51:29 -0500922 int ret = -ENOMEM;
Alex Elder621901d2012-08-23 23:22:06 -0500923 u32 i;
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700924
Alex Elderbb23e372013-05-06 09:51:29 -0500925 /* Allocate this now to avoid having to handle failure below */
926
927 if (first_time) {
928 size_t len;
929
930 len = strnlen(ondisk->object_prefix,
931 sizeof (ondisk->object_prefix));
932 object_prefix = kmalloc(len + 1, GFP_KERNEL);
933 if (!object_prefix)
934 return -ENOMEM;
935 memcpy(object_prefix, ondisk->object_prefix, len);
936 object_prefix[len] = '\0';
937 }
938
939 /* Allocate the snapshot context and fill it in */
Alex Elder6a523252012-07-19 17:12:59 -0500940
Alex Elder103a1502012-08-02 11:29:45 -0500941 snap_count = le32_to_cpu(ondisk->snap_count);
Alex Elderbb23e372013-05-06 09:51:29 -0500942 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
943 if (!snapc)
944 goto out_err;
945 snapc->seq = le64_to_cpu(ondisk->snap_seq);
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700946 if (snap_count) {
Alex Elderbb23e372013-05-06 09:51:29 -0500947 struct rbd_image_snap_ondisk *snaps;
Alex Elderf785cc12012-08-23 23:22:06 -0500948 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
949
Alex Elderbb23e372013-05-06 09:51:29 -0500950 /* We'll keep a copy of the snapshot names... */
Alex Elder621901d2012-08-23 23:22:06 -0500951
Alex Elderbb23e372013-05-06 09:51:29 -0500952 if (snap_names_len > (u64)SIZE_MAX)
953 goto out_2big;
954 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
955 if (!snap_names)
Alex Elder6a523252012-07-19 17:12:59 -0500956 goto out_err;
Alex Elderbb23e372013-05-06 09:51:29 -0500957
958 /* ...as well as the array of their sizes. */
959
960 size = snap_count * sizeof (*header->snap_sizes);
961 snap_sizes = kmalloc(size, GFP_KERNEL);
962 if (!snap_sizes)
963 goto out_err;
964
Alex Elderf785cc12012-08-23 23:22:06 -0500965 /*
Alex Elderbb23e372013-05-06 09:51:29 -0500966 * Copy the names, and fill in each snapshot's id
967 * and size.
968 *
Alex Elder99a41eb2013-05-06 09:51:30 -0500969 * Note that rbd_dev_v1_header_info() guarantees the
Alex Elderbb23e372013-05-06 09:51:29 -0500970 * ondisk buffer we're working with has
Alex Elderf785cc12012-08-23 23:22:06 -0500971 * snap_names_len bytes beyond the end of the
972 * snapshot id array, this memcpy() is safe.
973 */
Alex Elderbb23e372013-05-06 09:51:29 -0500974 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
975 snaps = ondisk->snaps;
976 for (i = 0; i < snap_count; i++) {
977 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
978 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
979 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700980 }
Alex Elder849b4262012-07-09 21:04:24 -0500981
Alex Elderbb23e372013-05-06 09:51:29 -0500982 /* We won't fail any more, fill in the header */
Alex Elder6a523252012-07-19 17:12:59 -0500983
Alex Elderbb23e372013-05-06 09:51:29 -0500984 if (first_time) {
985 header->object_prefix = object_prefix;
986 header->obj_order = ondisk->options.order;
987 header->crypt_type = ondisk->options.crypt_type;
988 header->comp_type = ondisk->options.comp_type;
989 /* The rest aren't used for format 1 images */
990 header->stripe_unit = 0;
991 header->stripe_count = 0;
992 header->features = 0;
Alex Elder662518b2013-05-06 09:51:29 -0500993 } else {
994 ceph_put_snap_context(header->snapc);
995 kfree(header->snap_names);
996 kfree(header->snap_sizes);
Alex Elderbb23e372013-05-06 09:51:29 -0500997 }
998
999 /* The remaining fields always get updated (when we refresh) */
Alex Elder621901d2012-08-23 23:22:06 -05001000
Alex Elderf84344f2012-08-31 17:29:51 -05001001 header->image_size = le64_to_cpu(ondisk->image_size);
Alex Elderbb23e372013-05-06 09:51:29 -05001002 header->snapc = snapc;
1003 header->snap_names = snap_names;
1004 header->snap_sizes = snap_sizes;
Alex Elder468521c2013-04-26 09:43:47 -05001005
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001006 return 0;
Alex Elderbb23e372013-05-06 09:51:29 -05001007out_2big:
1008 ret = -EIO;
Alex Elder6a523252012-07-19 17:12:59 -05001009out_err:
Alex Elderbb23e372013-05-06 09:51:29 -05001010 kfree(snap_sizes);
1011 kfree(snap_names);
1012 ceph_put_snap_context(snapc);
1013 kfree(object_prefix);
Alex Elderccece232012-07-10 20:30:10 -05001014
Alex Elderbb23e372013-05-06 09:51:29 -05001015 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001016}
1017
Alex Elder9682fc62013-04-30 00:44:33 -05001018static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1019{
1020 const char *snap_name;
1021
1022 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1023
1024 /* Skip over names until we find the one we are looking for */
1025
1026 snap_name = rbd_dev->header.snap_names;
1027 while (which--)
1028 snap_name += strlen(snap_name) + 1;
1029
1030 return kstrdup(snap_name, GFP_KERNEL);
1031}
1032
Alex Elder30d1cff2013-05-01 12:43:03 -05001033/*
1034 * Snapshot id comparison function for use with qsort()/bsearch().
1035 * Note that result is for snapshots in *descending* order.
1036 */
1037static int snapid_compare_reverse(const void *s1, const void *s2)
1038{
1039 u64 snap_id1 = *(u64 *)s1;
1040 u64 snap_id2 = *(u64 *)s2;
1041
1042 if (snap_id1 < snap_id2)
1043 return 1;
1044 return snap_id1 == snap_id2 ? 0 : -1;
1045}
1046
1047/*
1048 * Search a snapshot context to see if the given snapshot id is
1049 * present.
1050 *
1051 * Returns the position of the snapshot id in the array if it's found,
1052 * or BAD_SNAP_INDEX otherwise.
1053 *
 1054 * Note: The snapshot array is kept sorted (by the osd) in
1055 * reverse order, highest snapshot id first.
1056 */
Alex Elder9682fc62013-04-30 00:44:33 -05001057static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1058{
1059 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
Alex Elder30d1cff2013-05-01 12:43:03 -05001060 u64 *found;
Alex Elder9682fc62013-04-30 00:44:33 -05001061
Alex Elder30d1cff2013-05-01 12:43:03 -05001062 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1063 sizeof (snap_id), snapid_compare_reverse);
Alex Elder9682fc62013-04-30 00:44:33 -05001064
Alex Elder30d1cff2013-05-01 12:43:03 -05001065 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
Alex Elder9682fc62013-04-30 00:44:33 -05001066}
1067
Alex Elder2ad3d712013-04-30 00:44:33 -05001068static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1069 u64 snap_id)
Alex Elder54cac612013-04-30 00:44:33 -05001070{
1071 u32 which;
Josh Durginda6a6b62013-09-04 17:57:31 -07001072 const char *snap_name;
Alex Elder54cac612013-04-30 00:44:33 -05001073
1074 which = rbd_dev_snap_index(rbd_dev, snap_id);
1075 if (which == BAD_SNAP_INDEX)
Josh Durginda6a6b62013-09-04 17:57:31 -07001076 return ERR_PTR(-ENOENT);
Alex Elder54cac612013-04-30 00:44:33 -05001077
Josh Durginda6a6b62013-09-04 17:57:31 -07001078 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1079 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
Alex Elder54cac612013-04-30 00:44:33 -05001080}
1081
Alex Elder9e15b772012-10-30 19:40:33 -05001082static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1083{
Alex Elder9e15b772012-10-30 19:40:33 -05001084 if (snap_id == CEPH_NOSNAP)
1085 return RBD_SNAP_HEAD_NAME;
1086
Alex Elder54cac612013-04-30 00:44:33 -05001087 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1088 if (rbd_dev->image_format == 1)
1089 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
Alex Elder9e15b772012-10-30 19:40:33 -05001090
Alex Elder54cac612013-04-30 00:44:33 -05001091 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
Alex Elder9e15b772012-10-30 19:40:33 -05001092}
1093
Alex Elder2ad3d712013-04-30 00:44:33 -05001094static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1095 u64 *snap_size)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001096{
Alex Elder2ad3d712013-04-30 00:44:33 -05001097 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1098 if (snap_id == CEPH_NOSNAP) {
1099 *snap_size = rbd_dev->header.image_size;
1100 } else if (rbd_dev->image_format == 1) {
1101 u32 which;
Alex Elder00f1f362012-02-07 12:03:36 -06001102
Alex Elder2ad3d712013-04-30 00:44:33 -05001103 which = rbd_dev_snap_index(rbd_dev, snap_id);
1104 if (which == BAD_SNAP_INDEX)
1105 return -ENOENT;
Alex Elder00f1f362012-02-07 12:03:36 -06001106
Alex Elder2ad3d712013-04-30 00:44:33 -05001107 *snap_size = rbd_dev->header.snap_sizes[which];
1108 } else {
1109 u64 size = 0;
1110 int ret;
1111
1112 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1113 if (ret)
1114 return ret;
1115
1116 *snap_size = size;
1117 }
1118 return 0;
1119}
1120
1121static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1122 u64 *snap_features)
1123{
1124 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1125 if (snap_id == CEPH_NOSNAP) {
1126 *snap_features = rbd_dev->header.features;
1127 } else if (rbd_dev->image_format == 1) {
1128 *snap_features = 0; /* No features for format 1 */
1129 } else {
1130 u64 features = 0;
1131 int ret;
1132
1133 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1134 if (ret)
1135 return ret;
1136
1137 *snap_features = features;
1138 }
1139 return 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001140}
1141
Alex Elderd1cf5782013-04-27 09:59:30 -05001142static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001143{
Alex Elder8f4b7d92013-05-06 07:40:30 -05001144 u64 snap_id = rbd_dev->spec->snap_id;
Alex Elder2ad3d712013-04-30 00:44:33 -05001145 u64 size = 0;
1146 u64 features = 0;
1147 int ret;
Alex Elder8b0241f2013-04-25 23:15:08 -05001148
Alex Elder2ad3d712013-04-30 00:44:33 -05001149 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1150 if (ret)
1151 return ret;
1152 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1153 if (ret)
1154 return ret;
1155
1156 rbd_dev->mapping.size = size;
1157 rbd_dev->mapping.features = features;
1158
Alex Elder8b0241f2013-04-25 23:15:08 -05001159 return 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001160}
1161
Alex Elderd1cf5782013-04-27 09:59:30 -05001162static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1163{
1164 rbd_dev->mapping.size = 0;
1165 rbd_dev->mapping.features = 0;
Alex Elder200a6a82013-04-28 23:32:34 -05001166}
1167
Himangi Saraogi7d5079a2014-07-24 03:17:07 +05301168static void rbd_segment_name_free(const char *name)
1169{
1170 /* The explicit cast here is needed to drop the const qualifier */
1171
1172 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1173}
1174
Alex Elder98571b52013-01-20 14:44:42 -06001175static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001176{
Alex Elder65ccfe22012-08-09 10:33:26 -07001177 char *name;
1178 u64 segment;
1179 int ret;
Josh Durgin3a96d5c2013-06-12 19:15:06 -07001180 char *name_format;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001181
Alex Elder78c2a442013-05-01 12:43:04 -05001182 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
Alex Elder65ccfe22012-08-09 10:33:26 -07001183 if (!name)
1184 return NULL;
1185 segment = offset >> rbd_dev->header.obj_order;
Josh Durgin3a96d5c2013-06-12 19:15:06 -07001186 name_format = "%s.%012llx";
1187 if (rbd_dev->image_format == 2)
1188 name_format = "%s.%016llx";
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02001189 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
Alex Elder65ccfe22012-08-09 10:33:26 -07001190 rbd_dev->header.object_prefix, segment);
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02001191 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
Alex Elder65ccfe22012-08-09 10:33:26 -07001192 pr_err("error formatting segment name for #%llu (%d)\n",
1193 segment, ret);
Himangi Saraogi7d5079a2014-07-24 03:17:07 +05301194 rbd_segment_name_free(name);
Alex Elder65ccfe22012-08-09 10:33:26 -07001195 name = NULL;
1196 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001197
Alex Elder65ccfe22012-08-09 10:33:26 -07001198 return name;
1199}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001200
Alex Elder65ccfe22012-08-09 10:33:26 -07001201static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1202{
1203 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001204
Alex Elder65ccfe22012-08-09 10:33:26 -07001205 return offset & (segment_size - 1);
1206}
1207
1208static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1209 u64 offset, u64 length)
1210{
1211 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1212
1213 offset &= segment_size - 1;
1214
Alex Elderaafb2302012-09-06 16:00:54 -05001215 rbd_assert(length <= U64_MAX - offset);
Alex Elder65ccfe22012-08-09 10:33:26 -07001216 if (offset + length > segment_size)
1217 length = segment_size - offset;
1218
1219 return length;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001220}
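/*
 * Worked example (assuming the common object order of 22, i.e. 4 MiB
 * objects, and a made-up format 2 prefix "rbd_data.1234"): an I/O at
 * image offset 9 MiB falls in segment 9 MiB >> 22 = 2, so
 * rbd_segment_name() yields "rbd_data.1234.0000000000000002",
 * rbd_segment_offset() yields 1 MiB, and a 5 MiB request would be
 * clipped by rbd_segment_length() to the 3 MiB left in that object.
 */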
1221
1222/*
Josh Durgin029bcbd2011-07-22 11:35:23 -07001223 * returns the size of an object in the image
1224 */
1225static u64 rbd_obj_bytes(struct rbd_image_header *header)
1226{
1227 return 1 << header->obj_order;
1228}
1229
1230/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001231 * bio helpers
1232 */
1233
1234static void bio_chain_put(struct bio *chain)
1235{
1236 struct bio *tmp;
1237
1238 while (chain) {
1239 tmp = chain;
1240 chain = chain->bi_next;
1241 bio_put(tmp);
1242 }
1243}
1244
1245/*
 1246 * zeros a bio chain, starting at a specific offset
1247 */
1248static void zero_bio_chain(struct bio *chain, int start_ofs)
1249{
Kent Overstreet79886132013-11-23 17:19:00 -08001250 struct bio_vec bv;
1251 struct bvec_iter iter;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001252 unsigned long flags;
1253 void *buf;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001254 int pos = 0;
1255
1256 while (chain) {
Kent Overstreet79886132013-11-23 17:19:00 -08001257 bio_for_each_segment(bv, chain, iter) {
1258 if (pos + bv.bv_len > start_ofs) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001259 int remainder = max(start_ofs - pos, 0);
Kent Overstreet79886132013-11-23 17:19:00 -08001260 buf = bvec_kmap_irq(&bv, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001261 memset(buf + remainder, 0,
Kent Overstreet79886132013-11-23 17:19:00 -08001262 bv.bv_len - remainder);
1263 flush_dcache_page(bv.bv_page);
Dan Carpenter85b5aaa2010-10-11 21:15:11 +02001264 bvec_kunmap_irq(buf, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001265 }
Kent Overstreet79886132013-11-23 17:19:00 -08001266 pos += bv.bv_len;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001267 }
1268
1269 chain = chain->bi_next;
1270 }
1271}
1272
1273/*
Alex Elderb9434c52013-04-19 15:34:50 -05001274 * similar to zero_bio_chain(), zeros data defined by a page array,
1275 * starting at the given byte offset from the start of the array and
1276 * continuing up to the given end offset. The pages array is
1277 * assumed to be big enough to hold all bytes up to the end.
1278 */
1279static void zero_pages(struct page **pages, u64 offset, u64 end)
1280{
1281 struct page **page = &pages[offset >> PAGE_SHIFT];
1282
1283 rbd_assert(end > offset);
1284 rbd_assert(end - offset <= (u64)SIZE_MAX);
1285 while (offset < end) {
1286 size_t page_offset;
1287 size_t length;
1288 unsigned long flags;
1289 void *kaddr;
1290
Geert Uytterhoeven491205a2013-05-13 20:35:37 -05001291 page_offset = offset & ~PAGE_MASK;
1292 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
Alex Elderb9434c52013-04-19 15:34:50 -05001293 local_irq_save(flags);
1294 kaddr = kmap_atomic(*page);
1295 memset(kaddr + page_offset, 0, length);
Alex Eldere2156052013-05-22 20:54:25 -05001296 flush_dcache_page(*page);
Alex Elderb9434c52013-04-19 15:34:50 -05001297 kunmap_atomic(kaddr);
1298 local_irq_restore(flags);
1299
1300 offset += length;
1301 page++;
1302 }
1303}
1304
1305/*
Alex Elderf7760da2012-10-20 22:17:27 -05001306 * Clone a portion of a bio, starting at the given byte offset
1307 * and continuing for the number of bytes indicated.
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001308 */
Alex Elderf7760da2012-10-20 22:17:27 -05001309static struct bio *bio_clone_range(struct bio *bio_src,
1310 unsigned int offset,
1311 unsigned int len,
1312 gfp_t gfpmask)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001313{
Alex Elderf7760da2012-10-20 22:17:27 -05001314 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001315
Kent Overstreet5341a6272013-08-07 14:31:11 -07001316 bio = bio_clone(bio_src, gfpmask);
Alex Elderf7760da2012-10-20 22:17:27 -05001317 if (!bio)
1318 return NULL; /* ENOMEM */
1319
Kent Overstreet5341a6272013-08-07 14:31:11 -07001320 bio_advance(bio, offset);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001321 bio->bi_iter.bi_size = len;
Alex Elder542582f2012-08-09 10:33:25 -07001322
Alex Elderf7760da2012-10-20 22:17:27 -05001323 return bio;
1324}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001325
Alex Elderf7760da2012-10-20 22:17:27 -05001326/*
1327 * Clone a portion of a bio chain, starting at the given byte offset
1328 * into the first bio in the source chain and continuing for the
1329 * number of bytes indicated. The result is another bio chain of
1330 * exactly the given length, or a null pointer on error.
1331 *
1332 * The bio_src and offset parameters are both in-out. On entry they
1333 * refer to the first source bio and the offset into that bio where
1334 * the start of data to be cloned is located.
1335 *
1336 * On return, bio_src is updated to refer to the bio in the source
 1337 * chain that contains the first un-cloned byte, and *offset will
1338 * contain the offset of that byte within that bio.
1339 */
1340static struct bio *bio_chain_clone_range(struct bio **bio_src,
1341 unsigned int *offset,
1342 unsigned int len,
1343 gfp_t gfpmask)
1344{
1345 struct bio *bi = *bio_src;
1346 unsigned int off = *offset;
1347 struct bio *chain = NULL;
1348 struct bio **end;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001349
Alex Elderf7760da2012-10-20 22:17:27 -05001350 /* Build up a chain of clone bios up to the limit */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001351
Kent Overstreet4f024f32013-10-11 15:44:27 -07001352 if (!bi || off >= bi->bi_iter.bi_size || !len)
Alex Elderf7760da2012-10-20 22:17:27 -05001353 return NULL; /* Nothing to clone */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001354
Alex Elderf7760da2012-10-20 22:17:27 -05001355 end = &chain;
1356 while (len) {
1357 unsigned int bi_size;
1358 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001359
Alex Elderf5400b72012-11-01 10:17:15 -05001360 if (!bi) {
1361 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
Alex Elderf7760da2012-10-20 22:17:27 -05001362 goto out_err; /* EINVAL; ran out of bio's */
Alex Elderf5400b72012-11-01 10:17:15 -05001363 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001364 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
Alex Elderf7760da2012-10-20 22:17:27 -05001365 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1366 if (!bio)
1367 goto out_err; /* ENOMEM */
1368
1369 *end = bio;
1370 end = &bio->bi_next;
1371
1372 off += bi_size;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001373 if (off == bi->bi_iter.bi_size) {
Alex Elderf7760da2012-10-20 22:17:27 -05001374 bi = bi->bi_next;
1375 off = 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001376 }
Alex Elderf7760da2012-10-20 22:17:27 -05001377 len -= bi_size;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001378 }
Alex Elderf7760da2012-10-20 22:17:27 -05001379 *bio_src = bi;
1380 *offset = off;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001381
Alex Elderf7760da2012-10-20 22:17:27 -05001382 return chain;
1383out_err:
1384 bio_chain_put(chain);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001385
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001386 return NULL;
1387}
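/*
 * Hedged usage sketch (not part of the original source; names such as
 * rq_bio are placeholders): callers typically keep a cursor into the
 * source chain and let this function advance it one piece at a time:
 *
 *	struct bio *src = rq_bio;	// head of the source chain
 *	unsigned int off = 0;		// offset into that bio
 *
 *	while (resid) {
 *		len = rbd_segment_length(rbd_dev, img_offset, resid);
 *		clone = bio_chain_clone_range(&src, &off, len, GFP_NOIO);
 *		if (!clone)
 *			break;		// ENOMEM or chain exhausted
 *		// attach clone to an object request
 *		img_offset += len;
 *		resid -= len;
 *	}
 *
 * rbd_img_request_fill() below follows essentially this pattern for
 * OBJ_REQUEST_BIO image requests.
 */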
1388
Alex Elder926f9b32013-02-11 12:33:24 -06001389/*
1390 * The default/initial value for all object request flags is 0. For
1391 * each flag, once its value is set to 1 it is never reset to 0
1392 * again.
1393 */
Alex Elder6365d332013-02-11 12:33:24 -06001394static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1395{
1396 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
Alex Elder6365d332013-02-11 12:33:24 -06001397 struct rbd_device *rbd_dev;
1398
Alex Elder57acbaa2013-02-11 12:33:24 -06001399 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001400 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
Alex Elder6365d332013-02-11 12:33:24 -06001401 obj_request);
1402 }
1403}
1404
1405static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1406{
1407 smp_mb();
1408 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1409}
1410
Alex Elder57acbaa2013-02-11 12:33:24 -06001411static void obj_request_done_set(struct rbd_obj_request *obj_request)
1412{
1413 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1414 struct rbd_device *rbd_dev = NULL;
1415
1416 if (obj_request_img_data_test(obj_request))
1417 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001418 rbd_warn(rbd_dev, "obj_request %p already marked done",
Alex Elder57acbaa2013-02-11 12:33:24 -06001419 obj_request);
1420 }
1421}
1422
1423static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1424{
1425 smp_mb();
1426 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1427}
1428
Alex Elder5679c592013-02-11 12:33:24 -06001429/*
1430 * This sets the KNOWN flag after (possibly) setting the EXISTS
1431 * flag. The latter is set based on the "exists" value provided.
1432 *
1433 * Note that for our purposes once an object exists it never goes
 1434 * away again. It's possible that the responses to two existence
 1435 * checks are separated by the creation of the target object, with
 1436 * the stale ("doesn't exist") response arriving *after* the fresh
 1437 * ("does exist") one. In that case the stale response is ignored.
1438 */
1439static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1440 bool exists)
1441{
1442 if (exists)
1443 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1444 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1445 smp_mb();
1446}
1447
1448static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1449{
1450 smp_mb();
1451 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1452}
1453
1454static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1455{
1456 smp_mb();
1457 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1458}
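/*
 * Clarifying note (not from the original source): the EXISTS flag is
 * only meaningful once KNOWN has been set, so consumers are expected
 * to check obj_request_known_test() first and consult
 * obj_request_exists_test() only when that returns true.
 */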
1459
Ilya Dryomov96385562014-06-10 13:53:29 +04001460static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1461{
1462 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1463
1464 return obj_request->img_offset <
1465 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1466}
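/*
 * Clarifying example (assumed numbers): with a parent_overlap of
 * 6 MiB and 4 MiB objects, round_up() yields 8 MiB, so any object
 * request whose img_offset is below 8 MiB is treated as possibly
 * overlapping the parent image.
 */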
1467
Alex Elderbf0d5f502012-11-22 00:00:08 -06001468static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1469{
Alex Elder37206ee2013-02-20 17:32:08 -06001470 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1471 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001472 kref_get(&obj_request->kref);
1473}
1474
1475static void rbd_obj_request_destroy(struct kref *kref);
1476static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1477{
1478 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001479 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1480 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001481 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1482}
1483
Alex Elder0f2d5be2014-04-26 14:21:44 +04001484static void rbd_img_request_get(struct rbd_img_request *img_request)
1485{
1486 dout("%s: img %p (was %d)\n", __func__, img_request,
1487 atomic_read(&img_request->kref.refcount));
1488 kref_get(&img_request->kref);
1489}
1490
Alex Eldere93f3152013-05-08 22:50:04 -05001491static bool img_request_child_test(struct rbd_img_request *img_request);
1492static void rbd_parent_request_destroy(struct kref *kref);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001493static void rbd_img_request_destroy(struct kref *kref);
1494static void rbd_img_request_put(struct rbd_img_request *img_request)
1495{
1496 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001497 dout("%s: img %p (was %d)\n", __func__, img_request,
1498 atomic_read(&img_request->kref.refcount));
Alex Eldere93f3152013-05-08 22:50:04 -05001499 if (img_request_child_test(img_request))
1500 kref_put(&img_request->kref, rbd_parent_request_destroy);
1501 else
1502 kref_put(&img_request->kref, rbd_img_request_destroy);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001503}
1504
1505static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1506 struct rbd_obj_request *obj_request)
1507{
Alex Elder25dcf952013-01-25 17:08:55 -06001508 rbd_assert(obj_request->img_request == NULL);
1509
Alex Elderb155e862013-04-15 14:50:37 -05001510 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001511 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001512 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001513 rbd_assert(!obj_request_img_data_test(obj_request));
1514 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001515 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001516 img_request->obj_request_count++;
1517 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001518 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1519 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001520}
1521
1522static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1523 struct rbd_obj_request *obj_request)
1524{
1525 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001526
Alex Elder37206ee2013-02-20 17:32:08 -06001527 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1528 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001529 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001530 rbd_assert(img_request->obj_request_count > 0);
1531 img_request->obj_request_count--;
1532 rbd_assert(obj_request->which == img_request->obj_request_count);
1533 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001534 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001535 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001536 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001537 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001538 rbd_obj_request_put(obj_request);
1539}
1540
1541static bool obj_request_type_valid(enum obj_request_type type)
1542{
1543 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001544 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001545 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001546 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001547 return true;
1548 default:
1549 return false;
1550 }
1551}
1552
Alex Elderbf0d5f502012-11-22 00:00:08 -06001553static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1554 struct rbd_obj_request *obj_request)
1555{
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001556 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001557 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1558}
1559
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001560static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1561{
1562 dout("%s %p\n", __func__, obj_request);
1563 ceph_osdc_cancel_request(obj_request->osd_req);
1564}
1565
1566/*
1567 * Wait for an object request to complete. If interrupted, cancel the
1568 * underlying osd request.
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001569 *
1570 * @timeout: in jiffies, 0 means "wait forever"
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001571 */
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001572static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1573 unsigned long timeout)
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001574{
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001575 long ret;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001576
1577 dout("%s %p\n", __func__, obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001578 ret = wait_for_completion_interruptible_timeout(
1579 &obj_request->completion,
1580 ceph_timeout_jiffies(timeout));
1581 if (ret <= 0) {
1582 if (ret == 0)
1583 ret = -ETIMEDOUT;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001584 rbd_obj_request_end(obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001585 } else {
1586 ret = 0;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001587 }
1588
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001589 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1590 return ret;
1591}
1592
1593static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1594{
1595 return __rbd_obj_request_wait(obj_request, 0);
1596}
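/*
 * Hedged usage sketch (not from the original source): synchronous
 * callers typically pair submission with a wait and then look at the
 * request's result, roughly:
 *
 *	ret = rbd_obj_request_submit(osdc, obj_request);
 *	if (!ret)
 *		ret = rbd_obj_request_wait(obj_request);
 *	if (!ret)
 *		ret = obj_request->result;
 */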
1597
Alex Elderbf0d5f502012-11-22 00:00:08 -06001598static void rbd_img_request_complete(struct rbd_img_request *img_request)
1599{
Alex Elder55f27e02013-04-10 12:34:25 -05001600
Alex Elder37206ee2013-02-20 17:32:08 -06001601 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001602
1603 /*
1604 * If no error occurred, compute the aggregate transfer
1605 * count for the image request. We could instead use
1606 * atomic64_cmpxchg() to update it as each object request
 1607 * completes; not clear offhand which way is better.
1608 */
1609 if (!img_request->result) {
1610 struct rbd_obj_request *obj_request;
1611 u64 xferred = 0;
1612
1613 for_each_obj_request(img_request, obj_request)
1614 xferred += obj_request->xferred;
1615 img_request->xferred = xferred;
1616 }
1617
Alex Elderbf0d5f502012-11-22 00:00:08 -06001618 if (img_request->callback)
1619 img_request->callback(img_request);
1620 else
1621 rbd_img_request_put(img_request);
1622}
1623
Alex Elder0c425242013-02-08 09:55:49 -06001624/*
1625 * The default/initial value for all image request flags is 0. Each
1626 * is conditionally set to 1 at image request initialization time
1627 * and currently never change thereafter.
1628 */
1629static void img_request_write_set(struct rbd_img_request *img_request)
1630{
1631 set_bit(IMG_REQ_WRITE, &img_request->flags);
1632 smp_mb();
1633}
1634
1635static bool img_request_write_test(struct rbd_img_request *img_request)
1636{
1637 smp_mb();
1638 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1639}
1640
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001641/*
 1642 * Set the discard flag when the img_request is a discard request
1643 */
1644static void img_request_discard_set(struct rbd_img_request *img_request)
1645{
1646 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1647 smp_mb();
1648}
1649
1650static bool img_request_discard_test(struct rbd_img_request *img_request)
1651{
1652 smp_mb();
1653 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1654}
1655
Alex Elder9849e982013-01-24 16:13:36 -06001656static void img_request_child_set(struct rbd_img_request *img_request)
1657{
1658 set_bit(IMG_REQ_CHILD, &img_request->flags);
1659 smp_mb();
1660}
1661
Alex Eldere93f3152013-05-08 22:50:04 -05001662static void img_request_child_clear(struct rbd_img_request *img_request)
1663{
1664 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1665 smp_mb();
1666}
1667
Alex Elder9849e982013-01-24 16:13:36 -06001668static bool img_request_child_test(struct rbd_img_request *img_request)
1669{
1670 smp_mb();
1671 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1672}
1673
Alex Elderd0b2e942013-01-24 16:13:36 -06001674static void img_request_layered_set(struct rbd_img_request *img_request)
1675{
1676 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1677 smp_mb();
1678}
1679
Alex Eldera2acd002013-05-08 22:50:04 -05001680static void img_request_layered_clear(struct rbd_img_request *img_request)
1681{
1682 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1683 smp_mb();
1684}
1685
Alex Elderd0b2e942013-01-24 16:13:36 -06001686static bool img_request_layered_test(struct rbd_img_request *img_request)
1687{
1688 smp_mb();
1689 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1690}
1691
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001692static enum obj_operation_type
1693rbd_img_request_op_type(struct rbd_img_request *img_request)
1694{
1695 if (img_request_write_test(img_request))
1696 return OBJ_OP_WRITE;
1697 else if (img_request_discard_test(img_request))
1698 return OBJ_OP_DISCARD;
1699 else
1700 return OBJ_OP_READ;
1701}
1702
Alex Elder6e2a4502013-03-27 09:16:30 -05001703static void
1704rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1705{
Alex Elderb9434c52013-04-19 15:34:50 -05001706 u64 xferred = obj_request->xferred;
1707 u64 length = obj_request->length;
1708
Alex Elder6e2a4502013-03-27 09:16:30 -05001709 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1710 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001711 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001712 /*
Josh Durgin17c1cc12013-08-26 17:55:38 -07001713 * ENOENT means a hole in the image. We zero-fill the entire
1714 * length of the request. A short read also implies zero-fill
1715 * to the end of the request. An error requires the whole
1716 * length of the request to be reported finished with an error
1717 * to the block layer. In each case we update the xferred
1718 * count to indicate the whole request was satisfied.
Alex Elder6e2a4502013-03-27 09:16:30 -05001719 */
Alex Elderb9434c52013-04-19 15:34:50 -05001720 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001721 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001722 if (obj_request->type == OBJ_REQUEST_BIO)
1723 zero_bio_chain(obj_request->bio_list, 0);
1724 else
1725 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001726 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001727 } else if (xferred < length && !obj_request->result) {
1728 if (obj_request->type == OBJ_REQUEST_BIO)
1729 zero_bio_chain(obj_request->bio_list, xferred);
1730 else
1731 zero_pages(obj_request->pages, xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001732 }
Josh Durgin17c1cc12013-08-26 17:55:38 -07001733 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001734 obj_request_done_set(obj_request);
1735}
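/*
 * Worked example (illustrative numbers, not from the original
 * comments): for a 0x4000-byte object read that fails with -ENOENT,
 * all 0x4000 bytes are zero-filled; for one that transfers only
 * 0x1000 bytes successfully, bytes 0x1000..0x3fff are zero-filled.
 * Either way xferred is reported as the full 0x4000 so the block
 * layer sees the request as fully satisfied.
 */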
1736
Alex Elderbf0d5f502012-11-22 00:00:08 -06001737static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1738{
Alex Elder37206ee2013-02-20 17:32:08 -06001739 dout("%s: obj %p cb %p\n", __func__, obj_request,
1740 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001741 if (obj_request->callback)
1742 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001743 else
1744 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001745}
1746
Alex Elderc47f9372013-02-26 14:23:07 -06001747static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001748{
Alex Elder57acbaa2013-02-11 12:33:24 -06001749 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001750 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001751 bool layered = false;
1752
1753 if (obj_request_img_data_test(obj_request)) {
1754 img_request = obj_request->img_request;
1755 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001756 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001757 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001758
1759 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1760 obj_request, img_request, obj_request->result,
1761 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001762 if (layered && obj_request->result == -ENOENT &&
1763 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001764 rbd_img_parent_read(obj_request);
1765 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001766 rbd_img_obj_request_read_callback(obj_request);
1767 else
1768 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001769}
1770
Alex Elderc47f9372013-02-26 14:23:07 -06001771static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001772{
Sage Weil1b83bef2013-02-25 16:11:12 -08001773 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1774 obj_request->result, obj_request->length);
1775 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001776 * There is no such thing as a successful short write. Set
1777 * it to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001778 */
1779 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001780 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001781}
1782
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001783static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1784{
1785 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1786 obj_request->result, obj_request->length);
1787 /*
1788 * There is no such thing as a successful short discard. Set
1789 * it to our originally-requested length.
1790 */
1791 obj_request->xferred = obj_request->length;
Josh Durgind0265de2014-04-07 16:54:10 -07001792 /* discarding a non-existent object is not a problem */
1793 if (obj_request->result == -ENOENT)
1794 obj_request->result = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001795 obj_request_done_set(obj_request);
1796}
1797
Alex Elderfbfab532013-02-08 09:55:48 -06001798/*
1799 * For a simple stat call there's nothing to do. We'll do more if
1800 * this is part of a write sequence for a layered image.
1801 */
Alex Elderc47f9372013-02-26 14:23:07 -06001802static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001803{
Alex Elder37206ee2013-02-20 17:32:08 -06001804 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001805 obj_request_done_set(obj_request);
1806}
1807
Ilya Dryomov27617132015-07-16 17:36:11 +03001808static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1809{
1810 dout("%s: obj %p\n", __func__, obj_request);
1811
1812 if (obj_request_img_data_test(obj_request))
1813 rbd_osd_copyup_callback(obj_request);
1814 else
1815 obj_request_done_set(obj_request);
1816}
1817
Ilya Dryomov85e084f2016-04-28 16:07:24 +02001818static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001819{
1820 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001821 u16 opcode;
1822
Ilya Dryomov85e084f2016-04-28 16:07:24 +02001823 dout("%s: osd_req %p\n", __func__, osd_req);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001824 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001825 if (obj_request_img_data_test(obj_request)) {
1826 rbd_assert(obj_request->img_request);
1827 rbd_assert(obj_request->which != BAD_WHICH);
1828 } else {
1829 rbd_assert(obj_request->which == BAD_WHICH);
1830 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001831
Sage Weil1b83bef2013-02-25 16:11:12 -08001832 if (osd_req->r_result < 0)
1833 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001834
Alex Elderc47f9372013-02-26 14:23:07 -06001835 /*
1836 * We support a 64-bit length, but ultimately it has to be
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01001837 * passed to the block layer, which just supports a 32-bit
1838 * length field.
Alex Elderc47f9372013-02-26 14:23:07 -06001839 */
Yan, Zheng7665d852016-01-07 16:48:57 +08001840 obj_request->xferred = osd_req->r_ops[0].outdata_len;
Alex Elder8b3e1a52013-01-24 16:13:36 -06001841 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001842
Alex Elder79528732013-04-03 21:32:51 -05001843 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001844 switch (opcode) {
1845 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001846 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001847 break;
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001848 case CEPH_OSD_OP_SETALLOCHINT:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001849 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1850 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001851 /* fall through */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001852 case CEPH_OSD_OP_WRITE:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001853 case CEPH_OSD_OP_WRITEFULL:
Alex Elderc47f9372013-02-26 14:23:07 -06001854 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001855 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001856 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001857 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001858 break;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001859 case CEPH_OSD_OP_DELETE:
1860 case CEPH_OSD_OP_TRUNCATE:
1861 case CEPH_OSD_OP_ZERO:
1862 rbd_osd_discard_callback(obj_request);
1863 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001864 case CEPH_OSD_OP_CALL:
Ilya Dryomov27617132015-07-16 17:36:11 +03001865 rbd_osd_call_callback(obj_request);
1866 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001867 default:
Ilya Dryomov9584d502014-07-11 12:11:20 +04001868 rbd_warn(NULL, "%s: unsupported op %hu",
Alex Elderbf0d5f502012-11-22 00:00:08 -06001869 obj_request->object_name, (unsigned short) opcode);
1870 break;
1871 }
1872
Alex Elder07741302013-02-05 23:41:50 -06001873 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001874 rbd_obj_request_complete(obj_request);
1875}
1876
Alex Elder9d4df012013-04-19 15:34:50 -05001877static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001878{
1879 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001880 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder430c28c2013-04-03 21:32:51 -05001881
Ilya Dryomovbb873b52016-05-26 00:29:52 +02001882 if (img_request)
1883 osd_req->r_snapid = img_request->snap_id;
Alex Elder9d4df012013-04-19 15:34:50 -05001884}
1885
1886static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1887{
Alex Elder9d4df012013-04-19 15:34:50 -05001888 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001889
Ilya Dryomovbb873b52016-05-26 00:29:52 +02001890 osd_req->r_mtime = CURRENT_TIME;
1891 osd_req->r_data_offset = obj_request->offset;
Alex Elder430c28c2013-04-03 21:32:51 -05001892}
1893
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001894/*
1895 * Create an osd request. A read request has one osd op (read).
1896 * A write request has either one (watch) or two (hint+write) osd ops.
1897 * (All rbd data writes are prefixed with an allocation hint op, but
1898 * technically osd watch is a write request, hence this distinction.)
1899 */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001900static struct ceph_osd_request *rbd_osd_req_create(
1901 struct rbd_device *rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001902 enum obj_operation_type op_type,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001903 unsigned int num_ops,
Alex Elder430c28c2013-04-03 21:32:51 -05001904 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001905{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001906 struct ceph_snap_context *snapc = NULL;
1907 struct ceph_osd_client *osdc;
1908 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001909
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001910 if (obj_request_img_data_test(obj_request) &&
1911 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
Alex Elder6365d332013-02-11 12:33:24 -06001912 struct rbd_img_request *img_request = obj_request->img_request;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001913 if (op_type == OBJ_OP_WRITE) {
1914 rbd_assert(img_request_write_test(img_request));
1915 } else {
1916 rbd_assert(img_request_discard_test(img_request));
1917 }
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001918 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001919 }
1920
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001921 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001922
1923 /* Allocate and initialize the request, for the num_ops ops */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001924
1925 osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001926 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
David Disseldorp2224d872016-04-05 11:13:39 +02001927 GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001928 if (!osd_req)
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001929 goto fail;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001930
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001931 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001932 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001933 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001934 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001935
1936 osd_req->r_callback = rbd_osd_req_callback;
1937 osd_req->r_priv = obj_request;
1938
Yan, Zheng76271512016-02-03 21:24:49 +08001939 osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
Ilya Dryomovd30291b2016-04-29 19:54:20 +02001940 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
1941 obj_request->object_name))
1942 goto fail;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001943
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001944 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
1945 goto fail;
1946
Alex Elderbf0d5f502012-11-22 00:00:08 -06001947 return osd_req;
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001948
1949fail:
1950 ceph_osdc_put_request(osd_req);
1951 return NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001952}
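/*
 * Clarifying note (not from the original source): per the assertion
 * above, callers pass num_ops == 1 for reads and discards and
 * num_ops == 2 only for data writes, where the extra op is the
 * allocation hint that precedes the write.
 */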
1953
Alex Elder0eefd472013-04-19 15:34:50 -05001954/*
Josh Durgind3246fb2014-04-07 16:49:21 -07001955 * Create a copyup osd request based on the information in the object
 1956 * request supplied. A copyup request has two or three osd ops: a
 1957 * copyup method call, potentially an allocation hint op, and a
 1958 * write, truncate, or zero op.
Alex Elder0eefd472013-04-19 15:34:50 -05001959 */
1960static struct ceph_osd_request *
1961rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1962{
1963 struct rbd_img_request *img_request;
1964 struct ceph_snap_context *snapc;
1965 struct rbd_device *rbd_dev;
1966 struct ceph_osd_client *osdc;
1967 struct ceph_osd_request *osd_req;
Josh Durgind3246fb2014-04-07 16:49:21 -07001968 int num_osd_ops = 3;
Alex Elder0eefd472013-04-19 15:34:50 -05001969
1970 rbd_assert(obj_request_img_data_test(obj_request));
1971 img_request = obj_request->img_request;
1972 rbd_assert(img_request);
Josh Durgind3246fb2014-04-07 16:49:21 -07001973 rbd_assert(img_request_write_test(img_request) ||
1974 img_request_discard_test(img_request));
Alex Elder0eefd472013-04-19 15:34:50 -05001975
Josh Durgind3246fb2014-04-07 16:49:21 -07001976 if (img_request_discard_test(img_request))
1977 num_osd_ops = 2;
1978
1979 /* Allocate and initialize the request, for all the ops */
Alex Elder0eefd472013-04-19 15:34:50 -05001980
1981 snapc = img_request->snapc;
1982 rbd_dev = img_request->rbd_dev;
1983 osdc = &rbd_dev->rbd_client->client->osdc;
Josh Durgind3246fb2014-04-07 16:49:21 -07001984 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
David Disseldorp2224d872016-04-05 11:13:39 +02001985 false, GFP_NOIO);
Alex Elder0eefd472013-04-19 15:34:50 -05001986 if (!osd_req)
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001987 goto fail;
Alex Elder0eefd472013-04-19 15:34:50 -05001988
1989 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1990 osd_req->r_callback = rbd_osd_req_callback;
1991 osd_req->r_priv = obj_request;
1992
Yan, Zheng76271512016-02-03 21:24:49 +08001993 osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
Ilya Dryomovd30291b2016-04-29 19:54:20 +02001994 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
1995 obj_request->object_name))
1996 goto fail;
Alex Elder0eefd472013-04-19 15:34:50 -05001997
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001998 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
1999 goto fail;
2000
Alex Elder0eefd472013-04-19 15:34:50 -05002001 return osd_req;
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02002002
2003fail:
2004 ceph_osdc_put_request(osd_req);
2005 return NULL;
Alex Elder0eefd472013-04-19 15:34:50 -05002006}
2007
2008
Alex Elderbf0d5f502012-11-22 00:00:08 -06002009static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2010{
2011 ceph_osdc_put_request(osd_req);
2012}
2013
2014/* object_name is assumed to be a non-null pointer and NUL-terminated */
2015
2016static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2017 u64 offset, u64 length,
2018 enum obj_request_type type)
2019{
2020 struct rbd_obj_request *obj_request;
2021 size_t size;
2022 char *name;
2023
2024 rbd_assert(obj_request_type_valid(type));
2025
2026 size = strlen(object_name) + 1;
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002027 name = kmalloc(size, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002028 if (!name)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002029 return NULL;
2030
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002031 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002032 if (!obj_request) {
2033 kfree(name);
2034 return NULL;
2035 }
2036
Alex Elderbf0d5f502012-11-22 00:00:08 -06002037 obj_request->object_name = memcpy(name, object_name, size);
2038 obj_request->offset = offset;
2039 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06002040 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002041 obj_request->which = BAD_WHICH;
2042 obj_request->type = type;
2043 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06002044 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002045 kref_init(&obj_request->kref);
2046
Alex Elder37206ee2013-02-20 17:32:08 -06002047 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2048 offset, length, (int)type, obj_request);
2049
Alex Elderbf0d5f502012-11-22 00:00:08 -06002050 return obj_request;
2051}
2052
2053static void rbd_obj_request_destroy(struct kref *kref)
2054{
2055 struct rbd_obj_request *obj_request;
2056
2057 obj_request = container_of(kref, struct rbd_obj_request, kref);
2058
Alex Elder37206ee2013-02-20 17:32:08 -06002059 dout("%s: obj %p\n", __func__, obj_request);
2060
Alex Elderbf0d5f502012-11-22 00:00:08 -06002061 rbd_assert(obj_request->img_request == NULL);
2062 rbd_assert(obj_request->which == BAD_WHICH);
2063
2064 if (obj_request->osd_req)
2065 rbd_osd_req_destroy(obj_request->osd_req);
2066
2067 rbd_assert(obj_request_type_valid(obj_request->type));
2068 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06002069 case OBJ_REQUEST_NODATA:
2070 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06002071 case OBJ_REQUEST_BIO:
2072 if (obj_request->bio_list)
2073 bio_chain_put(obj_request->bio_list);
2074 break;
Alex Elder788e2df2013-01-17 12:25:27 -06002075 case OBJ_REQUEST_PAGES:
2076 if (obj_request->pages)
2077 ceph_release_page_vector(obj_request->pages,
2078 obj_request->page_count);
2079 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002080 }
2081
Alex Elderf907ad52013-05-01 12:43:03 -05002082 kfree(obj_request->object_name);
Alex Elder868311b2013-05-01 12:43:03 -05002083 obj_request->object_name = NULL;
2084 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002085}
2086
Alex Elderfb65d2282013-05-08 22:50:04 -05002087/* It's OK to call this for a device with no parent */
2088
2089static void rbd_spec_put(struct rbd_spec *spec);
2090static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2091{
2092 rbd_dev_remove_parent(rbd_dev);
2093 rbd_spec_put(rbd_dev->parent_spec);
2094 rbd_dev->parent_spec = NULL;
2095 rbd_dev->parent_overlap = 0;
2096}
2097
Alex Elderbf0d5f502012-11-22 00:00:08 -06002098/*
Alex Eldera2acd002013-05-08 22:50:04 -05002099 * Parent image reference counting is used to determine when an
2100 * image's parent fields can be safely torn down--after there are no
2101 * more in-flight requests to the parent image. When the last
2102 * reference is dropped, cleaning them up is safe.
2103 */
2104static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2105{
2106 int counter;
2107
2108 if (!rbd_dev->parent_spec)
2109 return;
2110
2111 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2112 if (counter > 0)
2113 return;
2114
2115 /* Last reference; clean up parent data structures */
2116
2117 if (!counter)
2118 rbd_dev_unparent(rbd_dev);
2119 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04002120 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002121}
2122
2123/*
2124 * If an image has a non-zero parent overlap, get a reference to its
2125 * parent.
2126 *
2127 * Returns true if the rbd device has a parent with a non-zero
2128 * overlap and a reference for it was successfully taken, or
2129 * false otherwise.
2130 */
2131static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2132{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002133 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002134
2135 if (!rbd_dev->parent_spec)
2136 return false;
2137
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002138 down_read(&rbd_dev->header_rwsem);
2139 if (rbd_dev->parent_overlap)
2140 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2141 up_read(&rbd_dev->header_rwsem);
Alex Eldera2acd002013-05-08 22:50:04 -05002142
2143 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04002144 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002145
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002146 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002147}
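/*
 * Clarifying note (not from the original source): each successful
 * rbd_dev_parent_get() is balanced by an rbd_dev_parent_put().
 * rbd_img_request_create() takes the reference when it marks a
 * request layered, and rbd_img_request_destroy() drops it.
 */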
2148
Alex Elderbf0d5f502012-11-22 00:00:08 -06002149/*
2150 * Caller is responsible for filling in the list of object requests
2151 * that comprises the image request, and the Linux request pointer
2152 * (if there is one).
2153 */
Alex Eldercc344fa2013-02-19 12:25:56 -06002154static struct rbd_img_request *rbd_img_request_create(
2155 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06002156 u64 offset, u64 length,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002157 enum obj_operation_type op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07002158 struct ceph_snap_context *snapc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002159{
2160 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002161
Ilya Dryomov7a716aa2014-08-05 11:25:54 +04002162 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002163 if (!img_request)
2164 return NULL;
2165
Alex Elderbf0d5f502012-11-22 00:00:08 -06002166 img_request->rq = NULL;
2167 img_request->rbd_dev = rbd_dev;
2168 img_request->offset = offset;
2169 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06002170 img_request->flags = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002171 if (op_type == OBJ_OP_DISCARD) {
2172 img_request_discard_set(img_request);
2173 img_request->snapc = snapc;
2174 } else if (op_type == OBJ_OP_WRITE) {
Alex Elder0c425242013-02-08 09:55:49 -06002175 img_request_write_set(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07002176 img_request->snapc = snapc;
Alex Elder0c425242013-02-08 09:55:49 -06002177 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002178 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06002179 }
Alex Eldera2acd002013-05-08 22:50:04 -05002180 if (rbd_dev_parent_get(rbd_dev))
Alex Elderd0b2e942013-01-24 16:13:36 -06002181 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002182 spin_lock_init(&img_request->completion_lock);
2183 img_request->next_completion = 0;
2184 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06002185 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002186 img_request->obj_request_count = 0;
2187 INIT_LIST_HEAD(&img_request->obj_requests);
2188 kref_init(&img_request->kref);
2189
Alex Elder37206ee2013-02-20 17:32:08 -06002190 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002191 obj_op_name(op_type), offset, length, img_request);
Alex Elder37206ee2013-02-20 17:32:08 -06002192
Alex Elderbf0d5f502012-11-22 00:00:08 -06002193 return img_request;
2194}
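/*
 * Hedged lifecycle sketch (not from the original source): a caller
 * typically creates the image request, fills it with object requests,
 * and then submits it, roughly:
 *
 *	img_request = rbd_img_request_create(rbd_dev, offset, length,
 *					     OBJ_OP_WRITE, snapc);
 *	if (!img_request)
 *		return -ENOMEM;
 *	ret = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, bio);
 *	...	// on success, submit the filled request
 */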
2195
2196static void rbd_img_request_destroy(struct kref *kref)
2197{
2198 struct rbd_img_request *img_request;
2199 struct rbd_obj_request *obj_request;
2200 struct rbd_obj_request *next_obj_request;
2201
2202 img_request = container_of(kref, struct rbd_img_request, kref);
2203
Alex Elder37206ee2013-02-20 17:32:08 -06002204 dout("%s: img %p\n", __func__, img_request);
2205
Alex Elderbf0d5f502012-11-22 00:00:08 -06002206 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2207 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06002208 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002209
Alex Eldera2acd002013-05-08 22:50:04 -05002210 if (img_request_layered_test(img_request)) {
2211 img_request_layered_clear(img_request);
2212 rbd_dev_parent_put(img_request->rbd_dev);
2213 }
2214
Josh Durginbef95452014-04-04 17:47:52 -07002215 if (img_request_write_test(img_request) ||
2216 img_request_discard_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05002217 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002218
Alex Elder1c2a9df2013-05-01 12:43:03 -05002219 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002220}
2221
Alex Eldere93f3152013-05-08 22:50:04 -05002222static struct rbd_img_request *rbd_parent_request_create(
2223 struct rbd_obj_request *obj_request,
2224 u64 img_offset, u64 length)
2225{
2226 struct rbd_img_request *parent_request;
2227 struct rbd_device *rbd_dev;
2228
2229 rbd_assert(obj_request->img_request);
2230 rbd_dev = obj_request->img_request->rbd_dev;
2231
Josh Durgin4e752f02014-04-08 11:12:11 -07002232 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002233 length, OBJ_OP_READ, NULL);
Alex Eldere93f3152013-05-08 22:50:04 -05002234 if (!parent_request)
2235 return NULL;
2236
2237 img_request_child_set(parent_request);
2238 rbd_obj_request_get(obj_request);
2239 parent_request->obj_request = obj_request;
2240
2241 return parent_request;
2242}
2243
2244static void rbd_parent_request_destroy(struct kref *kref)
2245{
2246 struct rbd_img_request *parent_request;
2247 struct rbd_obj_request *orig_request;
2248
2249 parent_request = container_of(kref, struct rbd_img_request, kref);
2250 orig_request = parent_request->obj_request;
2251
2252 parent_request->obj_request = NULL;
2253 rbd_obj_request_put(orig_request);
2254 img_request_child_clear(parent_request);
2255
2256 rbd_img_request_destroy(kref);
2257}
2258
Alex Elder12178572013-02-08 09:55:49 -06002259static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2260{
Alex Elder6365d332013-02-11 12:33:24 -06002261 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06002262 unsigned int xferred;
2263 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002264 bool more;
Alex Elder12178572013-02-08 09:55:49 -06002265
Alex Elder6365d332013-02-11 12:33:24 -06002266 rbd_assert(obj_request_img_data_test(obj_request));
2267 img_request = obj_request->img_request;
2268
Alex Elder12178572013-02-08 09:55:49 -06002269 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2270 xferred = (unsigned int)obj_request->xferred;
2271 result = obj_request->result;
2272 if (result) {
2273 struct rbd_device *rbd_dev = img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002274 enum obj_operation_type op_type;
2275
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002276 if (img_request_discard_test(img_request))
2277 op_type = OBJ_OP_DISCARD;
2278 else if (img_request_write_test(img_request))
2279 op_type = OBJ_OP_WRITE;
2280 else
2281 op_type = OBJ_OP_READ;
Alex Elder12178572013-02-08 09:55:49 -06002282
Ilya Dryomov9584d502014-07-11 12:11:20 +04002283 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002284 obj_op_name(op_type), obj_request->length,
2285 obj_request->img_offset, obj_request->offset);
Ilya Dryomov9584d502014-07-11 12:11:20 +04002286 rbd_warn(rbd_dev, " result %d xferred %x",
Alex Elder12178572013-02-08 09:55:49 -06002287 result, xferred);
2288 if (!img_request->result)
2289 img_request->result = result;
Ilya Dryomov082a75d2015-04-25 15:56:15 +03002290 /*
2291 * Need to end I/O on the entire obj_request worth of
2292 * bytes in case of error.
2293 */
2294 xferred = obj_request->length;
Alex Elder12178572013-02-08 09:55:49 -06002295 }
2296
Alex Elderf1a47392013-04-19 15:34:50 -05002297 /* Image object requests don't own their page array */
2298
2299 if (obj_request->type == OBJ_REQUEST_PAGES) {
2300 obj_request->pages = NULL;
2301 obj_request->page_count = 0;
2302 }
2303
Alex Elder8b3e1a52013-01-24 16:13:36 -06002304 if (img_request_child_test(img_request)) {
2305 rbd_assert(img_request->obj_request != NULL);
2306 more = obj_request->which < img_request->obj_request_count - 1;
2307 } else {
2308 rbd_assert(img_request->rq != NULL);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01002309
2310 more = blk_update_request(img_request->rq, result, xferred);
2311 if (!more)
2312 __blk_mq_end_request(img_request->rq, result);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002313 }
2314
2315 return more;
Alex Elder12178572013-02-08 09:55:49 -06002316}
2317
Alex Elder21692382013-04-05 01:27:12 -05002318static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2319{
2320 struct rbd_img_request *img_request;
2321 u32 which = obj_request->which;
2322 bool more = true;
2323
Alex Elder6365d332013-02-11 12:33:24 -06002324 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05002325 img_request = obj_request->img_request;
2326
2327 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2328 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05002329 rbd_assert(img_request->obj_request_count > 0);
2330 rbd_assert(which != BAD_WHICH);
2331 rbd_assert(which < img_request->obj_request_count);
Alex Elder21692382013-04-05 01:27:12 -05002332
2333 spin_lock_irq(&img_request->completion_lock);
2334 if (which != img_request->next_completion)
2335 goto out;
2336
2337 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05002338 rbd_assert(more);
2339 rbd_assert(which < img_request->obj_request_count);
2340
2341 if (!obj_request_done_test(obj_request))
2342 break;
Alex Elder12178572013-02-08 09:55:49 -06002343 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002344 which++;
2345 }
2346
2347 rbd_assert(more ^ (which == img_request->obj_request_count));
2348 img_request->next_completion = which;
2349out:
2350 spin_unlock_irq(&img_request->completion_lock);
Alex Elder0f2d5be2014-04-26 14:21:44 +04002351 rbd_img_request_put(img_request);
Alex Elder21692382013-04-05 01:27:12 -05002352
2353 if (!more)
2354 rbd_img_request_complete(img_request);
2355}
2356
Alex Elderf1a47392013-04-19 15:34:50 -05002357/*
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002358 * Add individual osd ops to the given ceph_osd_request and prepare
2359 * them for submission. num_ops is the current number of
2360 * osd operations already to the object request.
2361 */
2362static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2363 struct ceph_osd_request *osd_request,
2364 enum obj_operation_type op_type,
2365 unsigned int num_ops)
2366{
2367 struct rbd_img_request *img_request = obj_request->img_request;
2368 struct rbd_device *rbd_dev = img_request->rbd_dev;
2369 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2370 u64 offset = obj_request->offset;
2371 u64 length = obj_request->length;
2372 u64 img_end;
2373 u16 opcode;
2374
2375 if (op_type == OBJ_OP_DISCARD) {
Josh Durgind3246fb2014-04-07 16:49:21 -07002376 if (!offset && length == object_size &&
2377 (!img_request_layered_test(img_request) ||
2378 !obj_request_overlaps_parent(obj_request))) {
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002379 opcode = CEPH_OSD_OP_DELETE;
2380 } else if ((offset + length == object_size)) {
2381 opcode = CEPH_OSD_OP_TRUNCATE;
2382 } else {
2383 down_read(&rbd_dev->header_rwsem);
2384 img_end = rbd_dev->header.image_size;
2385 up_read(&rbd_dev->header_rwsem);
2386
2387 if (obj_request->img_offset + length == img_end)
2388 opcode = CEPH_OSD_OP_TRUNCATE;
2389 else
2390 opcode = CEPH_OSD_OP_ZERO;
2391 }
2392 } else if (op_type == OBJ_OP_WRITE) {
Ilya Dryomove30b7572015-10-07 17:27:17 +02002393 if (!offset && length == object_size)
2394 opcode = CEPH_OSD_OP_WRITEFULL;
2395 else
2396 opcode = CEPH_OSD_OP_WRITE;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002397 osd_req_op_alloc_hint_init(osd_request, num_ops,
2398 object_size, object_size);
2399 num_ops++;
2400 } else {
2401 opcode = CEPH_OSD_OP_READ;
2402 }
2403
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002404 if (opcode == CEPH_OSD_OP_DELETE)
Yan, Zheng144cba12015-04-27 11:09:54 +08002405 osd_req_op_init(osd_request, num_ops, opcode, 0);
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002406 else
2407 osd_req_op_extent_init(osd_request, num_ops, opcode,
2408 offset, length, 0, 0);
2409
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002410 if (obj_request->type == OBJ_REQUEST_BIO)
2411 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2412 obj_request->bio_list, length);
2413 else if (obj_request->type == OBJ_REQUEST_PAGES)
2414 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2415 obj_request->pages, length,
2416 offset & ~PAGE_MASK, false, false);
2417
2418 /* Discards are also writes */
2419 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2420 rbd_osd_req_format_write(obj_request);
2421 else
2422 rbd_osd_req_format_read(obj_request);
2423}
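/*
 * Clarifying summary of the mapping above (not from the original
 * comments): a discard covering a whole object with no parent data to
 * preserve becomes CEPH_OSD_OP_DELETE; one ending exactly at the
 * object boundary, or at the current end of the image, becomes
 * CEPH_OSD_OP_TRUNCATE; anything else becomes CEPH_OSD_OP_ZERO.
 * Whole-object writes use CEPH_OSD_OP_WRITEFULL instead of
 * CEPH_OSD_OP_WRITE.
 */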
2424
2425/*
Alex Elderf1a47392013-04-19 15:34:50 -05002426 * Split up an image request into one or more object requests, each
2427 * to a different object. The "type" parameter indicates whether
2428 * "data_desc" is the pointer to the head of a list of bio
2429 * structures, or the base of a page array. In either case this
2430 * function assumes data_desc describes memory sufficient to hold
2431 * all data described by the image request.
2432 */
2433static int rbd_img_request_fill(struct rbd_img_request *img_request,
2434 enum obj_request_type type,
2435 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002436{
2437 struct rbd_device *rbd_dev = img_request->rbd_dev;
2438 struct rbd_obj_request *obj_request = NULL;
2439 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002440 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002441 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002442 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002443 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002444 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002445 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002446
Alex Elderf1a47392013-04-19 15:34:50 -05002447 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2448 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002449
Alex Elder7da22d22013-01-24 16:13:36 -06002450 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002451 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002452 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002453 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002454
2455 if (type == OBJ_REQUEST_BIO) {
2456 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002457 rbd_assert(img_offset ==
2458 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002459 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002460 pages = data_desc;
2461 }
2462
Alex Elderbf0d5f502012-11-22 00:00:08 -06002463 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002464 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002465 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002466 u64 offset;
2467 u64 length;
2468
Alex Elder7da22d22013-01-24 16:13:36 -06002469 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002470 if (!object_name)
2471 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06002472 offset = rbd_segment_offset(rbd_dev, img_offset);
2473 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002474 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002475 offset, length, type);
Alex Elder78c2a442013-05-01 12:43:04 -05002476 /* object request has its own copy of the object name */
2477 rbd_segment_name_free(object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002478 if (!obj_request)
2479 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002480
Josh Durgin03507db2013-08-27 14:45:46 -07002481 /*
2482 * set obj_request->img_request before creating the
2483 * osd_request so that it gets the right snapc
2484 */
2485 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002486
Alex Elderf1a47392013-04-19 15:34:50 -05002487 if (type == OBJ_REQUEST_BIO) {
2488 unsigned int clone_size;
2489
2490 rbd_assert(length <= (u64)UINT_MAX);
2491 clone_size = (unsigned int)length;
2492 obj_request->bio_list =
2493 bio_chain_clone_range(&bio_list,
2494 &bio_offset,
2495 clone_size,
David Disseldorp2224d872016-04-05 11:13:39 +02002496 GFP_NOIO);
Alex Elderf1a47392013-04-19 15:34:50 -05002497 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002498 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002499 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002500 unsigned int page_count;
2501
2502 obj_request->pages = pages;
2503 page_count = (u32)calc_pages_for(offset, length);
2504 obj_request->page_count = page_count;
2505 if ((offset + length) & ~PAGE_MASK)
2506 page_count--; /* more on last page */
2507 pages += page_count;
2508 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002509
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002510 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2511 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2512 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002513 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002514 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002515
Alex Elder2fa12322013-04-05 01:27:12 -05002516 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002517 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002518 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002519
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002520 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2521
2522 rbd_img_request_get(img_request);
2523
Alex Elder7da22d22013-01-24 16:13:36 -06002524 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002525 resid -= length;
2526 }
2527
2528 return 0;
2529
Alex Elderbf0d5f502012-11-22 00:00:08 -06002530out_unwind:
2531 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002532 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002533
2534 return -ENOMEM;
2535}
2536
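/*
 * Completion handler for a copyup request.  Release the page vector
 * that carried the parent object data and mark the original object
 * request done.
 */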
Alex Elder3d7efd12013-04-19 15:34:50 -05002537static void
Ilya Dryomov27617132015-07-16 17:36:11 +03002538rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
Alex Elder0eefd472013-04-19 15:34:50 -05002539{
2540 struct rbd_img_request *img_request;
2541 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002542 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002543 u32 page_count;
2544
Ilya Dryomov27617132015-07-16 17:36:11 +03002545 dout("%s: obj %p\n", __func__, obj_request);
2546
Josh Durgind3246fb2014-04-07 16:49:21 -07002547 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2548 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002549 rbd_assert(obj_request_img_data_test(obj_request));
2550 img_request = obj_request->img_request;
2551 rbd_assert(img_request);
2552
2553 rbd_dev = img_request->rbd_dev;
2554 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002555
Alex Elderebda6402013-05-10 16:29:22 -05002556 pages = obj_request->copyup_pages;
2557 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002558 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002559 page_count = obj_request->copyup_page_count;
2560 rbd_assert(page_count);
2561 obj_request->copyup_page_count = 0;
2562 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002563
2564 /*
2565 * We want the transfer count to reflect the size of the
2566 * original write request. There is no such thing as a
2567 * successful short write, so if the request was successful
2568 * we can just set it to the originally-requested length.
2569 */
2570 if (!obj_request->result)
2571 obj_request->xferred = obj_request->length;
2572
Ilya Dryomov27617132015-07-16 17:36:11 +03002573 obj_request_done_set(obj_request);
Alex Elder0eefd472013-04-19 15:34:50 -05002574}
2575
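/*
 * Handle completion of the full-object read from the parent image
 * issued by rbd_img_obj_parent_read_full().  If the parent overlap has
 * since dropped to zero, the original request is simply resubmitted;
 * otherwise a new osd request is built that carries the "copyup" call
 * (with the parent data) followed by the original write/discard op(s),
 * and that request is sent in place of the original one.
 */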
2576static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002577rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2578{
2579 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002580 struct ceph_osd_request *osd_req;
2581 struct ceph_osd_client *osdc;
2582 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002583 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002584 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002585 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002586 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002587 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002588
2589 rbd_assert(img_request_child_test(img_request));
2590
2591 /* First get what we need from the image request */
2592
2593 pages = img_request->copyup_pages;
2594 rbd_assert(pages != NULL);
2595 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002596 page_count = img_request->copyup_page_count;
2597 rbd_assert(page_count);
2598 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002599
2600 orig_request = img_request->obj_request;
2601 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002602 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002603 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002604 parent_length = img_request->length;
2605 rbd_assert(parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002606 rbd_img_request_put(img_request);
2607
Alex Elder91c6feb2013-05-06 17:40:32 -05002608 rbd_assert(orig_request->img_request);
2609 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002610 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002611
Alex Elderbbea1c12013-05-06 17:40:33 -05002612 /*
2613 * If the overlap has become 0 (most likely because the
2614 * image has been flattened) we need to free the pages
2615 * and re-submit the original write request.
2616 */
2617 if (!rbd_dev->parent_overlap) {
2618 struct ceph_osd_client *osdc;
2619
2620 ceph_release_page_vector(pages, page_count);
2621 osdc = &rbd_dev->rbd_client->client->osdc;
2622 img_result = rbd_obj_request_submit(osdc, orig_request);
2623 if (!img_result)
2624 return;
2625 }
2626
2627 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002628 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002629
Alex Elder8785b1d2013-05-09 10:08:49 -05002630 /*
2631	 * The original osd request is of no use to us any more.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002632 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002633 * request. Allocate the new copyup osd request for the
2634 * original request, and release the old one.
2635 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002636 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002637 osd_req = rbd_osd_req_create_copyup(orig_request);
2638 if (!osd_req)
2639 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002640 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002641 orig_request->osd_req = osd_req;
2642 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002643 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002644
Alex Elder0eefd472013-04-19 15:34:50 -05002645 /* Initialize the copyup op */
2646
2647 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002648 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002649 false, false);
2650
Josh Durgind3246fb2014-04-07 16:49:21 -07002651 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002652
Josh Durgind3246fb2014-04-07 16:49:21 -07002653 op_type = rbd_img_request_op_type(orig_request->img_request);
2654 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
Alex Elder0eefd472013-04-19 15:34:50 -05002655
2656 /* All set, send it off. */
2657
Alex Elder0eefd472013-04-19 15:34:50 -05002658 osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderbbea1c12013-05-06 17:40:33 -05002659 img_result = rbd_obj_request_submit(osdc, orig_request);
2660 if (!img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002661 return;
2662out_err:
2663 /* Record the error code and complete the request */
2664
Alex Elderbbea1c12013-05-06 17:40:33 -05002665 orig_request->result = img_result;
Alex Elder0eefd472013-04-19 15:34:50 -05002666 orig_request->xferred = 0;
2667 obj_request_done_set(orig_request);
2668 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002669}
2670
2671/*
2672 * Read from the parent image the range of data that covers the
2673 * entire target of the given object request. This is used for
2674 * satisfying a layered image write request when the target of an
2675 * object request from the image request does not exist.
2676 *
2677 * A page array big enough to hold the returned data is allocated
2678 * and supplied to rbd_img_request_fill() as the "data descriptor."
2679 * When the read completes, this page array will be transferred to
2680 * the original object request for the copyup operation.
2681 *
2682 * If an error occurs, record it as the result of the original
2683 * object request and mark it done so it gets completed.
2684 */
2685static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2686{
2687 struct rbd_img_request *img_request = NULL;
2688 struct rbd_img_request *parent_request = NULL;
2689 struct rbd_device *rbd_dev;
2690 u64 img_offset;
2691 u64 length;
2692 struct page **pages = NULL;
2693 u32 page_count;
2694 int result;
2695
2696 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderb91f09f2013-05-10 16:29:22 -05002697 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder3d7efd12013-04-19 15:34:50 -05002698
2699 img_request = obj_request->img_request;
2700 rbd_assert(img_request != NULL);
2701 rbd_dev = img_request->rbd_dev;
2702 rbd_assert(rbd_dev->parent != NULL);
2703
2704 /*
2705 * Determine the byte range covered by the object in the
2706 * child image to which the original request was to be sent.
2707 */
2708 img_offset = obj_request->img_offset - obj_request->offset;
2709 length = (u64)1 << rbd_dev->header.obj_order;
2710
2711 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002712 * There is no defined parent data beyond the parent
2713 * overlap, so limit what we read at that boundary if
2714 * necessary.
2715 */
2716 if (img_offset + length > rbd_dev->parent_overlap) {
2717 rbd_assert(img_offset < rbd_dev->parent_overlap);
2718 length = rbd_dev->parent_overlap - img_offset;
2719 }
2720
2721 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002722 * Allocate a page array big enough to receive the data read
2723 * from the parent.
2724 */
2725 page_count = (u32)calc_pages_for(0, length);
2726 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2727 if (IS_ERR(pages)) {
2728 result = PTR_ERR(pages);
2729 pages = NULL;
2730 goto out_err;
2731 }
2732
2733 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002734 parent_request = rbd_parent_request_create(obj_request,
2735 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002736 if (!parent_request)
2737 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002738
2739 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2740 if (result)
2741 goto out_err;
2742 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002743 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002744
2745 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2746 result = rbd_img_request_submit(parent_request);
2747 if (!result)
2748 return 0;
2749
2750 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002751 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002752 parent_request->obj_request = NULL;
2753 rbd_obj_request_put(obj_request);
2754out_err:
2755 if (pages)
2756 ceph_release_page_vector(pages, page_count);
2757 if (parent_request)
2758 rbd_img_request_put(parent_request);
2759 obj_request->result = result;
2760 obj_request->xferred = 0;
2761 obj_request_done_set(obj_request);
2762
2763 return result;
2764}
2765
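/*
 * Handle completion of the STAT request issued by
 * rbd_img_obj_exists_submit().  A result of -ENOENT simply means the
 * target object does not exist; any other non-zero result is a real
 * error.  Record the existence state on the original object request
 * and resubmit it.
 */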
Alex Elderc5b5ef62013-02-11 12:33:24 -06002766static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2767{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002768 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002769 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002770 int result;
2771
2772 rbd_assert(!obj_request_img_data_test(obj_request));
2773
2774 /*
2775 * All we need from the object request is the original
2776 * request and the result of the STAT op. Grab those, then
2777 * we're done with the request.
2778 */
2779 orig_request = obj_request->obj_request;
2780 obj_request->obj_request = NULL;
Alex Elder912c3172013-05-13 20:35:38 -05002781 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002782 rbd_assert(orig_request);
2783 rbd_assert(orig_request->img_request);
2784
2785 result = obj_request->result;
2786 obj_request->result = 0;
2787
2788 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2789 obj_request, orig_request, result,
2790 obj_request->xferred, obj_request->length);
2791 rbd_obj_request_put(obj_request);
2792
Alex Elder638f5ab2013-05-06 17:40:33 -05002793 /*
2794 * If the overlap has become 0 (most likely because the
2795	 * image has been flattened) we need to re-submit the
2796	 * original write request.
2797 */
2798 rbd_dev = orig_request->img_request->rbd_dev;
2799 if (!rbd_dev->parent_overlap) {
2800 struct ceph_osd_client *osdc;
2801
Alex Elder638f5ab2013-05-06 17:40:33 -05002802 osdc = &rbd_dev->rbd_client->client->osdc;
2803 result = rbd_obj_request_submit(osdc, orig_request);
2804 if (!result)
2805 return;
2806 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002807
2808 /*
2809 * Our only purpose here is to determine whether the object
2810 * exists, and we don't want to treat the non-existence as
2811 * an error. If something else comes back, transfer the
2812 * error to the original request and complete it now.
2813 */
2814 if (!result) {
2815 obj_request_existence_set(orig_request, true);
2816 } else if (result == -ENOENT) {
2817 obj_request_existence_set(orig_request, false);
2818 } else if (result) {
2819 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002820 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002821 }
2822
2823 /*
2824 * Resubmit the original request now that we have recorded
2825 * whether the target object exists.
2826 */
Alex Elderb454e362013-04-19 15:34:50 -05002827 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002828out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002829 if (orig_request->result)
2830 rbd_obj_request_complete(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002831}
2832
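/*
 * Issue a STAT request for the target object of a layered write so we
 * can find out whether it already exists.  The result is handled by
 * rbd_img_obj_exists_callback(), which resubmits the original request.
 */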
2833static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2834{
2835 struct rbd_obj_request *stat_request;
2836 struct rbd_device *rbd_dev;
2837 struct ceph_osd_client *osdc;
2838 struct page **pages = NULL;
2839 u32 page_count;
2840 size_t size;
2841 int ret;
2842
2843 /*
2844 * The response data for a STAT call consists of:
2845 * le64 length;
2846 * struct {
2847 * le32 tv_sec;
2848 * le32 tv_nsec;
2849 * } mtime;
2850 */
2851 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2852 page_count = (u32)calc_pages_for(0, size);
2853 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2854 if (IS_ERR(pages))
2855 return PTR_ERR(pages);
2856
2857 ret = -ENOMEM;
2858 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2859 OBJ_REQUEST_PAGES);
2860 if (!stat_request)
2861 goto out;
2862
2863 rbd_obj_request_get(obj_request);
2864 stat_request->obj_request = obj_request;
2865 stat_request->pages = pages;
2866 stat_request->page_count = page_count;
2867
2868 rbd_assert(obj_request->img_request);
2869 rbd_dev = obj_request->img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002870 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02002871 stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002872 if (!stat_request->osd_req)
2873 goto out;
2874 stat_request->callback = rbd_img_obj_exists_callback;
2875
Yan, Zheng144cba12015-04-27 11:09:54 +08002876 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002877 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2878 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002879 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002880
2881 osdc = &rbd_dev->rbd_client->client->osdc;
2882 ret = rbd_obj_request_submit(osdc, stat_request);
2883out:
2884 if (ret)
2885 rbd_obj_request_put(obj_request);
2886
2887 return ret;
2888}
2889
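/*
 * Return true if this object request can be submitted directly to the
 * osd client with no copyup handling.  The individual cases are
 * spelled out below.
 */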
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002890static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002891{
2892 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002893 struct rbd_device *rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002894
2895 rbd_assert(obj_request_img_data_test(obj_request));
2896
2897 img_request = obj_request->img_request;
2898 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002899 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002900
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002901 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002902 if (!img_request_write_test(img_request) &&
2903 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002904 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002905
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002906 /* Non-layered writes */
2907 if (!img_request_layered_test(img_request))
2908 return true;
2909
2910 /*
2911 * Layered writes outside of the parent overlap range don't
2912 * share any data with the parent.
2913 */
2914 if (!obj_request_overlaps_parent(obj_request))
2915 return true;
2916
2917 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002918 * Entire-object layered writes - we will overwrite whatever
2919 * parent data there is anyway.
2920 */
2921 if (!obj_request->offset &&
2922 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2923 return true;
2924
2925 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002926 * If the object is known to already exist, its parent data has
2927 * already been copied.
2928 */
2929 if (obj_request_known_test(obj_request) &&
2930 obj_request_exists_test(obj_request))
2931 return true;
2932
2933 return false;
2934}
2935
2936static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2937{
2938 if (img_obj_request_simple(obj_request)) {
Alex Elderb454e362013-04-19 15:34:50 -05002939 struct rbd_device *rbd_dev;
2940 struct ceph_osd_client *osdc;
2941
2942 rbd_dev = obj_request->img_request->rbd_dev;
2943 osdc = &rbd_dev->rbd_client->client->osdc;
2944
2945 return rbd_obj_request_submit(osdc, obj_request);
2946 }
2947
2948 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002949 * It's a layered write. The target object might exist but
2950 * we may not know that yet. If we know it doesn't exist,
2951 * start by reading the data for the full target object from
2952 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002953 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002954 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002955 return rbd_img_obj_parent_read_full(obj_request);
2956
2957 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002958
2959 return rbd_img_obj_exists_submit(obj_request);
2960}
2961
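/*
 * Submit every object request that makes up the image request.  An
 * extra reference on the image request is held across the loop so that
 * completion of the already-submitted requests cannot free it while
 * we are still iterating.
 */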
Alex Elderbf0d5f502012-11-22 00:00:08 -06002962static int rbd_img_request_submit(struct rbd_img_request *img_request)
2963{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002964 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002965 struct rbd_obj_request *next_obj_request;
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002966 int ret = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002967
Alex Elder37206ee2013-02-20 17:32:08 -06002968 dout("%s: img %p\n", __func__, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002969
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002970 rbd_img_request_get(img_request);
2971 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderb454e362013-04-19 15:34:50 -05002972 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002973 if (ret)
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002974 goto out_put_ireq;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002975 }
2976
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002977out_put_ireq:
2978 rbd_img_request_put(img_request);
2979 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002980}
2981
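/*
 * Handle completion of a read sent to the parent image on behalf of a
 * child object request (see rbd_img_parent_read()).  If the parent
 * overlap has dropped to zero the original request is resubmitted.
 * Otherwise the transfer count is clipped at the overlap boundary so
 * that anything beyond it is zero-filled by
 * rbd_img_obj_request_read_callback().
 */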
Alex Elder8b3e1a52013-01-24 16:13:36 -06002982static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2983{
2984 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002985 struct rbd_device *rbd_dev;
2986 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05002987 u64 img_xferred;
2988 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002989
2990 rbd_assert(img_request_child_test(img_request));
2991
Alex Elder02c74fb2013-05-06 17:40:33 -05002992 /* First get what we need from the image request and release it */
2993
Alex Elder8b3e1a52013-01-24 16:13:36 -06002994 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05002995 img_xferred = img_request->xferred;
2996 img_result = img_request->result;
2997 rbd_img_request_put(img_request);
2998
2999 /*
3000 * If the overlap has become 0 (most likely because the
3001 * image has been flattened) we need to re-submit the
3002 * original request.
3003 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003004 rbd_assert(obj_request);
3005 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05003006 rbd_dev = obj_request->img_request->rbd_dev;
3007 if (!rbd_dev->parent_overlap) {
3008 struct ceph_osd_client *osdc;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003009
Alex Elder02c74fb2013-05-06 17:40:33 -05003010 osdc = &rbd_dev->rbd_client->client->osdc;
3011 img_result = rbd_obj_request_submit(osdc, obj_request);
3012 if (!img_result)
3013 return;
3014 }
3015
3016 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003017 if (obj_request->result)
3018 goto out;
3019
3020 /*
3021 * We need to zero anything beyond the parent overlap
3022 * boundary. Since rbd_img_obj_request_read_callback()
3023 * will zero anything beyond the end of a short read, an
3024 * easy way to do this is to pretend the data from the
3025 * parent came up short--ending at the overlap boundary.
3026 */
3027 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3028 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003029 if (obj_end > rbd_dev->parent_overlap) {
3030 u64 xferred = 0;
3031
3032 if (obj_request->img_offset < rbd_dev->parent_overlap)
3033 xferred = rbd_dev->parent_overlap -
3034 obj_request->img_offset;
3035
Alex Elder02c74fb2013-05-06 17:40:33 -05003036 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003037 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05003038 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003039 }
3040out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06003041 rbd_img_obj_request_read_callback(obj_request);
3042 rbd_obj_request_complete(obj_request);
3043}
3044
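/*
 * The target object of a read did not exist (-ENOENT).  Build a child
 * image request against the parent image covering the same byte range
 * and submit it; rbd_img_parent_read_callback() finishes the original
 * object request when the parent read completes.
 */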
3045static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3046{
Alex Elder8b3e1a52013-01-24 16:13:36 -06003047 struct rbd_img_request *img_request;
3048 int result;
3049
3050 rbd_assert(obj_request_img_data_test(obj_request));
3051 rbd_assert(obj_request->img_request != NULL);
3052 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003053 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003054
Alex Elder8b3e1a52013-01-24 16:13:36 -06003055 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003056 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003057 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003058 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003059 result = -ENOMEM;
3060 if (!img_request)
3061 goto out_err;
3062
Alex Elder5b2ab722013-05-06 17:40:33 -05003063 if (obj_request->type == OBJ_REQUEST_BIO)
3064 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3065 obj_request->bio_list);
3066 else
3067 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3068 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003069 if (result)
3070 goto out_err;
3071
3072 img_request->callback = rbd_img_parent_read_callback;
3073 result = rbd_img_request_submit(img_request);
3074 if (result)
3075 goto out_err;
3076
3077 return;
3078out_err:
3079 if (img_request)
3080 rbd_img_request_put(img_request);
3081 obj_request->result = result;
3082 obj_request->xferred = 0;
3083 obj_request_done_set(obj_request);
3084}
3085
Ilya Dryomov922dab62016-05-26 01:15:02 +02003086static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
3087static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
3088
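/*
 * Callback invoked when a notification arrives on the header object
 * watch.  Refresh the image header and acknowledge the notification.
 */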
3089static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3090 u64 notifier_id, void *data, size_t data_len)
Alex Elderb8d70032012-11-30 17:53:04 -06003091{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003092 struct rbd_device *rbd_dev = arg;
Alex Elder21692382013-04-05 01:27:12 -05003093 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06003094 int ret;
3095
Ilya Dryomov922dab62016-05-26 01:15:02 +02003096 dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
3097 cookie, notify_id);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003098
3099 /*
3100 * Until adequate refresh error handling is in place, there is
3101 * not much we can do here, except warn.
3102 *
3103 * See http://tracker.ceph.com/issues/5040
3104 */
Alex Eldere627db02013-05-06 07:40:30 -05003105 ret = rbd_dev_refresh(rbd_dev);
3106 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003107 rbd_warn(rbd_dev, "refresh failed: %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003108
Ilya Dryomov922dab62016-05-26 01:15:02 +02003109 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3110 &rbd_dev->header_oloc, notify_id, cookie,
3111 NULL, 0);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003112 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003113 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003114}
3115
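/*
 * Callback invoked when the osd client reports an error on the header
 * object watch.  Tear the watch down, register a new one, and refresh
 * the header in case notifications were missed in the meantime.
 */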
Ilya Dryomov922dab62016-05-26 01:15:02 +02003116static void rbd_watch_errcb(void *arg, u64 cookie, int err)
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003117{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003118 struct rbd_device *rbd_dev = arg;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003119 int ret;
3120
Ilya Dryomov922dab62016-05-26 01:15:02 +02003121 rbd_warn(rbd_dev, "encountered watch error: %d", err);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003122
Ilya Dryomov922dab62016-05-26 01:15:02 +02003123 __rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003124
Ilya Dryomov922dab62016-05-26 01:15:02 +02003125 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003126 if (ret) {
Ilya Dryomov922dab62016-05-26 01:15:02 +02003127 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3128 return;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003129 }
3130
Ilya Dryomov922dab62016-05-26 01:15:02 +02003131 ret = rbd_dev_refresh(rbd_dev);
3132 if (ret)
3133		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003134}
3135
3136/*
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003137 * Initiate a watch request, synchronously.
Alex Elder9969ebc2013-01-18 12:31:10 -06003138 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003139static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003140{
3141 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov922dab62016-05-26 01:15:02 +02003142 struct ceph_osd_linger_request *handle;
Alex Elder9969ebc2013-01-18 12:31:10 -06003143
Ilya Dryomov922dab62016-05-26 01:15:02 +02003144 rbd_assert(!rbd_dev->watch_handle);
Alex Elder9969ebc2013-01-18 12:31:10 -06003145
Ilya Dryomov922dab62016-05-26 01:15:02 +02003146 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3147 &rbd_dev->header_oloc, rbd_watch_cb,
3148 rbd_watch_errcb, rbd_dev);
3149 if (IS_ERR(handle))
3150 return PTR_ERR(handle);
Alex Elder9969ebc2013-01-18 12:31:10 -06003151
Ilya Dryomov922dab62016-05-26 01:15:02 +02003152 rbd_dev->watch_handle = handle;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003153 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003154}
3155
Ilya Dryomovc525f032016-04-28 16:07:26 +02003156static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003157{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003158 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3159 int ret;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003160
Ilya Dryomov922dab62016-05-26 01:15:02 +02003161 if (!rbd_dev->watch_handle)
3162 return;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003163
Ilya Dryomov922dab62016-05-26 01:15:02 +02003164 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3165 if (ret)
3166 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003167
Ilya Dryomov922dab62016-05-26 01:15:02 +02003168 rbd_dev->watch_handle = NULL;
Ilya Dryomovc525f032016-04-28 16:07:26 +02003169}
3170
3171/*
3172 * Tear down a watch request, synchronously.
3173 */
3174static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3175{
3176 __rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomov811c6682016-04-15 16:22:16 +02003177
3178 dout("%s flushing notifies\n", __func__);
3179 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02003180}
3181
Alex Elder36be9a72013-01-19 00:30:28 -06003182/*
Alex Elderf40eb342013-04-25 15:09:42 -05003183 * Synchronous osd object method call. Returns the number of bytes
3184 * returned in the outbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003185 */
3186static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3187 const char *object_name,
3188 const char *class_name,
3189 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003190 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003191 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003192 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003193 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003194{
Alex Elder21692382013-04-05 01:27:12 -05003195 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06003196 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06003197 struct page **pages;
3198 u32 page_count;
3199 int ret;
3200
3201 /*
Alex Elder6010a452013-04-05 01:27:11 -05003202 * Method calls are ultimately read operations. The result
3203	 * should be placed into the inbound buffer provided.  They
3204 * also supply outbound data--parameters for the object
3205 * method. Currently if this is present it will be a
3206 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003207 */
Alex Elder57385b52013-04-21 12:14:45 -05003208 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06003209 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3210 if (IS_ERR(pages))
3211 return PTR_ERR(pages);
3212
3213 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05003214 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06003215 OBJ_REQUEST_PAGES);
3216 if (!obj_request)
3217 goto out;
3218
3219 obj_request->pages = pages;
3220 obj_request->page_count = page_count;
3221
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003222 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003223 obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06003224 if (!obj_request->osd_req)
3225 goto out;
3226
Alex Elderc99d2d42013-04-05 01:27:11 -05003227 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05003228 class_name, method_name);
3229 if (outbound_size) {
3230 struct ceph_pagelist *pagelist;
3231
3232 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3233 if (!pagelist)
3234 goto out;
3235
3236 ceph_pagelist_init(pagelist);
3237 ceph_pagelist_append(pagelist, outbound, outbound_size);
3238 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3239 pagelist);
3240 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05003241 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3242 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05003243 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003244 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003245
Alex Elder36be9a72013-01-19 00:30:28 -06003246 ret = rbd_obj_request_submit(osdc, obj_request);
3247 if (ret)
3248 goto out;
3249 ret = rbd_obj_request_wait(obj_request);
3250 if (ret)
3251 goto out;
3252
3253 ret = obj_request->result;
3254 if (ret < 0)
3255 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05003256
3257 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3258 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003259 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06003260out:
3261 if (obj_request)
3262 rbd_obj_request_put(obj_request);
3263 else
3264 ceph_release_page_vector(pages, page_count);
3265
3266 return ret;
3267}
3268
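/*
 * Per-request work function, run from the rbd workqueue.  Validate the
 * block layer request, build the corresponding image request (taking a
 * snapshot context reference under header_rwsem for writes and
 * discards), fill it from the request's bio chain (no data payload for
 * discards), and submit it.  Errors detected here are completed
 * immediately via blk_mq_end_request().
 */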
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003269static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003270{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003271 struct request *rq = blk_mq_rq_from_pdu(work);
3272 struct rbd_device *rbd_dev = rq->q->queuedata;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003273 struct rbd_img_request *img_request;
Josh Durgin4e752f02014-04-08 11:12:11 -07003274 struct ceph_snap_context *snapc = NULL;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003275 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3276 u64 length = blk_rq_bytes(rq);
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003277 enum obj_operation_type op_type;
Josh Durgin4e752f02014-04-08 11:12:11 -07003278 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003279 int result;
3280
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003281 if (rq->cmd_type != REQ_TYPE_FS) {
3282 dout("%s: non-fs request type %d\n", __func__,
3283 (int) rq->cmd_type);
3284 result = -EIO;
3285 goto err;
3286 }
3287
Mike Christiec2df40d2016-06-05 14:32:17 -05003288 if (req_op(rq) == REQ_OP_DISCARD)
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003289 op_type = OBJ_OP_DISCARD;
Mike Christiec2df40d2016-06-05 14:32:17 -05003290 else if (req_op(rq) == REQ_OP_WRITE)
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003291 op_type = OBJ_OP_WRITE;
3292 else
3293 op_type = OBJ_OP_READ;
3294
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003295 /* Ignore/skip any zero-length requests */
3296
3297 if (!length) {
3298 dout("%s: zero-length request\n", __func__);
3299 result = 0;
3300 goto err_rq;
3301 }
3302
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003303 /* Only reads are allowed to a read-only device */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003304
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003305 if (op_type != OBJ_OP_READ) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003306 if (rbd_dev->mapping.read_only) {
3307 result = -EROFS;
3308 goto err_rq;
3309 }
3310 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3311 }
3312
3313 /*
3314 * Quit early if the mapped snapshot no longer exists. It's
3315 * still possible the snapshot will have disappeared by the
3316 * time our request arrives at the osd, but there's no sense in
3317 * sending it if we already know.
3318 */
3319 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3320 dout("request for non-existent snapshot");
3321 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3322 result = -ENXIO;
3323 goto err_rq;
3324 }
3325
3326 if (offset && length > U64_MAX - offset + 1) {
3327 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3328 length);
3329 result = -EINVAL;
3330 goto err_rq; /* Shouldn't happen */
3331 }
3332
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003333 blk_mq_start_request(rq);
3334
Josh Durgin4e752f02014-04-08 11:12:11 -07003335 down_read(&rbd_dev->header_rwsem);
3336 mapping_size = rbd_dev->mapping.size;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003337 if (op_type != OBJ_OP_READ) {
Josh Durgin4e752f02014-04-08 11:12:11 -07003338 snapc = rbd_dev->header.snapc;
3339 ceph_get_snap_context(snapc);
3340 }
3341 up_read(&rbd_dev->header_rwsem);
3342
3343 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003344 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07003345 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003346 result = -EIO;
3347 goto err_rq;
3348 }
3349
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003350 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07003351 snapc);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003352 if (!img_request) {
3353 result = -ENOMEM;
3354 goto err_rq;
3355 }
3356 img_request->rq = rq;
Ilya Dryomov70b16db2015-11-27 19:23:24 +01003357 snapc = NULL; /* img_request consumes a ref */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003358
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003359 if (op_type == OBJ_OP_DISCARD)
3360 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3361 NULL);
3362 else
3363 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3364 rq->bio);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003365 if (result)
3366 goto err_img_request;
3367
3368 result = rbd_img_request_submit(img_request);
3369 if (result)
3370 goto err_img_request;
3371
3372 return;
3373
3374err_img_request:
3375 rbd_img_request_put(img_request);
3376err_rq:
3377 if (result)
3378 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003379 obj_op_name(op_type), length, offset, result);
SF Markus Elfringe96a6502014-11-02 15:20:59 +01003380 ceph_put_snap_context(snapc);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003381err:
3382 blk_mq_end_request(rq, result);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003383}
3384
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003385static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3386 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003387{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003388 struct request *rq = bd->rq;
3389 struct work_struct *work = blk_mq_rq_to_pdu(rq);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003390
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003391 queue_work(rbd_wq, work);
3392 return BLK_MQ_RQ_QUEUE_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06003393}
3394
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003395static void rbd_free_disk(struct rbd_device *rbd_dev)
3396{
3397 struct gendisk *disk = rbd_dev->disk;
3398
3399 if (!disk)
3400 return;
3401
Alex Eldera0cab922013-04-25 23:15:08 -05003402 rbd_dev->disk = NULL;
3403 if (disk->flags & GENHD_FL_UP) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003404 del_gendisk(disk);
Alex Eldera0cab922013-04-25 23:15:08 -05003405 if (disk->queue)
3406 blk_cleanup_queue(disk->queue);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003407 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05003408 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003409 put_disk(disk);
3410}
3411
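/*
 * Synchronously read up to @length bytes starting at @offset from the
 * named object into @buf, via a temporary page vector.  Returns the
 * number of bytes read, or a negative errno.  Used below to fetch the
 * format 1 on-disk image header.
 */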
Alex Elder788e2df2013-01-17 12:25:27 -06003412static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3413 const char *object_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003414 u64 offset, u64 length, void *buf)
Alex Elder788e2df2013-01-17 12:25:27 -06003415
3416{
Alex Elder21692382013-04-05 01:27:12 -05003417 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder788e2df2013-01-17 12:25:27 -06003418 struct rbd_obj_request *obj_request;
Alex Elder788e2df2013-01-17 12:25:27 -06003419 struct page **pages = NULL;
3420 u32 page_count;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003421 size_t size;
Alex Elder788e2df2013-01-17 12:25:27 -06003422 int ret;
3423
3424 page_count = (u32) calc_pages_for(offset, length);
3425 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3426 if (IS_ERR(pages))
Jan Karaa8d42052014-10-22 09:17:24 +02003427 return PTR_ERR(pages);
Alex Elder788e2df2013-01-17 12:25:27 -06003428
3429 ret = -ENOMEM;
3430 obj_request = rbd_obj_request_create(object_name, offset, length,
Alex Elder36be9a72013-01-19 00:30:28 -06003431 OBJ_REQUEST_PAGES);
Alex Elder788e2df2013-01-17 12:25:27 -06003432 if (!obj_request)
3433 goto out;
3434
3435 obj_request->pages = pages;
3436 obj_request->page_count = page_count;
3437
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003438 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003439 obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06003440 if (!obj_request->osd_req)
3441 goto out;
3442
Alex Elderc99d2d42013-04-05 01:27:11 -05003443 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3444 offset, length, 0, 0);
Alex Elder406e2c92013-04-15 14:50:36 -05003445 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
Alex Eldera4ce40a2013-04-05 01:27:12 -05003446 obj_request->pages,
Alex Elder44cd1882013-04-05 01:27:12 -05003447 obj_request->length,
3448 obj_request->offset & ~PAGE_MASK,
3449 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003450 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003451
Alex Elder788e2df2013-01-17 12:25:27 -06003452 ret = rbd_obj_request_submit(osdc, obj_request);
3453 if (ret)
3454 goto out;
3455 ret = rbd_obj_request_wait(obj_request);
3456 if (ret)
3457 goto out;
3458
3459 ret = obj_request->result;
3460 if (ret < 0)
3461 goto out;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003462
3463 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3464 size = (size_t) obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003465 ceph_copy_from_page_vector(pages, buf, 0, size);
Alex Elder7097f8d2013-04-30 00:44:33 -05003466 rbd_assert(size <= (size_t)INT_MAX);
3467 ret = (int)size;
Alex Elder788e2df2013-01-17 12:25:27 -06003468out:
3469 if (obj_request)
3470 rbd_obj_request_put(obj_request);
3471 else
3472 ceph_release_page_vector(pages, page_count);
3473
3474 return ret;
3475}
3476
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003477/*
Alex Elder662518b2013-05-06 09:51:29 -05003478 * Read the complete header for the given rbd device. On successful
3479 * return, the rbd_dev->header field will contain up-to-date
3480 * information about the image.
Alex Elder4156d992012-08-02 11:29:46 -05003481 */
Alex Elder99a41eb2013-05-06 09:51:30 -05003482static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d992012-08-02 11:29:46 -05003483{
3484 struct rbd_image_header_ondisk *ondisk = NULL;
3485 u32 snap_count = 0;
3486 u64 names_size = 0;
3487 u32 want_count;
3488 int ret;
3489
3490 /*
3491 * The complete header will include an array of its 64-bit
3492 * snapshot ids, followed by the names of those snapshots as
3493 * a contiguous block of NUL-terminated strings. Note that
3494 * the number of snapshots could change by the time we read
3495 * it in, in which case we re-read it.
3496 */
3497 do {
3498 size_t size;
3499
3500 kfree(ondisk);
3501
3502 size = sizeof (*ondisk);
3503 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3504 size += names_size;
3505 ondisk = kmalloc(size, GFP_KERNEL);
3506 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05003507 return -ENOMEM;
Alex Elder4156d992012-08-02 11:29:46 -05003508
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003509 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003510 0, size, ondisk);
Alex Elder4156d992012-08-02 11:29:46 -05003511 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05003512 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05003513 if ((size_t)ret < size) {
Alex Elder4156d992012-08-02 11:29:46 -05003514 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003515 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3516 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05003517 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003518 }
3519 if (!rbd_dev_ondisk_valid(ondisk)) {
3520 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003521 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05003522 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003523 }
3524
3525 names_size = le64_to_cpu(ondisk->snap_names_len);
3526 want_count = snap_count;
3527 snap_count = le32_to_cpu(ondisk->snap_count);
3528 } while (snap_count != want_count);
3529
Alex Elder662518b2013-05-06 09:51:29 -05003530 ret = rbd_header_from_disk(rbd_dev, ondisk);
3531out:
Alex Elder4156d992012-08-02 11:29:46 -05003532 kfree(ondisk);
3533
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003534 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003535}
3536
Alex Elder15228ed2013-05-01 12:43:03 -05003537/*
3538 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3539 * has disappeared from the (just updated) snapshot context.
3540 */
3541static void rbd_exists_validate(struct rbd_device *rbd_dev)
3542{
3543 u64 snap_id;
3544
3545 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3546 return;
3547
3548 snap_id = rbd_dev->spec->snap_id;
3549 if (snap_id == CEPH_NOSNAP)
3550 return;
3551
3552 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3553 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3554}
3555
Josh Durgin98752012013-08-29 17:26:31 -07003556static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3557{
3558 sector_t size;
Josh Durgin98752012013-08-29 17:26:31 -07003559
3560 /*
Ilya Dryomov811c6682016-04-15 16:22:16 +02003561 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3562 * try to update its size. If REMOVING is set, updating size
3563 * is just useless work since the device can't be opened.
Josh Durgin98752012013-08-29 17:26:31 -07003564 */
Ilya Dryomov811c6682016-04-15 16:22:16 +02003565 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3566 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
Josh Durgin98752012013-08-29 17:26:31 -07003567 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3568 dout("setting size to %llu sectors", (unsigned long long)size);
3569 set_capacity(rbd_dev->disk, size);
3570 revalidate_disk(rbd_dev->disk);
3571 }
3572}
3573
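/*
 * Re-read the image header and update the mapping accordingly:
 * revalidate the parent overlap of a layered image and the existence
 * of a mapped snapshot, then resize the block device if the mapping
 * size changed.
 */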
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003574static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05003575{
Alex Eldere627db02013-05-06 07:40:30 -05003576 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05003577 int ret;
3578
Alex Eldercfbf6372013-05-31 17:40:45 -05003579 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05003580 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04003581
3582 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003583 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003584 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05003585
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003586 /*
3587 * If there is a parent, see if it has disappeared due to the
3588 * mapped image getting flattened.
3589 */
3590 if (rbd_dev->parent) {
3591 ret = rbd_dev_v2_parent_info(rbd_dev);
3592 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003593 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003594 }
3595
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003596 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003597 rbd_dev->mapping.size = rbd_dev->header.image_size;
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003598 } else {
3599 /* validate mapped snapshot's EXISTS flag */
3600 rbd_exists_validate(rbd_dev);
3601 }
Alex Elder15228ed2013-05-01 12:43:03 -05003602
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003603out:
Alex Eldercfbf6372013-05-31 17:40:45 -05003604 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003605 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07003606 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05003607
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003608 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05003609}
3610
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003611static int rbd_init_request(void *data, struct request *rq,
3612 unsigned int hctx_idx, unsigned int request_idx,
3613 unsigned int numa_node)
3614{
3615 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3616
3617 INIT_WORK(work, rbd_queue_workfn);
3618 return 0;
3619}
3620
3621static struct blk_mq_ops rbd_mq_ops = {
3622 .queue_rq = rbd_queue_rq,
3623 .map_queue = blk_mq_map_queue,
3624 .init_request = rbd_init_request,
3625};
3626
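/*
 * Set up the gendisk and its blk-mq queue for the mapped image.  Queue
 * limits (maximum I/O size, segment size, discard granularity) are all
 * derived from the image's object size.
 */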
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003627static int rbd_init_disk(struct rbd_device *rbd_dev)
3628{
3629 struct gendisk *disk;
3630 struct request_queue *q;
Alex Elder593a9e72012-02-07 12:03:37 -06003631 u64 segment_size;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003632 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003633
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003634 /* create gendisk info */
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003635 disk = alloc_disk(single_major ?
3636 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3637 RBD_MINORS_PER_MAJOR);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003638 if (!disk)
Alex Elder1fcdb8a2012-08-29 17:11:06 -05003639 return -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003640
Alex Elderf0f8cef2012-01-29 13:57:44 -06003641 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
Alex Elderde71a292012-07-03 16:01:19 -05003642 rbd_dev->dev_id);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003643 disk->major = rbd_dev->major;
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003644 disk->first_minor = rbd_dev->minor;
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003645 if (single_major)
3646 disk->flags |= GENHD_FL_EXT_DEVT;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003647 disk->fops = &rbd_bd_ops;
3648 disk->private_data = rbd_dev;
3649
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003650 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3651 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003652 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003653 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003654 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003655 rbd_dev->tag_set.nr_hw_queues = 1;
3656 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3657
3658 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3659 if (err)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003660 goto out_disk;
Josh Durgin029bcbd2011-07-22 11:35:23 -07003661
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003662 q = blk_mq_init_queue(&rbd_dev->tag_set);
3663 if (IS_ERR(q)) {
3664 err = PTR_ERR(q);
3665 goto out_tag_set;
3666 }
3667
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03003668 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3669 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06003670
Josh Durgin029bcbd2011-07-22 11:35:23 -07003671 /* set io sizes to object size */
Alex Elder593a9e72012-02-07 12:03:37 -06003672 segment_size = rbd_obj_bytes(&rbd_dev->header);
3673 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
Ilya Dryomov0d9fde42015-10-07 16:09:35 +02003674 q->limits.max_sectors = queue_max_hw_sectors(q);
Ilya Dryomovd3834fe2015-06-12 19:19:02 +03003675 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
Alex Elder593a9e72012-02-07 12:03:37 -06003676 blk_queue_max_segment_size(q, segment_size);
3677 blk_queue_io_min(q, segment_size);
3678 blk_queue_io_opt(q, segment_size);
Josh Durgin029bcbd2011-07-22 11:35:23 -07003679
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003680 /* enable the discard support */
3681 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3682 q->limits.discard_granularity = segment_size;
3683 q->limits.discard_alignment = segment_size;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06003684 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
Josh Durginb76f8232014-04-07 16:52:03 -07003685 q->limits.discard_zeroes_data = 1;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003686
Ronny Hegewaldbae818e2015-10-15 18:50:46 +00003687 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3688 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3689
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003690 disk->queue = q;
3691
3692 q->queuedata = rbd_dev;
3693
3694 rbd_dev->disk = disk;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003695
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003696 return 0;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003697out_tag_set:
3698 blk_mq_free_tag_set(&rbd_dev->tag_set);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003699out_disk:
3700 put_disk(disk);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003701 return err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003702}
3703
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003704/*
3705 sysfs
3706*/
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003707
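/*
 * The show functions below back the per-device attributes exposed
 * under /sys/bus/rbd/devices/<id>/ (see
 * Documentation/ABI/testing/sysfs-bus-rbd for the attribute names).
 */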
Alex Elder593a9e72012-02-07 12:03:37 -06003708static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3709{
3710 return container_of(dev, struct rbd_device, dev);
3711}
3712
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003713static ssize_t rbd_size_show(struct device *dev,
3714 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003715{
Alex Elder593a9e72012-02-07 12:03:37 -06003716 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003717
Alex Elderfc71d832013-04-26 15:44:36 -05003718 return sprintf(buf, "%llu\n",
3719 (unsigned long long)rbd_dev->mapping.size);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003720}
3721
Alex Elder34b13182012-07-13 20:35:12 -05003722/*
3723 * Note this shows the features for whatever's mapped, which is not
3724 * necessarily the base image.
3725 */
3726static ssize_t rbd_features_show(struct device *dev,
3727 struct device_attribute *attr, char *buf)
3728{
3729 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3730
3731 return sprintf(buf, "0x%016llx\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003732 (unsigned long long)rbd_dev->mapping.features);
Alex Elder34b13182012-07-13 20:35:12 -05003733}
3734
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003735static ssize_t rbd_major_show(struct device *dev,
3736 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003737{
Alex Elder593a9e72012-02-07 12:03:37 -06003738 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003739
Alex Elderfc71d832013-04-26 15:44:36 -05003740 if (rbd_dev->major)
3741 return sprintf(buf, "%d\n", rbd_dev->major);
3742
3743 return sprintf(buf, "(none)\n");
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003744}
Alex Elderfc71d832013-04-26 15:44:36 -05003745
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003746static ssize_t rbd_minor_show(struct device *dev,
3747 struct device_attribute *attr, char *buf)
3748{
3749 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3750
3751 return sprintf(buf, "%d\n", rbd_dev->minor);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003752}
3753
3754static ssize_t rbd_client_id_show(struct device *dev,
3755 struct device_attribute *attr, char *buf)
3756{
Alex Elder593a9e72012-02-07 12:03:37 -06003757 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003758
Alex Elder1dbb4392012-01-24 10:08:37 -06003759 return sprintf(buf, "client%lld\n",
Ilya Dryomov033268a2016-08-12 14:59:58 +02003760 ceph_client_gid(rbd_dev->rbd_client->client));
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003761}
3762
3763static ssize_t rbd_pool_show(struct device *dev,
3764 struct device_attribute *attr, char *buf)
3765{
Alex Elder593a9e72012-02-07 12:03:37 -06003766 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003767
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003768 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003769}
3770
Alex Elder9bb2f332012-07-12 10:46:35 -05003771static ssize_t rbd_pool_id_show(struct device *dev,
3772 struct device_attribute *attr, char *buf)
3773{
3774 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3775
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003776 return sprintf(buf, "%llu\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003777 (unsigned long long) rbd_dev->spec->pool_id);
Alex Elder9bb2f332012-07-12 10:46:35 -05003778}
3779
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003780static ssize_t rbd_name_show(struct device *dev,
3781 struct device_attribute *attr, char *buf)
3782{
Alex Elder593a9e72012-02-07 12:03:37 -06003783 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003784
Alex Eldera92ffdf2012-10-30 19:40:33 -05003785 if (rbd_dev->spec->image_name)
3786 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3787
3788 return sprintf(buf, "(unknown)\n");
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003789}
3790
Alex Elder589d30e2012-07-10 20:30:11 -05003791static ssize_t rbd_image_id_show(struct device *dev,
3792 struct device_attribute *attr, char *buf)
3793{
3794 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3795
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003796 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05003797}
3798
Alex Elder34b13182012-07-13 20:35:12 -05003799/*
3800 * Shows the name of the currently-mapped snapshot (or
3801 * RBD_SNAP_HEAD_NAME for the base image).
3802 */
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003803static ssize_t rbd_snap_show(struct device *dev,
3804 struct device_attribute *attr,
3805 char *buf)
3806{
Alex Elder593a9e72012-02-07 12:03:37 -06003807 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003808
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003809 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003810}
3811
Alex Elder86b00e02012-10-25 23:34:42 -05003812/*
Ilya Dryomovff961282014-07-22 21:53:07 +04003813 * For a v2 image, shows the chain of parent images, separated by empty
3814 * lines. For v1 images or if there is no parent, shows "(no parent
3815 * image)".
Alex Elder86b00e02012-10-25 23:34:42 -05003816 */
3817static ssize_t rbd_parent_show(struct device *dev,
Ilya Dryomovff961282014-07-22 21:53:07 +04003818 struct device_attribute *attr,
3819 char *buf)
Alex Elder86b00e02012-10-25 23:34:42 -05003820{
3821 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Ilya Dryomovff961282014-07-22 21:53:07 +04003822 ssize_t count = 0;
Alex Elder86b00e02012-10-25 23:34:42 -05003823
Ilya Dryomovff961282014-07-22 21:53:07 +04003824 if (!rbd_dev->parent)
Alex Elder86b00e02012-10-25 23:34:42 -05003825 return sprintf(buf, "(no parent image)\n");
3826
Ilya Dryomovff961282014-07-22 21:53:07 +04003827 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3828 struct rbd_spec *spec = rbd_dev->parent_spec;
Alex Elder86b00e02012-10-25 23:34:42 -05003829
Ilya Dryomovff961282014-07-22 21:53:07 +04003830 count += sprintf(&buf[count], "%s"
3831 "pool_id %llu\npool_name %s\n"
3832 "image_id %s\nimage_name %s\n"
3833 "snap_id %llu\nsnap_name %s\n"
3834 "overlap %llu\n",
3835 !count ? "" : "\n", /* first? */
3836 spec->pool_id, spec->pool_name,
3837 spec->image_id, spec->image_name ?: "(unknown)",
3838 spec->snap_id, spec->snap_name,
3839 rbd_dev->parent_overlap);
3840 }
Alex Elder86b00e02012-10-25 23:34:42 -05003841
Ilya Dryomovff961282014-07-22 21:53:07 +04003842 return count;
Alex Elder86b00e02012-10-25 23:34:42 -05003843}
3844
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003845static ssize_t rbd_image_refresh(struct device *dev,
3846 struct device_attribute *attr,
3847 const char *buf,
3848 size_t size)
3849{
Alex Elder593a9e72012-02-07 12:03:37 -06003850 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05003851 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003852
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003853 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05003854 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003855 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05003856
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003857 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003858}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003859
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003860static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
Alex Elder34b13182012-07-13 20:35:12 -05003861static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003862static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003863static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003864static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3865static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
Alex Elder9bb2f332012-07-12 10:46:35 -05003866static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003867static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
Alex Elder589d30e2012-07-10 20:30:11 -05003868static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003869static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3870static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
Alex Elder86b00e02012-10-25 23:34:42 -05003871static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003872
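/*
 * Attributes published for each mapped image under
 * /sys/bus/rbd/devices/<dev-id>/.
 */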
3873static struct attribute *rbd_attrs[] = {
3874 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05003875 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003876 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003877 &dev_attr_minor.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003878 &dev_attr_client_id.attr,
3879 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05003880 &dev_attr_pool_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003881 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05003882 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003883 &dev_attr_current_snap.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05003884 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003885 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003886 NULL
3887};
3888
3889static struct attribute_group rbd_attr_group = {
3890 .attrs = rbd_attrs,
3891};
3892
3893static const struct attribute_group *rbd_attr_groups[] = {
3894 &rbd_attr_group,
3895 NULL
3896};
3897
Ilya Dryomov6cac4692015-10-16 20:11:25 +02003898static void rbd_dev_release(struct device *dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003899
3900static struct device_type rbd_device_type = {
3901 .name = "rbd",
3902 .groups = rbd_attr_groups,
Ilya Dryomov6cac4692015-10-16 20:11:25 +02003903 .release = rbd_dev_release,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003904};
3905
Alex Elder8b8fb992012-10-26 17:25:24 -05003906static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3907{
3908 kref_get(&spec->kref);
3909
3910 return spec;
3911}
3912
3913static void rbd_spec_free(struct kref *kref);
3914static void rbd_spec_put(struct rbd_spec *spec)
3915{
3916 if (spec)
3917 kref_put(&spec->kref, rbd_spec_free);
3918}
3919
3920static struct rbd_spec *rbd_spec_alloc(void)
3921{
3922 struct rbd_spec *spec;
3923
3924 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3925 if (!spec)
3926 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04003927
3928 spec->pool_id = CEPH_NOPOOL;
3929 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05003930 kref_init(&spec->kref);
3931
Alex Elder8b8fb992012-10-26 17:25:24 -05003932 return spec;
3933}
3934
3935static void rbd_spec_free(struct kref *kref)
3936{
3937 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3938
3939 kfree(spec->pool_name);
3940 kfree(spec->image_id);
3941 kfree(spec->image_name);
3942 kfree(spec->snap_name);
3943 kfree(spec);
3944}
3945
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02003946static void rbd_dev_free(struct rbd_device *rbd_dev)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003947{
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003948 ceph_oid_destroy(&rbd_dev->header_oid);
Ilya Dryomov6b6dddb2016-08-05 16:15:38 +02003949 ceph_oloc_destroy(&rbd_dev->header_oloc);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003950
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003951 rbd_put_client(rbd_dev->rbd_client);
3952 rbd_spec_put(rbd_dev->spec);
3953 kfree(rbd_dev->opts);
3954 kfree(rbd_dev);
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02003955}
3956
3957static void rbd_dev_release(struct device *dev)
3958{
3959 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3960 bool need_put = !!rbd_dev->opts;
3961
3962 if (need_put) {
3963 destroy_workqueue(rbd_dev->task_wq);
3964 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
3965 }
3966
3967 rbd_dev_free(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003968
3969 /*
3970 * This is racy, but way better than dropping the module reference outside of
3971 * the release callback. The race window is pretty small, so
3972 * doing something similar to dm (dm-builtin.c) is overkill.
3973 */
3974 if (need_put)
3975 module_put(THIS_MODULE);
3976}
3977
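/*
 * Allocate and initialize the fields common to mapping and parent
 * (probe-only) devices. This helper does not take its own references
 * on the rbd_client or the spec; callers arrange for those.
 */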
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02003978static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
3979 struct rbd_spec *spec)
Alex Elderc53d5892012-10-25 23:34:42 -05003980{
3981 struct rbd_device *rbd_dev;
3982
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02003983 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
Alex Elderc53d5892012-10-25 23:34:42 -05003984 if (!rbd_dev)
3985 return NULL;
3986
3987 spin_lock_init(&rbd_dev->lock);
3988 INIT_LIST_HEAD(&rbd_dev->node);
Alex Elderc53d5892012-10-25 23:34:42 -05003989 init_rwsem(&rbd_dev->header_rwsem);
3990
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003991 ceph_oid_init(&rbd_dev->header_oid);
Ilya Dryomov922dab62016-05-26 01:15:02 +02003992 ceph_oloc_init(&rbd_dev->header_oloc);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003993
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003994 rbd_dev->dev.bus = &rbd_bus_type;
3995 rbd_dev->dev.type = &rbd_device_type;
3996 rbd_dev->dev.parent = &rbd_root_dev;
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003997 device_initialize(&rbd_dev->dev);
3998
Alex Elderc53d5892012-10-25 23:34:42 -05003999 rbd_dev->rbd_client = rbdc;
Ilya Dryomovd1475432015-06-22 13:24:48 +03004000 rbd_dev->spec = spec;
Alex Elder0903e872012-11-14 12:25:19 -06004001
Yan, Zheng76271512016-02-03 21:24:49 +08004002 rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
4003 rbd_dev->layout.stripe_count = 1;
4004 rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
4005 rbd_dev->layout.pool_id = spec->pool_id;
Yan, Zheng30c156d2016-02-14 11:24:31 +08004006 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
Alex Elder0903e872012-11-14 12:25:19 -06004007
Alex Elderc53d5892012-10-25 23:34:42 -05004008 return rbd_dev;
4009}
4010
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02004011/*
4012 * Create a mapping rbd_dev.
4013 */
4014static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4015 struct rbd_spec *spec,
4016 struct rbd_options *opts)
4017{
4018 struct rbd_device *rbd_dev;
4019
4020 rbd_dev = __rbd_dev_create(rbdc, spec);
4021 if (!rbd_dev)
4022 return NULL;
4023
4024 rbd_dev->opts = opts;
4025
4026 /* get an id and fill in device name */
4027 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4028 minor_to_rbd_dev_id(1 << MINORBITS),
4029 GFP_KERNEL);
4030 if (rbd_dev->dev_id < 0)
4031 goto fail_rbd_dev;
4032
4033 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4034 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4035 rbd_dev->name);
4036 if (!rbd_dev->task_wq)
4037 goto fail_dev_id;
4038
4039 /* we have a ref from do_rbd_add() */
4040 __module_get(THIS_MODULE);
4041
4042 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4043 return rbd_dev;
4044
4045fail_dev_id:
4046 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4047fail_rbd_dev:
4048 rbd_dev_free(rbd_dev);
4049 return NULL;
4050}
4051
Alex Elderc53d5892012-10-25 23:34:42 -05004052static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4053{
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004054 if (rbd_dev)
4055 put_device(&rbd_dev->dev);
Alex Elderc53d5892012-10-25 23:34:42 -05004056}
4057
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004058/*
Alex Elder9d475de2012-07-03 16:01:19 -05004059 * Get the size and object order for an image snapshot, or if
 4060 * snap_id is CEPH_NOSNAP, get this information for the base
4061 * image.
4062 */
4063static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4064 u8 *order, u64 *snap_size)
4065{
4066 __le64 snapid = cpu_to_le64(snap_id);
4067 int ret;
4068 struct {
4069 u8 order;
4070 __le64 size;
4071 } __attribute__ ((packed)) size_buf = { 0 };
4072
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004073 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder9d475de2012-07-03 16:01:19 -05004074 "rbd", "get_size",
Alex Elder41579762013-04-21 12:14:45 -05004075 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004076 &size_buf, sizeof (size_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004077 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder9d475de2012-07-03 16:01:19 -05004078 if (ret < 0)
4079 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004080 if (ret < sizeof (size_buf))
4081 return -ERANGE;
Alex Elder9d475de2012-07-03 16:01:19 -05004082
Josh Durginc3545572013-08-28 17:08:10 -07004083 if (order) {
Alex Elderc86f86e2013-04-25 15:09:41 -05004084 *order = size_buf.order;
Josh Durginc3545572013-08-28 17:08:10 -07004085 dout(" order %u", (unsigned int)*order);
4086 }
Alex Elder9d475de2012-07-03 16:01:19 -05004087 *snap_size = le64_to_cpu(size_buf.size);
4088
Josh Durginc3545572013-08-28 17:08:10 -07004089 dout(" snap_id 0x%016llx snap_size = %llu\n",
4090 (unsigned long long)snap_id,
Alex Elder57385b52013-04-21 12:14:45 -05004091 (unsigned long long)*snap_size);
Alex Elder9d475de2012-07-03 16:01:19 -05004092
4093 return 0;
4094}
4095
4096static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4097{
4098 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4099 &rbd_dev->header.obj_order,
4100 &rbd_dev->header.image_size);
4101}
4102
Alex Elder1e130192012-07-03 16:01:19 -05004103static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4104{
4105 void *reply_buf;
4106 int ret;
4107 void *p;
4108
4109 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4110 if (!reply_buf)
4111 return -ENOMEM;
4112
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004113 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder41579762013-04-21 12:14:45 -05004114 "rbd", "get_object_prefix", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004115 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004116 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder1e130192012-07-03 16:01:19 -05004117 if (ret < 0)
4118 goto out;
4119
4120 p = reply_buf;
4121 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
Alex Elder57385b52013-04-21 12:14:45 -05004122 p + ret, NULL, GFP_NOIO);
4123 ret = 0;
Alex Elder1e130192012-07-03 16:01:19 -05004124
4125 if (IS_ERR(rbd_dev->header.object_prefix)) {
4126 ret = PTR_ERR(rbd_dev->header.object_prefix);
4127 rbd_dev->header.object_prefix = NULL;
4128 } else {
4129 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4130 }
Alex Elder1e130192012-07-03 16:01:19 -05004131out:
4132 kfree(reply_buf);
4133
4134 return ret;
4135}
4136
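/*
 * Fetch the feature bits for the given snapshot (or for the base
 * image if snap_id is CEPH_NOSNAP). Fails with -ENXIO if the image
 * uses incompatible features this driver does not support.
 */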
Alex Elderb1b54022012-07-03 16:01:19 -05004137static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4138 u64 *snap_features)
4139{
4140 __le64 snapid = cpu_to_le64(snap_id);
4141 struct {
4142 __le64 features;
4143 __le64 incompat;
Alex Elder41579762013-04-21 12:14:45 -05004144 } __attribute__ ((packed)) features_buf = { 0 };
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004145 u64 unsup;
Alex Elderb1b54022012-07-03 16:01:19 -05004146 int ret;
4147
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004148 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elderb1b54022012-07-03 16:01:19 -05004149 "rbd", "get_features",
Alex Elder41579762013-04-21 12:14:45 -05004150 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004151 &features_buf, sizeof (features_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004152 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderb1b54022012-07-03 16:01:19 -05004153 if (ret < 0)
4154 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004155 if (ret < sizeof (features_buf))
4156 return -ERANGE;
Alex Elderd8891402012-10-09 13:50:17 -07004157
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004158 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4159 if (unsup) {
4160 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4161 unsup);
Alex Elderb8f5c6e2012-11-01 08:39:26 -05004162 return -ENXIO;
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004163 }
Alex Elderd8891402012-10-09 13:50:17 -07004164
Alex Elderb1b54022012-07-03 16:01:19 -05004165 *snap_features = le64_to_cpu(features_buf.features);
4166
4167 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
Alex Elder57385b52013-04-21 12:14:45 -05004168 (unsigned long long)snap_id,
4169 (unsigned long long)*snap_features,
4170 (unsigned long long)le64_to_cpu(features_buf.incompat));
Alex Elderb1b54022012-07-03 16:01:19 -05004171
4172 return 0;
4173}
4174
4175static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4176{
4177 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4178 &rbd_dev->header.features);
4179}
4180
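/*
 * Fetch clone (parent) information for a format 2 image: the parent
 * pool id, image id, snapshot id and the overlap with the parent.
 * Also detects a previously known parent going away, which happens
 * when the clone is flattened.
 */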
Alex Elder86b00e02012-10-25 23:34:42 -05004181static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4182{
4183 struct rbd_spec *parent_spec;
4184 size_t size;
4185 void *reply_buf = NULL;
4186 __le64 snapid;
4187 void *p;
4188 void *end;
Alex Elder642a2532013-05-06 17:40:33 -05004189 u64 pool_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004190 char *image_id;
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004191 u64 snap_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004192 u64 overlap;
Alex Elder86b00e02012-10-25 23:34:42 -05004193 int ret;
4194
4195 parent_spec = rbd_spec_alloc();
4196 if (!parent_spec)
4197 return -ENOMEM;
4198
4199 size = sizeof (__le64) + /* pool_id */
4200 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4201 sizeof (__le64) + /* snap_id */
4202 sizeof (__le64); /* overlap */
4203 reply_buf = kmalloc(size, GFP_KERNEL);
4204 if (!reply_buf) {
4205 ret = -ENOMEM;
4206 goto out_err;
4207 }
4208
Ilya Dryomov4d9b67c2014-07-24 10:42:13 +04004209 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004210 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder86b00e02012-10-25 23:34:42 -05004211 "rbd", "get_parent",
Alex Elder41579762013-04-21 12:14:45 -05004212 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004213 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004214 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder86b00e02012-10-25 23:34:42 -05004215 if (ret < 0)
4216 goto out_err;
4217
Alex Elder86b00e02012-10-25 23:34:42 -05004218 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004219 end = reply_buf + ret;
4220 ret = -ERANGE;
Alex Elder642a2532013-05-06 17:40:33 -05004221 ceph_decode_64_safe(&p, end, pool_id, out_err);
Alex Elder392a9da2013-05-06 17:40:33 -05004222 if (pool_id == CEPH_NOPOOL) {
4223 /*
4224 * Either the parent never existed, or we have
4225 * record of it but the image got flattened so it no
4226 * longer has a parent. When the parent of a
4227 * layered image disappears we immediately set the
4228 * overlap to 0. The effect of this is that all new
4229 * requests will be treated as if the image had no
4230 * parent.
4231 */
4232 if (rbd_dev->parent_overlap) {
4233 rbd_dev->parent_overlap = 0;
Alex Elder392a9da2013-05-06 17:40:33 -05004234 rbd_dev_parent_put(rbd_dev);
4235 pr_info("%s: clone image has been flattened\n",
4236 rbd_dev->disk->disk_name);
4237 }
4238
Alex Elder86b00e02012-10-25 23:34:42 -05004239 goto out; /* No parent? No problem. */
Alex Elder392a9da2013-05-06 17:40:33 -05004240 }
Alex Elder86b00e02012-10-25 23:34:42 -05004241
Alex Elder0903e872012-11-14 12:25:19 -06004242 /* The ceph file layout needs to fit pool id in 32 bits */
4243
4244 ret = -EIO;
Alex Elder642a2532013-05-06 17:40:33 -05004245 if (pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04004246 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
Alex Elder642a2532013-05-06 17:40:33 -05004247 (unsigned long long)pool_id, U32_MAX);
Alex Elder57385b52013-04-21 12:14:45 -05004248 goto out_err;
Alex Elderc0cd10db2013-04-26 09:43:47 -05004249 }
Alex Elder0903e872012-11-14 12:25:19 -06004250
Alex Elder979ed482012-11-01 08:39:26 -05004251 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elder86b00e02012-10-25 23:34:42 -05004252 if (IS_ERR(image_id)) {
4253 ret = PTR_ERR(image_id);
4254 goto out_err;
4255 }
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004256 ceph_decode_64_safe(&p, end, snap_id, out_err);
Alex Elder86b00e02012-10-25 23:34:42 -05004257 ceph_decode_64_safe(&p, end, overlap, out_err);
4258
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004259 /*
4260 * The parent won't change (except when the clone is
 4261 * flattened, which is handled above). So we only need to
 4262 * record the parent spec if we have not already done so.
4263 */
4264 if (!rbd_dev->parent_spec) {
4265 parent_spec->pool_id = pool_id;
4266 parent_spec->image_id = image_id;
4267 parent_spec->snap_id = snap_id;
Alex Elder70cf49c2013-05-06 17:40:33 -05004268 rbd_dev->parent_spec = parent_spec;
4269 parent_spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovfbba11b2014-06-27 21:46:33 +04004270 } else {
4271 kfree(image_id);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004272 }
4273
4274 /*
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004275 * We always update the parent overlap. If it's zero we issue
4276 * a warning, as we will proceed as if there was no parent.
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004277 */
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004278 if (!overlap) {
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004279 if (parent_spec) {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004280 /* refresh, careful to warn just once */
4281 if (rbd_dev->parent_overlap)
4282 rbd_warn(rbd_dev,
4283 "clone now standalone (overlap became 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004284 } else {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004285 /* initial probe */
4286 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004287 }
Alex Elder70cf49c2013-05-06 17:40:33 -05004288 }
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004289 rbd_dev->parent_overlap = overlap;
4290
Alex Elder86b00e02012-10-25 23:34:42 -05004291out:
4292 ret = 0;
4293out_err:
4294 kfree(reply_buf);
4295 rbd_spec_put(parent_spec);
4296
4297 return ret;
4298}
4299
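/*
 * Read the image's stripe unit and stripe count. Only the default
 * layout (stripe unit equal to the object size, stripe count of 1)
 * is accepted, since fancy striping is not supported here.
 */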
Alex Eldercc070d52013-04-21 12:14:45 -05004300static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4301{
4302 struct {
4303 __le64 stripe_unit;
4304 __le64 stripe_count;
4305 } __attribute__ ((packed)) striping_info_buf = { 0 };
4306 size_t size = sizeof (striping_info_buf);
4307 void *p;
4308 u64 obj_size;
4309 u64 stripe_unit;
4310 u64 stripe_count;
4311 int ret;
4312
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004313 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Eldercc070d52013-04-21 12:14:45 -05004314 "rbd", "get_stripe_unit_count", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004315 (char *)&striping_info_buf, size);
Alex Eldercc070d52013-04-21 12:14:45 -05004316 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4317 if (ret < 0)
4318 return ret;
4319 if (ret < size)
4320 return -ERANGE;
4321
4322 /*
4323 * We don't actually support the "fancy striping" feature
4324 * (STRIPINGV2) yet, but if the striping sizes are the
4325 * defaults the behavior is the same as before. So find
4326 * out, and only fail if the image has non-default values.
4327 */
4328 ret = -EINVAL;
4329 obj_size = (u64)1 << rbd_dev->header.obj_order;
4330 p = &striping_info_buf;
4331 stripe_unit = ceph_decode_64(&p);
4332 if (stripe_unit != obj_size) {
4333 rbd_warn(rbd_dev, "unsupported stripe unit "
4334 "(got %llu want %llu)",
4335 stripe_unit, obj_size);
4336 return -EINVAL;
4337 }
4338 stripe_count = ceph_decode_64(&p);
4339 if (stripe_count != 1) {
4340 rbd_warn(rbd_dev, "unsupported stripe count "
4341 "(got %llu want 1)", stripe_count);
4342 return -EINVAL;
4343 }
Alex Elder500d0c02013-04-26 09:43:47 -05004344 rbd_dev->header.stripe_unit = stripe_unit;
4345 rbd_dev->header.stripe_count = stripe_count;
Alex Eldercc070d52013-04-21 12:14:45 -05004346
4347 return 0;
4348}
4349
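/*
 * Look up a format 2 image's name from its id by querying the
 * RBD_DIRECTORY object. Returns NULL on failure; callers treat a
 * missing name as non-fatal.
 */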
Alex Elder9e15b772012-10-30 19:40:33 -05004350static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4351{
4352 size_t image_id_size;
4353 char *image_id;
4354 void *p;
4355 void *end;
4356 size_t size;
4357 void *reply_buf = NULL;
4358 size_t len = 0;
4359 char *image_name = NULL;
4360 int ret;
4361
4362 rbd_assert(!rbd_dev->spec->image_name);
4363
Alex Elder69e7a022012-11-01 08:39:26 -05004364 len = strlen(rbd_dev->spec->image_id);
4365 image_id_size = sizeof (__le32) + len;
Alex Elder9e15b772012-10-30 19:40:33 -05004366 image_id = kmalloc(image_id_size, GFP_KERNEL);
4367 if (!image_id)
4368 return NULL;
4369
4370 p = image_id;
Alex Elder41579762013-04-21 12:14:45 -05004371 end = image_id + image_id_size;
Alex Elder57385b52013-04-21 12:14:45 -05004372 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
Alex Elder9e15b772012-10-30 19:40:33 -05004373
4374 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4375 reply_buf = kmalloc(size, GFP_KERNEL);
4376 if (!reply_buf)
4377 goto out;
4378
Alex Elder36be9a72013-01-19 00:30:28 -06004379 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
Alex Elder9e15b772012-10-30 19:40:33 -05004380 "rbd", "dir_get_name",
4381 image_id, image_id_size,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004382 reply_buf, size);
Alex Elder9e15b772012-10-30 19:40:33 -05004383 if (ret < 0)
4384 goto out;
4385 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004386 end = reply_buf + ret;
4387
Alex Elder9e15b772012-10-30 19:40:33 -05004388 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4389 if (IS_ERR(image_name))
4390 image_name = NULL;
4391 else
4392 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4393out:
4394 kfree(reply_buf);
4395 kfree(image_id);
4396
4397 return image_name;
4398}
4399
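/*
 * Format 1 images keep their snapshot names in a single buffer of
 * consecutive NUL-terminated strings, parallel to the ids in the
 * snapshot context; walk both in lockstep to find a match.
 */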
Alex Elder2ad3d712013-04-30 00:44:33 -05004400static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4401{
4402 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4403 const char *snap_name;
4404 u32 which = 0;
4405
4406 /* Skip over names until we find the one we are looking for */
4407
4408 snap_name = rbd_dev->header.snap_names;
4409 while (which < snapc->num_snaps) {
4410 if (!strcmp(name, snap_name))
4411 return snapc->snaps[which];
4412 snap_name += strlen(snap_name) + 1;
4413 which++;
4414 }
4415 return CEPH_NOSNAP;
4416}
4417
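/*
 * For format 2 images the names are not cached in the header, so
 * fetch each snapshot id's name from the OSDs and compare, skipping
 * snapshots that have disappeared in the meantime (-ENOENT).
 */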
4418static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4419{
4420 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4421 u32 which;
4422 bool found = false;
4423 u64 snap_id;
4424
4425 for (which = 0; !found && which < snapc->num_snaps; which++) {
4426 const char *snap_name;
4427
4428 snap_id = snapc->snaps[which];
4429 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
Josh Durginefadc982013-08-29 19:16:42 -07004430 if (IS_ERR(snap_name)) {
4431 /* ignore no-longer existing snapshots */
4432 if (PTR_ERR(snap_name) == -ENOENT)
4433 continue;
4434 else
4435 break;
4436 }
Alex Elder2ad3d712013-04-30 00:44:33 -05004437 found = !strcmp(name, snap_name);
4438 kfree(snap_name);
4439 }
4440 return found ? snap_id : CEPH_NOSNAP;
4441}
4442
4443/*
4444 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4445 * no snapshot by that name is found, or if an error occurs.
4446 */
4447static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4448{
4449 if (rbd_dev->image_format == 1)
4450 return rbd_v1_snap_id_by_name(rbd_dev, name);
4451
4452 return rbd_v2_snap_id_by_name(rbd_dev, name);
4453}
4454
Alex Elder9e15b772012-10-30 19:40:33 -05004455/*
Ilya Dryomov04077592014-07-23 17:11:20 +04004456 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05004457 */
Ilya Dryomov04077592014-07-23 17:11:20 +04004458static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4459{
4460 struct rbd_spec *spec = rbd_dev->spec;
4461
4462 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4463 rbd_assert(spec->image_id && spec->image_name);
4464 rbd_assert(spec->snap_name);
4465
4466 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4467 u64 snap_id;
4468
4469 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4470 if (snap_id == CEPH_NOSNAP)
4471 return -ENOENT;
4472
4473 spec->snap_id = snap_id;
4474 } else {
4475 spec->snap_id = CEPH_NOSNAP;
4476 }
4477
4478 return 0;
4479}
4480
4481/*
4482 * A parent image will have all ids but none of the names.
4483 *
4484 * All names in an rbd spec are dynamically allocated. It's OK if we
4485 * can't figure out the name for an image id.
4486 */
4487static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05004488{
Alex Elder2e9f7f12013-04-26 09:43:48 -05004489 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4490 struct rbd_spec *spec = rbd_dev->spec;
4491 const char *pool_name;
4492 const char *image_name;
4493 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004494 int ret;
4495
Ilya Dryomov04077592014-07-23 17:11:20 +04004496 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4497 rbd_assert(spec->image_id);
4498 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05004499
Alex Elder2e9f7f12013-04-26 09:43:48 -05004500 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05004501
Alex Elder2e9f7f12013-04-26 09:43:48 -05004502 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4503 if (!pool_name) {
4504 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05004505 return -EIO;
4506 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05004507 pool_name = kstrdup(pool_name, GFP_KERNEL);
4508 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05004509 return -ENOMEM;
4510
4511 /* Fetch the image name; tolerate failure here */
4512
Alex Elder2e9f7f12013-04-26 09:43:48 -05004513 image_name = rbd_dev_image_name(rbd_dev);
4514 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05004515 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05004516
Ilya Dryomov04077592014-07-23 17:11:20 +04004517 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05004518
Alex Elder2e9f7f12013-04-26 09:43:48 -05004519 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07004520 if (IS_ERR(snap_name)) {
4521 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004522 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05004523 }
4524
4525 spec->pool_name = pool_name;
4526 spec->image_name = image_name;
4527 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004528
4529 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04004530
Alex Elder9e15b772012-10-30 19:40:33 -05004531out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05004532 kfree(image_name);
4533 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004534 return ret;
4535}
4536
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004537static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05004538{
4539 size_t size;
4540 int ret;
4541 void *reply_buf;
4542 void *p;
4543 void *end;
4544 u64 seq;
4545 u32 snap_count;
4546 struct ceph_snap_context *snapc;
4547 u32 i;
4548
4549 /*
4550 * We'll need room for the seq value (maximum snapshot id),
4551 * snapshot count, and array of that many snapshot ids.
4552 * For now we have a fixed upper limit on the number we're
4553 * prepared to receive.
4554 */
4555 size = sizeof (__le64) + sizeof (__le32) +
4556 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4557 reply_buf = kzalloc(size, GFP_KERNEL);
4558 if (!reply_buf)
4559 return -ENOMEM;
4560
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004561 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder41579762013-04-21 12:14:45 -05004562 "rbd", "get_snapcontext", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004563 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004564 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05004565 if (ret < 0)
4566 goto out;
4567
Alex Elder35d489f2012-07-03 16:01:19 -05004568 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004569 end = reply_buf + ret;
4570 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05004571 ceph_decode_64_safe(&p, end, seq, out);
4572 ceph_decode_32_safe(&p, end, snap_count, out);
4573
4574 /*
4575 * Make sure the reported number of snapshot ids wouldn't go
4576 * beyond the end of our buffer. But before checking that,
4577 * make sure the computed size of the snapshot context we
4578 * allocate is representable in a size_t.
4579 */
4580 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4581 / sizeof (u64)) {
4582 ret = -EINVAL;
4583 goto out;
4584 }
4585 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4586 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05004587 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05004588
Alex Elder812164f82013-04-30 00:44:32 -05004589 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05004590 if (!snapc) {
4591 ret = -ENOMEM;
4592 goto out;
4593 }
Alex Elder35d489f2012-07-03 16:01:19 -05004594 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05004595 for (i = 0; i < snap_count; i++)
4596 snapc->snaps[i] = ceph_decode_64(&p);
4597
Alex Elder49ece552013-05-06 08:37:00 -05004598 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05004599 rbd_dev->header.snapc = snapc;
4600
4601 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05004602 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05004603out:
4604 kfree(reply_buf);
4605
Alex Elder57385b52013-04-21 12:14:45 -05004606 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05004607}
4608
Alex Elder54cac612013-04-30 00:44:33 -05004609static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4610 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004611{
4612 size_t size;
4613 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05004614 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004615 int ret;
4616 void *p;
4617 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004618 char *snap_name;
4619
4620 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4621 reply_buf = kmalloc(size, GFP_KERNEL);
4622 if (!reply_buf)
4623 return ERR_PTR(-ENOMEM);
4624
Alex Elder54cac612013-04-30 00:44:33 -05004625 snapid = cpu_to_le64(snap_id);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004626 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004627 "rbd", "get_snapshot_name",
Alex Elder54cac612013-04-30 00:44:33 -05004628 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004629 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004630 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05004631 if (ret < 0) {
4632 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004633 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05004634 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004635
4636 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004637 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05004638 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05004639 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004640 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004641
Alex Elderf40eb342013-04-25 15:09:42 -05004642 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05004643 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004644out:
4645 kfree(reply_buf);
4646
Alex Elderf40eb342013-04-25 15:09:42 -05004647 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004648}
4649
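/*
 * Read (or refresh) a format 2 image header. The immutable fields
 * (object prefix, features, striping parameters) are fetched only on
 * the first call; refreshes re-read the size and snapshot context.
 */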
Alex Elder2df3fac2013-05-06 09:51:30 -05004650static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05004651{
Alex Elder2df3fac2013-05-06 09:51:30 -05004652 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05004653 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05004654
Josh Durgin1617e402013-06-12 14:43:10 -07004655 ret = rbd_dev_v2_image_size(rbd_dev);
4656 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004657 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07004658
Alex Elder2df3fac2013-05-06 09:51:30 -05004659 if (first_time) {
4660 ret = rbd_dev_v2_header_onetime(rbd_dev);
4661 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004662 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05004663 }
4664
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004665 ret = rbd_dev_v2_snap_context(rbd_dev);
Ilya Dryomovd194cd12015-08-31 18:22:10 +03004666 if (ret && first_time) {
4667 kfree(rbd_dev->header.object_prefix);
4668 rbd_dev->header.object_prefix = NULL;
4669 }
Alex Elder117973f2012-08-31 17:29:55 -05004670
4671 return ret;
4672}
4673
Ilya Dryomova720ae02014-07-23 17:11:19 +04004674static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4675{
4676 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4677
4678 if (rbd_dev->image_format == 1)
4679 return rbd_dev_v1_header_info(rbd_dev);
4680
4681 return rbd_dev_v2_header_info(rbd_dev);
4682}
4683
Alex Elder1ddbe942012-01-29 13:57:44 -06004684/*
Alex Eldere28fff262012-02-02 08:13:30 -06004685 * Skips over white space at *buf, and updates *buf to point to the
4686 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06004687 * the token (string of non-white space characters) found. Note
4688 * that *buf must be terminated with '\0'.
Alex Eldere28fff262012-02-02 08:13:30 -06004689 */
4690static inline size_t next_token(const char **buf)
4691{
4692 /*
4693 * These are the characters that produce nonzero for
4694 * isspace() in the "C" and "POSIX" locales.
4695 */
4696 const char *spaces = " \f\n\r\t\v";
4697
4698 *buf += strspn(*buf, spaces); /* Find start of token */
4699
4700 return strcspn(*buf, spaces); /* Return token length */
4701}
4702
4703/*
Alex Elderea3352f2012-07-09 21:04:23 -05004704 * Finds the next token in *buf, dynamically allocates a buffer big
4705 * enough to hold a copy of it, and copies the token into the new
4706 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4707 * that a duplicate buffer is created even for a zero-length token.
4708 *
4709 * Returns a pointer to the newly-allocated duplicate, or a null
4710 * pointer if memory for the duplicate was not available. If
4711 * the lenp argument is a non-null pointer, the length of the token
4712 * (not including the '\0') is returned in *lenp.
4713 *
4714 * If successful, the *buf pointer will be updated to point beyond
4715 * the end of the found token.
4716 *
4717 * Note: uses GFP_KERNEL for allocation.
4718 */
4719static inline char *dup_token(const char **buf, size_t *lenp)
4720{
4721 char *dup;
4722 size_t len;
4723
4724 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05004725 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05004726 if (!dup)
4727 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05004728 *(dup + len) = '\0';
4729 *buf += len;
4730
4731 if (lenp)
4732 *lenp = len;
4733
4734 return dup;
4735}
4736
4737/*
Alex Elder859c31d2012-10-25 23:34:42 -05004738 * Parse the options provided for an "rbd add" (i.e., rbd image
4739 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4740 * and the data written is passed here via a NUL-terminated buffer.
4741 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05004742 *
Alex Elder859c31d2012-10-25 23:34:42 -05004743 * The information extracted from these options is recorded in
4744 * the other parameters which return dynamically-allocated
4745 * structures:
4746 * ceph_opts
4747 * The address of a pointer that will refer to a ceph options
4748 * structure. Caller must release the returned pointer using
4749 * ceph_destroy_options() when it is no longer needed.
4750 * rbd_opts
4751 * Address of an rbd options pointer. Fully initialized by
4752 * this function; caller must release with kfree().
4753 * spec
4754 * Address of an rbd image specification pointer. Fully
4755 * initialized by this function based on parsed options.
4756 * Caller must release with rbd_spec_put().
4757 *
4758 * The options passed take this form:
 4759 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4760 * where:
4761 * <mon_addrs>
4762 * A comma-separated list of one or more monitor addresses.
4763 * A monitor address is an ip address, optionally followed
4764 * by a port number (separated by a colon).
4765 * I.e.: ip1[:port1][,ip2[:port2]...]
4766 * <options>
4767 * A comma-separated list of ceph and/or rbd options.
4768 * <pool_name>
4769 * The name of the rados pool containing the rbd image.
4770 * <image_name>
4771 * The name of the image in that pool to map.
 4772 * <snap_name>
 4773 * An optional snapshot name. If provided, the mapping will
 4774 * present data from the image at the time that snapshot was
 4775 * created. The image head is used if no snapshot name is
4776 * provided. Snapshot mappings are always read-only.
Alex Eldera725f65e2012-02-02 08:13:30 -06004777 */
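/*
 * For example (with a hypothetical monitor address, key and image
 * name), writing the line
 *
 *   1.2.3.4:6789 name=admin,secret=<key> rbd myimage -
 *
 * to /sys/bus/rbd/add would map the head of image "myimage" in pool
 * "rbd", using the given monitor and credentials.
 */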
Alex Elder859c31d2012-10-25 23:34:42 -05004778static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05004779 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05004780 struct rbd_options **opts,
4781 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06004782{
Alex Elderd22f76e2012-07-12 10:46:35 -05004783 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05004784 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05004785 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05004786 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05004787 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05004788 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004789 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05004790 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05004791 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06004792
4793 /* The first four tokens are required */
4794
Alex Elder7ef32142012-02-02 08:13:30 -06004795 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05004796 if (!len) {
4797 rbd_warn(NULL, "no monitor address(es) provided");
4798 return -EINVAL;
4799 }
Alex Elder0ddebc02012-10-25 23:34:41 -05004800 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05004801 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06004802 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06004803
Alex Elderdc79b112012-10-25 23:34:41 -05004804 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05004805 options = dup_token(&buf, NULL);
4806 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05004807 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004808 if (!*options) {
4809 rbd_warn(NULL, "no options provided");
4810 goto out_err;
4811 }
Alex Eldera725f65e2012-02-02 08:13:30 -06004812
Alex Elder859c31d2012-10-25 23:34:42 -05004813 spec = rbd_spec_alloc();
4814 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05004815 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05004816
4817 spec->pool_name = dup_token(&buf, NULL);
4818 if (!spec->pool_name)
4819 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004820 if (!*spec->pool_name) {
4821 rbd_warn(NULL, "no pool name provided");
4822 goto out_err;
4823 }
Alex Eldere28fff262012-02-02 08:13:30 -06004824
Alex Elder69e7a022012-11-01 08:39:26 -05004825 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05004826 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004827 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004828 if (!*spec->image_name) {
4829 rbd_warn(NULL, "no image name provided");
4830 goto out_err;
4831 }
Alex Eldere28fff262012-02-02 08:13:30 -06004832
Alex Elderf28e5652012-10-25 23:34:41 -05004833 /*
4834 * Snapshot name is optional; default is to use "-"
4835 * (indicating the head/no snapshot).
4836 */
Alex Elder3feeb8942012-08-31 17:29:52 -05004837 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05004838 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05004839 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4840 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05004841 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05004842 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05004843 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05004844 }
Alex Elderecb4dc22013-04-26 09:43:47 -05004845 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4846 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004847 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05004848 *(snap_name + len) = '\0';
4849 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05004850
Alex Elder0ddebc02012-10-25 23:34:41 -05004851 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06004852
Alex Elder4e9afeb2012-10-25 23:34:41 -05004853 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4854 if (!rbd_opts)
4855 goto out_mem;
4856
4857 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004858 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05004859
Alex Elder859c31d2012-10-25 23:34:42 -05004860 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05004861 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05004862 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05004863 if (IS_ERR(copts)) {
4864 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05004865 goto out_err;
4866 }
Alex Elder859c31d2012-10-25 23:34:42 -05004867 kfree(options);
4868
4869 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004870 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05004871 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05004872
Alex Elderdc79b112012-10-25 23:34:41 -05004873 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05004874out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05004875 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05004876out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05004877 kfree(rbd_opts);
4878 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05004879 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05004880
Alex Elderdc79b112012-10-25 23:34:41 -05004881 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06004882}
4883
Alex Elder589d30e2012-07-10 20:30:11 -05004884/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004885 * Return pool id (>= 0) or a negative error code.
4886 */
4887static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4888{
Ilya Dryomova319bf52015-05-15 12:02:17 +03004889 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004890 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004891 int tries = 0;
4892 int ret;
4893
4894again:
4895 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4896 if (ret == -ENOENT && tries++ < 1) {
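/*
 * The pool may have been created after we got our osdmap; ask
 * the monitors for the newest epoch and, if ours is older, wait
 * for the map update before retrying the lookup once.
 */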
Ilya Dryomovd0b19702016-04-28 16:07:27 +02004897 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
4898 &newest_epoch);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004899 if (ret < 0)
4900 return ret;
4901
4902 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
Ilya Dryomov7cca78c2016-04-28 16:07:28 +02004903 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004904 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03004905 newest_epoch,
4906 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004907 goto again;
4908 } else {
4909 /* the osdmap we have is new enough */
4910 return -ENOENT;
4911 }
4912 }
4913
4914 return ret;
4915}
4916
4917/*
Alex Elder589d30e2012-07-10 20:30:11 -05004918 * An rbd format 2 image has a unique identifier, distinct from the
4919 * name given to it by the user. Internally, that identifier is
4920 * what's used to specify the names of objects related to the image.
4921 *
4922 * A special "rbd id" object is used to map an rbd image name to its
4923 * id. If that object doesn't exist, then there is no v2 rbd image
4924 * with the supplied name.
4925 *
4926 * This function will record the given rbd_dev's image_id field if
4927 * it can be determined, and in that case will return 0. If any
4928 * errors occur a negative errno will be returned and the rbd_dev's
4929 * image_id field will be unchanged (and should be NULL).
4930 */
4931static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4932{
4933 int ret;
4934 size_t size;
4935 char *object_name;
4936 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05004937 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05004938
Alex Elder589d30e2012-07-10 20:30:11 -05004939 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05004940 * When probing a parent image, the image id is already
4941 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05004942 * need to fetch the image id again in this case. We
4943 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05004944 */
Alex Elderc0fba362013-04-25 23:15:08 -05004945 if (rbd_dev->spec->image_id) {
4946 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4947
Alex Elder2c0d0a12012-10-30 19:40:33 -05004948 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05004949 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05004950
4951 /*
Alex Elder589d30e2012-07-10 20:30:11 -05004952 * First, see if the format 2 image id file exists, and if
4953 * so, get the image's persistent id from it.
4954 */
Alex Elder69e7a022012-11-01 08:39:26 -05004955 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05004956 object_name = kmalloc(size, GFP_NOIO);
4957 if (!object_name)
4958 return -ENOMEM;
Alex Elder0d7dbfc2012-10-25 23:34:41 -05004959 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05004960 dout("rbd id object name is %s\n", object_name);
4961
4962 /* Response will be an encoded string, which includes a length */
4963
4964 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4965 response = kzalloc(size, GFP_NOIO);
4966 if (!response) {
4967 ret = -ENOMEM;
4968 goto out;
4969 }
4970
Alex Elderc0fba362013-04-25 23:15:08 -05004971 /* If it doesn't exist we'll assume it's a format 1 image */
4972
Alex Elder36be9a72013-01-19 00:30:28 -06004973 ret = rbd_obj_method_sync(rbd_dev, object_name,
Alex Elder41579762013-04-21 12:14:45 -05004974 "rbd", "get_id", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004975 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004976 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05004977 if (ret == -ENOENT) {
4978 image_id = kstrdup("", GFP_KERNEL);
4979 ret = image_id ? 0 : -ENOMEM;
4980 if (!ret)
4981 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04004982 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05004983 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05004984
Alex Elderc0fba362013-04-25 23:15:08 -05004985 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05004986 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08004987 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05004988 if (!ret)
4989 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05004990 }
4991
4992 if (!ret) {
4993 rbd_dev->spec->image_id = image_id;
4994 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05004995 }
4996out:
4997 kfree(response);
4998 kfree(object_name);
4999
5000 return ret;
5001}
5002
Alex Elder3abef3b2013-05-13 20:35:37 -05005003/*
5004 * Undo whatever state changes are made by v1 or v2 header info
5005 * call.
5006 */
Alex Elder6fd48b32013-04-28 23:32:34 -05005007static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5008{
5009 struct rbd_image_header *header;
5010
Ilya Dryomove69b8d42015-01-19 12:06:14 +03005011 rbd_dev_parent_put(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005012
5013 /* Free dynamic fields from the header, then zero it out */
5014
5015 header = &rbd_dev->header;
Alex Elder812164f82013-04-30 00:44:32 -05005016 ceph_put_snap_context(header->snapc);
Alex Elder6fd48b32013-04-28 23:32:34 -05005017 kfree(header->snap_sizes);
5018 kfree(header->snap_names);
5019 kfree(header->object_prefix);
5020 memset(header, 0, sizeof (*header));
5021}
5022
Alex Elder2df3fac2013-05-06 09:51:30 -05005023static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
Alex Eldera30b71b2012-07-10 20:30:11 -05005024{
5025 int ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005026
Alex Elder1e130192012-07-03 16:01:19 -05005027 ret = rbd_dev_v2_object_prefix(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005028 if (ret)
Alex Elder1e130192012-07-03 16:01:19 -05005029 goto out_err;
Alex Elderb1b54022012-07-03 16:01:19 -05005030
Alex Elder2df3fac2013-05-06 09:51:30 -05005031 /*
 5032 * Get and check the features for the image. Currently the
5033 * features are assumed to never change.
5034 */
Alex Elderb1b54022012-07-03 16:01:19 -05005035 ret = rbd_dev_v2_features(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005036 if (ret)
Alex Elderb1b54022012-07-03 16:01:19 -05005037 goto out_err;
Alex Elder35d489f2012-07-03 16:01:19 -05005038
Alex Eldercc070d52013-04-21 12:14:45 -05005039 /* If the image supports fancy striping, get its parameters */
5040
5041 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5042 ret = rbd_dev_v2_striping_info(rbd_dev);
5043 if (ret < 0)
5044 goto out_err;
5045 }
Alex Elder2df3fac2013-05-06 09:51:30 -05005046 /* No support for format 2 images using crypto or compression */
Alex Eldera30b71b2012-07-10 20:30:11 -05005047
Alex Elder35152972012-08-31 17:29:55 -05005048 return 0;
Alex Elder9d475de2012-07-03 16:01:19 -05005049out_err:
Alex Elder642a2532013-05-06 17:40:33 -05005050 rbd_dev->header.features = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005051 kfree(rbd_dev->header.object_prefix);
5052 rbd_dev->header.object_prefix = NULL;
Alex Elder9d475de2012-07-03 16:01:19 -05005053
5054 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005055}
5056
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005057/*
5058 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5059 * rbd_dev_image_probe() recursion depth, which means it's also the
5060 * length of the already discovered part of the parent chain.
5061 */
5062static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
Alex Elder83a06262012-10-30 15:47:17 -05005063{
Alex Elder2f82ee52012-10-30 19:40:33 -05005064 struct rbd_device *parent = NULL;
Alex Elder124afba2013-04-26 15:44:36 -05005065 int ret;
5066
5067 if (!rbd_dev->parent_spec)
5068 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005069
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005070 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5071 pr_info("parent chain is too long (%d)\n", depth);
5072 ret = -EINVAL;
5073 goto out_err;
5074 }
5075
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005076 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005077 if (!parent) {
5078 ret = -ENOMEM;
Alex Elder124afba2013-04-26 15:44:36 -05005079 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005080 }
5081
5082 /*
5083 * Images related by parent/child relationships always share
5084 * rbd_client and spec/parent_spec, so bump their refcounts.
5085 */
5086 __rbd_get_client(rbd_dev->rbd_client);
5087 rbd_spec_get(rbd_dev->parent_spec);
Alex Elder124afba2013-04-26 15:44:36 -05005088
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005089 ret = rbd_dev_image_probe(parent, depth);
Alex Elder124afba2013-04-26 15:44:36 -05005090 if (ret < 0)
5091 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005092
Alex Elder124afba2013-04-26 15:44:36 -05005093 rbd_dev->parent = parent;
Alex Eldera2acd002013-05-08 22:50:04 -05005094 atomic_set(&rbd_dev->parent_ref, 1);
Alex Elder124afba2013-04-26 15:44:36 -05005095 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005096
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005097out_err:
5098 rbd_dev_unparent(rbd_dev);
Markus Elfring1761b222015-11-23 20:16:45 +01005099 rbd_dev_destroy(parent);
Alex Elder124afba2013-04-26 15:44:36 -05005100 return ret;
5101}
5102
Ilya Dryomov811c6682016-04-15 16:22:16 +02005103/*
5104 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5105 * upon return.
5106 */
Alex Elder200a6a82013-04-28 23:32:34 -05005107static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
Alex Elder124afba2013-04-26 15:44:36 -05005108{
Alex Elder83a06262012-10-30 15:47:17 -05005109 int ret;
Alex Elder83a06262012-10-30 15:47:17 -05005110
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005111 /* Record our major and minor device numbers. */
Alex Elder83a06262012-10-30 15:47:17 -05005112
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005113 if (!single_major) {
5114 ret = register_blkdev(0, rbd_dev->name);
5115 if (ret < 0)
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005116 goto err_out_unlock;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005117
5118 rbd_dev->major = ret;
5119 rbd_dev->minor = 0;
5120 } else {
5121 rbd_dev->major = rbd_major;
5122 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5123 }
Alex Elder83a06262012-10-30 15:47:17 -05005124
5125 /* Set up the blkdev mapping. */
5126
5127 ret = rbd_init_disk(rbd_dev);
5128 if (ret)
5129 goto err_out_blkdev;
5130
Alex Elderf35a4de2013-05-06 09:51:29 -05005131 ret = rbd_dev_mapping_set(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005132 if (ret)
5133 goto err_out_disk;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04005134
Alex Elderf35a4de2013-05-06 09:51:29 -05005135 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
Josh Durgin22001f62013-09-30 20:10:04 -07005136 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
Alex Elderf35a4de2013-05-06 09:51:29 -05005137
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005138 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5139 ret = device_add(&rbd_dev->dev);
Alex Elderf35a4de2013-05-06 09:51:29 -05005140 if (ret)
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005141 goto err_out_mapping;
Alex Elder83a06262012-10-30 15:47:17 -05005142
Alex Elder83a06262012-10-30 15:47:17 -05005143 /* Everything's ready. Announce the disk to the world. */
5144
Alex Elder129b79d2013-04-26 15:44:36 -05005145 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005146 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005147
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005148 spin_lock(&rbd_dev_list_lock);
5149 list_add_tail(&rbd_dev->node, &rbd_dev_list);
5150 spin_unlock(&rbd_dev_list_lock);
5151
Ilya Dryomov811c6682016-04-15 16:22:16 +02005152 add_disk(rbd_dev->disk);
Alex Elder83a06262012-10-30 15:47:17 -05005153 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5154 (unsigned long long) rbd_dev->mapping.size);
5155
5156 return ret;
Alex Elder2f82ee52012-10-30 19:40:33 -05005157
Alex Elderf35a4de2013-05-06 09:51:29 -05005158err_out_mapping:
5159 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005160err_out_disk:
5161 rbd_free_disk(rbd_dev);
5162err_out_blkdev:
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005163 if (!single_major)
5164 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005165err_out_unlock:
5166 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005167 return ret;
5168}
5169
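/*
 * Build the header object name for this image and record the pool it
 * lives in.  Format 1 images use the image name plus RBD_SUFFIX;
 * format 2 images use RBD_HEADER_PREFIX plus the image id.
 */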
Alex Elder332bb122013-04-27 09:59:30 -05005170static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5171{
5172 struct rbd_spec *spec = rbd_dev->spec;
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005173 int ret;
Alex Elder332bb122013-04-27 09:59:30 -05005174
5175 /* Record the header object name for this rbd image. */
5176
5177 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5178
Yan, Zheng76271512016-02-03 21:24:49 +08005179 rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
Alex Elder332bb122013-04-27 09:59:30 -05005180 if (rbd_dev->image_format == 1)
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005181 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5182 spec->image_name, RBD_SUFFIX);
Alex Elder332bb122013-04-27 09:59:30 -05005183 else
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005184 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5185 RBD_HEADER_PREFIX, spec->image_id);
Alex Elder332bb122013-04-27 09:59:30 -05005186
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005187 return ret;
Alex Elder332bb122013-04-27 09:59:30 -05005188}
5189
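/*
 * Undo what rbd_dev_image_probe() did: drop the header state, forget
 * the image format and image id, and destroy the rbd_device.
 */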
Alex Elder200a6a82013-04-28 23:32:34 -05005190static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5191{
Alex Elder6fd48b32013-04-28 23:32:34 -05005192 rbd_dev_unprobe(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005193 rbd_dev->image_format = 0;
5194 kfree(rbd_dev->spec->image_id);
5195 rbd_dev->spec->image_id = NULL;
5196
Alex Elder200a6a82013-04-28 23:32:34 -05005197 rbd_dev_destroy(rbd_dev);
5198}
5199
Alex Eldera30b71b2012-07-10 20:30:11 -05005200/*
5201 * Probe for the existence of the header object for the given rbd
Alex Elder1f3ef782013-05-06 17:40:33 -05005202 * device. If this image is the one being mapped (i.e., not a
5203 * parent), initiate a watch on its header object before using that
5204 * object to get detailed information about the rbd image.
Alex Eldera30b71b2012-07-10 20:30:11 -05005205 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005206static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
Alex Eldera30b71b2012-07-10 20:30:11 -05005207{
5208 int ret;
5209
5210 /*
Alex Elder3abef3b2013-05-13 20:35:37 -05005211 * Get the id from the image id object. Unless there's an
5212 * error, rbd_dev->spec->image_id will be filled in with
5213 * a dynamically-allocated string, and rbd_dev->image_format
5214 * will be set to either 1 or 2.
Alex Eldera30b71b2012-07-10 20:30:11 -05005215 */
5216 ret = rbd_dev_image_id(rbd_dev);
5217 if (ret)
Alex Elderc0fba362013-04-25 23:15:08 -05005218 return ret;
Alex Elderc0fba362013-04-25 23:15:08 -05005219
Alex Elder332bb122013-04-27 09:59:30 -05005220 ret = rbd_dev_header_name(rbd_dev);
5221 if (ret)
5222 goto err_out_format;
5223
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005224 if (!depth) {
Ilya Dryomovfca27062013-12-16 18:02:40 +02005225 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005226 if (ret) {
5227 if (ret == -ENOENT)
5228 pr_info("image %s/%s does not exist\n",
5229 rbd_dev->spec->pool_name,
5230 rbd_dev->spec->image_name);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005231 goto err_out_format;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005232 }
Alex Elder1f3ef782013-05-06 17:40:33 -05005233 }
Alex Elderb644de22013-04-27 09:59:31 -05005234
Ilya Dryomova720ae02014-07-23 17:11:19 +04005235 ret = rbd_dev_header_info(rbd_dev);
Alex Elder5655c4d2013-04-25 23:15:08 -05005236 if (ret)
Alex Elderb644de22013-04-27 09:59:31 -05005237 goto err_out_watch;
Alex Elder83a06262012-10-30 15:47:17 -05005238
Ilya Dryomov04077592014-07-23 17:11:20 +04005239 /*
5240 * If this image is the one being mapped, we have pool name and
5241 * id, image name and id, and snap name - need to fill snap id.
5242 * Otherwise this is a parent image, identified by pool, image
5243 * and snap ids - need to fill in names for those ids.
5244 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005245 if (!depth)
Ilya Dryomov04077592014-07-23 17:11:20 +04005246 ret = rbd_spec_fill_snap_id(rbd_dev);
5247 else
5248 ret = rbd_spec_fill_names(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005249 if (ret) {
5250 if (ret == -ENOENT)
5251 pr_info("snap %s/%s@%s does not exist\n",
5252 rbd_dev->spec->pool_name,
5253 rbd_dev->spec->image_name,
5254 rbd_dev->spec->snap_name);
Alex Elder33dca392013-04-30 00:44:33 -05005255 goto err_out_probe;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005256 }
Alex Elder9bb81c92013-04-27 09:59:30 -05005257
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005258 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5259 ret = rbd_dev_v2_parent_info(rbd_dev);
5260 if (ret)
5261 goto err_out_probe;
5262
5263 /*
5264 * Need to warn users if this image is the one being
5265 * mapped and has a parent.
5266 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005267 if (!depth && rbd_dev->parent_spec)
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005268 rbd_warn(rbd_dev,
5269 "WARNING: kernel layering is EXPERIMENTAL!");
5270 }
5271
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005272 ret = rbd_dev_probe_parent(rbd_dev, depth);
Alex Elder30d60ba2013-05-06 09:51:30 -05005273 if (ret)
5274 goto err_out_probe;
Alex Elder83a06262012-10-30 15:47:17 -05005275
Alex Elder30d60ba2013-05-06 09:51:30 -05005276 dout("discovered format %u image, header name is %s\n",
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005277 rbd_dev->image_format, rbd_dev->header_oid.name);
Alex Elder30d60ba2013-05-06 09:51:30 -05005278 return 0;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005279
Alex Elder6fd48b32013-04-28 23:32:34 -05005280err_out_probe:
5281 rbd_dev_unprobe(rbd_dev);
Alex Elderb644de22013-04-27 09:59:31 -05005282err_out_watch:
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005283 if (!depth)
Ilya Dryomovfca27062013-12-16 18:02:40 +02005284 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder332bb122013-04-27 09:59:30 -05005285err_out_format:
5286 rbd_dev->image_format = 0;
Alex Elder5655c4d2013-04-25 23:15:08 -05005287 kfree(rbd_dev->spec->image_id);
5288 rbd_dev->spec->image_id = NULL;
Alex Elder5655c4d2013-04-25 23:15:08 -05005289 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005290}
5291
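/*
 * Common handler for writes to the sysfs "add" and "add_single_major"
 * attributes: parse the add command, get a rados client, look up the
 * pool, probe the image and set up the block device for the mapping.
 */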
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005292static ssize_t do_rbd_add(struct bus_type *bus,
5293 const char *buf,
5294 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005295{
Alex Eldercb8627c2012-07-09 21:04:23 -05005296 struct rbd_device *rbd_dev = NULL;
Alex Elderdc79b112012-10-25 23:34:41 -05005297 struct ceph_options *ceph_opts = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005298 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05005299 struct rbd_spec *spec = NULL;
Alex Elder9d3997f2012-10-25 23:34:42 -05005300 struct rbd_client *rbdc;
Alex Elder51344a32013-05-06 07:40:30 -05005301 bool read_only;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005302 int rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005303
5304 if (!try_module_get(THIS_MODULE))
5305 return -ENODEV;
5306
Alex Eldera725f65e2012-02-02 08:13:30 -06005307 /* parse add command */
Alex Elder859c31d2012-10-25 23:34:42 -05005308 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
Alex Elderdc79b112012-10-25 23:34:41 -05005309 if (rc < 0)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005310 goto out;
Alex Eldera725f65e2012-02-02 08:13:30 -06005311
Alex Elder9d3997f2012-10-25 23:34:42 -05005312 rbdc = rbd_get_client(ceph_opts);
5313 if (IS_ERR(rbdc)) {
5314 rc = PTR_ERR(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005315 goto err_out_args;
Alex Elder9d3997f2012-10-25 23:34:42 -05005316 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005317
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005318 /* pick the pool */
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005319 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005320 if (rc < 0) {
5321 if (rc == -ENOENT)
5322 pr_info("pool %s does not exist\n", spec->pool_name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005323 goto err_out_client;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005324 }
Alex Elderc0cd10db2013-04-26 09:43:47 -05005325 spec->pool_id = (u64)rc;
Alex Elder859c31d2012-10-25 23:34:42 -05005326
Ilya Dryomovd1475432015-06-22 13:24:48 +03005327 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005328 if (!rbd_dev) {
5329 rc = -ENOMEM;
Alex Elderbd4ba652012-10-25 23:34:42 -05005330 goto err_out_client;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005331 }
Alex Elderc53d5892012-10-25 23:34:42 -05005332 rbdc = NULL; /* rbd_dev now owns this */
5333 spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovd1475432015-06-22 13:24:48 +03005334 rbd_opts = NULL; /* rbd_dev now owns this */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005335
Ilya Dryomov811c6682016-04-15 16:22:16 +02005336 down_write(&rbd_dev->header_rwsem);
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005337 rc = rbd_dev_image_probe(rbd_dev, 0);
Alex Eldera30b71b2012-07-10 20:30:11 -05005338 if (rc < 0)
Alex Elderc53d5892012-10-25 23:34:42 -05005339 goto err_out_rbd_dev;
Alex Elder05fd6f62012-08-29 17:11:07 -05005340
Alex Elder7ce4eef2013-05-06 17:40:33 -05005341 /* If we are mapping a snapshot it must be marked read-only */
5342
Ilya Dryomovd1475432015-06-22 13:24:48 +03005343 read_only = rbd_dev->opts->read_only;
Alex Elder7ce4eef2013-05-06 17:40:33 -05005344 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5345 read_only = true;
5346 rbd_dev->mapping.read_only = read_only;
5347
Alex Elderb536f692013-04-28 23:32:34 -05005348 rc = rbd_dev_device_setup(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005349 if (rc) {
Ilya Dryomove37180c2013-12-16 18:02:41 +02005350 /*
5351 * rbd_dev_header_unwatch_sync() can't be moved into
5352 * rbd_dev_image_release() without refactoring, see
5353 * commit 1f3ef78861ac.
5354 */
5355 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005356 rbd_dev_image_release(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005357 goto out;
Alex Elder3abef3b2013-05-13 20:35:37 -05005358 }
Alex Elderb536f692013-04-28 23:32:34 -05005359
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005360 rc = count;
5361out:
5362 module_put(THIS_MODULE);
5363 return rc;
Alex Elder3abef3b2013-05-13 20:35:37 -05005364
Alex Elderc53d5892012-10-25 23:34:42 -05005365err_out_rbd_dev:
Ilya Dryomov811c6682016-04-15 16:22:16 +02005366 up_write(&rbd_dev->header_rwsem);
Alex Elderc53d5892012-10-25 23:34:42 -05005367 rbd_dev_destroy(rbd_dev);
Alex Elderbd4ba652012-10-25 23:34:42 -05005368err_out_client:
Alex Elder9d3997f2012-10-25 23:34:42 -05005369 rbd_put_client(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005370err_out_args:
Alex Elder859c31d2012-10-25 23:34:42 -05005371 rbd_spec_put(spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03005372 kfree(rbd_opts);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005373 goto out;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005374}
5375
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005376static ssize_t rbd_add(struct bus_type *bus,
5377 const char *buf,
5378 size_t count)
5379{
5380 if (single_major)
5381 return -EINVAL;
5382
5383 return do_rbd_add(bus, buf, count);
5384}
5385
5386static ssize_t rbd_add_single_major(struct bus_type *bus,
5387 const char *buf,
5388 size_t count)
5389{
5390 return do_rbd_add(bus, buf, count);
5391}
5392
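/*
 * Undo rbd_dev_device_setup(): free the disk, take the device off the
 * rbd device list, remove it from sysfs, clear the mapping and release
 * its major number unless the shared single major is in use.
 */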
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005393static void rbd_dev_device_release(struct rbd_device *rbd_dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005394{
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005395 rbd_free_disk(rbd_dev);
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005396
5397 spin_lock(&rbd_dev_list_lock);
5398 list_del_init(&rbd_dev->node);
5399 spin_unlock(&rbd_dev_list_lock);
5400
Alex Elder200a6a82013-04-28 23:32:34 -05005401 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005402 device_del(&rbd_dev->dev);
Alex Elder6d80b132013-05-06 07:40:30 -05005403 rbd_dev_mapping_clear(rbd_dev);
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005404 if (!single_major)
5405 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005406}
5407
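/*
 * Walk the parent chain and release images from the deepest ancestor
 * upward until the device being removed has no parent left.
 */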
Alex Elder05a46af2013-04-26 15:44:36 -05005408static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5409{
Alex Elderad945fc2013-04-26 15:44:36 -05005410 while (rbd_dev->parent) {
Alex Elder05a46af2013-04-26 15:44:36 -05005411 struct rbd_device *first = rbd_dev;
5412 struct rbd_device *second = first->parent;
5413 struct rbd_device *third;
5414
5415 /*
5416 * Follow to the parent with no grandparent and
5417 * remove it.
5418 */
5419 while (second && (third = second->parent)) {
5420 first = second;
5421 second = third;
5422 }
Alex Elderad945fc2013-04-26 15:44:36 -05005423 rbd_assert(second);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005424 rbd_dev_image_release(second);
Alex Elderad945fc2013-04-26 15:44:36 -05005425 first->parent = NULL;
5426 first->parent_overlap = 0;
5427
5428 rbd_assert(first->parent_spec);
Alex Elder05a46af2013-04-26 15:44:36 -05005429 rbd_spec_put(first->parent_spec);
5430 first->parent_spec = NULL;
Alex Elder05a46af2013-04-26 15:44:36 -05005431 }
5432}
5433
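/*
 * Common handler for writes to the sysfs "remove" and
 * "remove_single_major" attributes: look up the device by id, refuse
 * if it is still open, otherwise stop watching the header object and
 * release the device and image state.
 */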
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005434static ssize_t do_rbd_remove(struct bus_type *bus,
5435 const char *buf,
5436 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005437{
5438 struct rbd_device *rbd_dev = NULL;
Alex Elder751cc0e2013-05-31 15:17:01 -05005439 struct list_head *tmp;
5440 int dev_id;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005441 unsigned long ul;
Alex Elder82a442d2013-05-31 17:40:44 -05005442 bool already = false;
Alex Elder0d8189e2013-04-27 09:59:30 -05005443 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005444
Jingoo Hanbb8e0e82013-09-11 14:20:07 -07005445 ret = kstrtoul(buf, 10, &ul);
Alex Elder0d8189e2013-04-27 09:59:30 -05005446 if (ret)
5447 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005448
5449 /* convert to int; abort if we lost anything in the conversion */
Alex Elder751cc0e2013-05-31 15:17:01 -05005450 dev_id = (int)ul;
5451 if (dev_id != ul)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005452 return -EINVAL;
5453
Alex Elder751cc0e2013-05-31 15:17:01 -05005454 ret = -ENOENT;
5455 spin_lock(&rbd_dev_list_lock);
5456 list_for_each(tmp, &rbd_dev_list) {
5457 rbd_dev = list_entry(tmp, struct rbd_device, node);
5458 if (rbd_dev->dev_id == dev_id) {
5459 ret = 0;
5460 break;
5461 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005462 }
Alex Elder751cc0e2013-05-31 15:17:01 -05005463 if (!ret) {
5464 spin_lock_irq(&rbd_dev->lock);
5465 if (rbd_dev->open_count)
5466 ret = -EBUSY;
5467 else
Alex Elder82a442d2013-05-31 17:40:44 -05005468 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5469 &rbd_dev->flags);
Alex Elder751cc0e2013-05-31 15:17:01 -05005470 spin_unlock_irq(&rbd_dev->lock);
5471 }
5472 spin_unlock(&rbd_dev_list_lock);
Alex Elder82a442d2013-05-31 17:40:44 -05005473 if (ret < 0 || already)
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005474 return ret;
Alex Elder751cc0e2013-05-31 15:17:01 -05005475
Ilya Dryomovfca27062013-12-16 18:02:40 +02005476 rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomovfca27062013-12-16 18:02:40 +02005477
Josh Durgin98752012013-08-29 17:26:31 -07005478 /*
5479 * Don't free anything from rbd_dev->disk until after all
5480 * notifies are completely processed. Otherwise
 5481	 * rbd_dev_device_release() will race with rbd_watch_cb(), resulting
5482 * in a potential use after free of rbd_dev->disk or rbd_dev.
5483 */
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005484 rbd_dev_device_release(rbd_dev);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005485 rbd_dev_image_release(rbd_dev);
Alex Elderaafb2302012-09-06 16:00:54 -05005486
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005487 return count;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005488}
5489
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005490static ssize_t rbd_remove(struct bus_type *bus,
5491 const char *buf,
5492 size_t count)
5493{
5494 if (single_major)
5495 return -EINVAL;
5496
5497 return do_rbd_remove(bus, buf, count);
5498}
5499
5500static ssize_t rbd_remove_single_major(struct bus_type *bus,
5501 const char *buf,
5502 size_t count)
5503{
5504 return do_rbd_remove(bus, buf, count);
5505}
5506
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005507/*
5508 * create control files in sysfs
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005509 * /sys/bus/rbd/...
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005510 */
5511static int rbd_sysfs_init(void)
5512{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005513 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005514
Alex Elderfed4c142012-02-07 12:03:36 -06005515 ret = device_register(&rbd_root_dev);
Alex Elder21079782012-01-24 10:08:36 -06005516 if (ret < 0)
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005517 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005518
Alex Elderfed4c142012-02-07 12:03:36 -06005519 ret = bus_register(&rbd_bus_type);
5520 if (ret < 0)
5521 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005522
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005523 return ret;
5524}
5525
5526static void rbd_sysfs_cleanup(void)
5527{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005528 bus_unregister(&rbd_bus_type);
Alex Elderfed4c142012-02-07 12:03:36 -06005529 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005530}
5531
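/*
 * Create the slab caches used for image requests, object requests and
 * segment names; on failure, destroy whatever was already created
 * before returning -ENOMEM.
 */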
Alex Elder1c2a9df2013-05-01 12:43:03 -05005532static int rbd_slab_init(void)
5533{
5534 rbd_assert(!rbd_img_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005535 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
Alex Elder868311b2013-05-01 12:43:03 -05005536 if (!rbd_img_request_cache)
5537 return -ENOMEM;
5538
5539 rbd_assert(!rbd_obj_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005540 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
Alex Elder78c2a442013-05-01 12:43:04 -05005541 if (!rbd_obj_request_cache)
5542 goto out_err;
5543
5544 rbd_assert(!rbd_segment_name_cache);
5545 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02005546 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
Alex Elder78c2a442013-05-01 12:43:04 -05005547 if (rbd_segment_name_cache)
Alex Elder1c2a9df2013-05-01 12:43:03 -05005548 return 0;
Alex Elder78c2a442013-05-01 12:43:04 -05005549out_err:
Julia Lawall13bf2832015-09-13 14:15:26 +02005550 kmem_cache_destroy(rbd_obj_request_cache);
5551 rbd_obj_request_cache = NULL;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005552
Alex Elder868311b2013-05-01 12:43:03 -05005553 kmem_cache_destroy(rbd_img_request_cache);
5554 rbd_img_request_cache = NULL;
5555
Alex Elder1c2a9df2013-05-01 12:43:03 -05005556 return -ENOMEM;
5557}
5558
5559static void rbd_slab_exit(void)
5560{
Alex Elder78c2a442013-05-01 12:43:04 -05005561 rbd_assert(rbd_segment_name_cache);
5562 kmem_cache_destroy(rbd_segment_name_cache);
5563 rbd_segment_name_cache = NULL;
5564
Alex Elder868311b2013-05-01 12:43:03 -05005565 rbd_assert(rbd_obj_request_cache);
5566 kmem_cache_destroy(rbd_obj_request_cache);
5567 rbd_obj_request_cache = NULL;
5568
Alex Elder1c2a9df2013-05-01 12:43:03 -05005569 rbd_assert(rbd_img_request_cache);
5570 kmem_cache_destroy(rbd_img_request_cache);
5571 rbd_img_request_cache = NULL;
5572}
5573
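/*
 * Module init: verify libceph compatibility, create the slab caches
 * and the shared workqueue, register the single major number if
 * requested, and set up the sysfs interface.
 */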
Alex Eldercc344fa2013-02-19 12:25:56 -06005574static int __init rbd_init(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005575{
5576 int rc;
5577
Alex Elder1e32d342013-01-30 11:13:33 -06005578 if (!libceph_compatible(NULL)) {
5579 rbd_warn(NULL, "libceph incompatibility (quitting)");
Alex Elder1e32d342013-01-30 11:13:33 -06005580 return -EINVAL;
5581 }
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005582
Alex Elder1c2a9df2013-05-01 12:43:03 -05005583 rc = rbd_slab_init();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005584 if (rc)
5585 return rc;
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005586
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005587 /*
5588 * The number of active work items is limited by the number of
Ilya Dryomovf77303b2015-04-22 18:28:13 +03005589 * rbd devices * queue depth, so leave @max_active at default.
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005590 */
5591 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5592 if (!rbd_wq) {
5593 rc = -ENOMEM;
5594 goto err_out_slab;
5595 }
5596
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005597 if (single_major) {
5598 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5599 if (rbd_major < 0) {
5600 rc = rbd_major;
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005601 goto err_out_wq;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005602 }
5603 }
5604
Alex Elder1c2a9df2013-05-01 12:43:03 -05005605 rc = rbd_sysfs_init();
5606 if (rc)
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005607 goto err_out_blkdev;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005608
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005609 if (single_major)
5610 pr_info("loaded (major %d)\n", rbd_major);
5611 else
5612 pr_info("loaded\n");
5613
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005614 return 0;
5615
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005616err_out_blkdev:
5617 if (single_major)
5618 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005619err_out_wq:
5620 destroy_workqueue(rbd_wq);
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005621err_out_slab:
5622 rbd_slab_exit();
Alex Elder1c2a9df2013-05-01 12:43:03 -05005623 return rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005624}
5625
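/*
 * Module exit: destroy the device id ida and undo everything set up
 * in rbd_init().
 */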
Alex Eldercc344fa2013-02-19 12:25:56 -06005626static void __exit rbd_exit(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005627{
Ilya Dryomovffe312c2014-05-20 15:46:04 +04005628 ida_destroy(&rbd_dev_id_ida);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005629 rbd_sysfs_cleanup();
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005630 if (single_major)
5631 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005632 destroy_workqueue(rbd_wq);
Alex Elder1c2a9df2013-05-01 12:43:03 -05005633 rbd_slab_exit();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005634}
5635
5636module_init(rbd_init);
5637module_exit(rbd_exit);
5638
Alex Elderd552c612013-05-31 20:13:09 -05005639MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005640MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5641MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005642/* following authorship retained from original osdblk.c */
5643MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5644
Ilya Dryomov90da2582013-12-13 15:28:56 +02005645MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005646MODULE_LICENSE("GPL");