
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;		/* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;	/* format 1 only */
        u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64		pool_id;
        const char	*pool_name;

        const char	*image_id;
        const char	*image_name;

        u64		snap_id;
        const char	*snap_name;

        struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client	*client;
        struct kref		kref;
        struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
        OBJ_OP_WRITE,
        OBJ_OP_READ,
        OBJ_OP_DISCARD,
};

enum obj_req_flags {
        OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
        const char		*object_name;
        u64			offset;		/* object start byte */
        u64			length;		/* bytes from offset */
        unsigned long		flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request	*obj_request;	/* STAT op */
                struct {
                        struct rbd_img_request	*img_request;
                        u64			img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head	links;
                };
        };
        u32			which;		/* posn image request list */

        enum obj_request_type	type;
        union {
                struct bio	*bio_list;
                struct {
                        struct page	**pages;
                        u32		page_count;
                };
        };
        struct page		**copyup_pages;
        u32			copyup_page_count;

        struct ceph_osd_request	*osd_req;

        u64			xferred;	/* bytes transferred */
        int			result;

        rbd_obj_callback_t	callback;
        struct completion	completion;

        struct kref		kref;
};

enum img_req_flags {
        IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
        IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
        struct rbd_device	*rbd_dev;
        u64			offset;	/* starting image byte offset */
        u64			length;	/* byte count from offset */
        unsigned long		flags;
        union {
                u64			snap_id;	/* for reads */
                struct ceph_snap_context *snapc;	/* for writes */
        };
        union {
                struct request		*rq;		/* block request */
                struct rbd_obj_request	*obj_request;	/* obj req initiator */
        };
        struct page		**copyup_pages;
        u32			copyup_page_count;
        spinlock_t		completion_lock;	/* protects next_completion */
        u32			next_completion;
        rbd_img_callback_t	callback;
        u64			xferred;	/* aggregate bytes transferred */
        int			result;	/* first nonzero obj_request result */

        u32			obj_request_count;
        struct list_head	obj_requests;	/* rbd_obj_request structs */

        struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
        u64			size;
        u64			features;
        bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int			dev_id;		/* blkdev unique id */

        int			major;		/* blkdev assigned major */
        int			minor;
        struct gendisk		*disk;		/* blkdev's gendisk and rq */

        u32			image_format;	/* Either 1 or 2 */
        struct rbd_client	*rbd_client;

        char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t		lock;		/* queue, flags, open_count */

        struct rbd_image_header	header;
        unsigned long		flags;		/* possibly lock protected */
        struct rbd_spec		*spec;
        struct rbd_options	*opts;

        struct ceph_object_id	header_oid;
        struct ceph_object_locator header_oloc;

        struct ceph_file_layout	layout;

        struct ceph_osd_linger_request *watch_handle;

        struct rbd_spec		*parent_spec;
        u64			parent_overlap;
        atomic_t		parent_ref;
        struct rbd_device	*parent;

        /* Block layer tags. */
        struct blk_mq_tag_set	tag_set;

        /* protects updating the header */
        struct rw_semaphore	header_rwsem;

        struct rbd_mapping	mapping;

        struct list_head	node;

        /* sysfs related */
        struct device		dev;
        unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *rbd_img_request_cache;
static struct kmem_cache *rbd_obj_request_cache;
static struct kmem_cache *rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

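/*
 * In single-major mode all rbd devices share one major number and the
 * minor number space is partitioned: each device gets
 * 2^RBD_SINGLE_MAJOR_PART_SHIFT (16) consecutive minors, the first for
 * the whole device and the remainder for its partitions.
 */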
static int rbd_dev_id_to_minor(int dev_id)
{
        return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
        return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
        &bus_attr_add.attr,
        &bus_attr_remove.attr,
        &bus_attr_add_single_major.attr,
        &bus_attr_remove_single_major.attr,
        NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
        if (!single_major &&
            (attr == &bus_attr_add_single_major.attr ||
             attr == &bus_attr_remove_single_major.attr))
                return 0;

        return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
        .attrs = rbd_bus_attrs,
        .is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
        .name		= "rbd",
        .bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =	"rbd",
        .release =	rbd_root_dev_release,
};

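/*
 * Emit a warning, prefixed with the most specific identification of the
 * device that is available: disk name, image name, image id, or (as a
 * last resort) the rbd_dev pointer itself.
 */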
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else	/* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_features);

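/*
 * Block device open: refuse writes on a read-only mapping and refuse
 * any open while the device is being removed; otherwise bump the open
 * count and take a reference on the device.
 */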
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        (void) get_device(&rbd_dev->dev);

        return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
        int ret = 0;
        int val;
        bool ro;
        bool ro_changed = false;

        /* get_user() may sleep, so call it before taking rbd_dev->lock */
        if (get_user(val, (int __user *)(arg)))
                return -EFAULT;

        ro = val ? true : false;
        /* Snapshots cannot be written to */
        if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        /* prevent others from opening this device */
        if (rbd_dev->open_count > 1) {
                ret = -EBUSY;
                goto out;
        }

        if (rbd_dev->mapping.read_only != ro) {
                rbd_dev->mapping.read_only = ro;
                ro_changed = true;
        }

out:
        spin_unlock_irq(&rbd_dev->lock);
        /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
        if (ret == 0 && ro_changed)
                set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

        return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        int ret = 0;

        switch (cmd) {
        case BLKROSET:
                ret = rbd_ioctl_set_ro(rbd_dev, arg);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
        return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
        .owner			= THIS_MODULE,
        .open			= rbd_open,
        .release		= rbd_release,
        .ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_rbdc;
        ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_client;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;
out_client:
        ceph_destroy_client(rbdc->client);
out_rbdc:
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
        Opt_queue_depth,
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        Opt_err
};

static match_table_t rbd_opts_tokens = {
        {Opt_queue_depth, "queue_depth=%d"},
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},		/* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},		/* Alternate spelling */
        {Opt_err, NULL}
};

struct rbd_options {
        int	queue_depth;
        bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false

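/*
 * Parse a single token from the option string given when mapping an
 * image, e.g. "queue_depth=128", "ro" or "rw".  Tokens that are not
 * recognized here are rejected; libceph reports them as bad options.
 */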
static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token, argstr[0].from);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_queue_depth:
                if (intval < 1) {
                        pr_err("queue_depth out of range\n");
                        return -EINVAL;
                }
                rbd_opts->queue_depth = intval;
                break;
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                /* libceph prints "bad option" msg */
                return -EINVAL;
        }

        return 0;
}

static char *obj_op_name(enum obj_operation_type op_type)
{
        switch (op_type) {
        case OBJ_OP_READ:
                return "read";
        case OBJ_OP_WRITE:
                return "write";
        case OBJ_OP_DISCARD:
                return "discard";
        default:
                return "???";
        }
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)	/* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);
        mutex_unlock(&client_mutex);

        return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}

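/*
 * Sanity-check an on-disk (format 1) image header: verify the magic
 * header text, require an object order of at least one sector, and
 * make sure the snapshot count and snapshot name lengths fit in a
 * size_t.
 */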
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
        u32 which;
        const char *snap_name;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return ERR_PTR(-ENOENT);

        snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
        return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;	/* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}

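/*
 * Record the size and feature bits of the mapped snapshot (or head)
 * in rbd_dev->mapping.
 */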
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

Alex Elder98571b52013-01-20 14:44:42 -06001176static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001177{
Alex Elder65ccfe22012-08-09 10:33:26 -07001178 char *name;
1179 u64 segment;
1180 int ret;
Josh Durgin3a96d5c2013-06-12 19:15:06 -07001181 char *name_format;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001182
Alex Elder78c2a442013-05-01 12:43:04 -05001183 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
Alex Elder65ccfe22012-08-09 10:33:26 -07001184 if (!name)
1185 return NULL;
1186 segment = offset >> rbd_dev->header.obj_order;
Josh Durgin3a96d5c2013-06-12 19:15:06 -07001187 name_format = "%s.%012llx";
1188 if (rbd_dev->image_format == 2)
1189 name_format = "%s.%016llx";
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02001190 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
Alex Elder65ccfe22012-08-09 10:33:26 -07001191 rbd_dev->header.object_prefix, segment);
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02001192 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
Alex Elder65ccfe22012-08-09 10:33:26 -07001193 pr_err("error formatting segment name for #%llu (%d)\n",
1194 segment, ret);
Himangi Saraogi7d5079a2014-07-24 03:17:07 +05301195 rbd_segment_name_free(name);
Alex Elder65ccfe22012-08-09 10:33:26 -07001196 name = NULL;
1197 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001198
Alex Elder65ccfe22012-08-09 10:33:26 -07001199 return name;
1200}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001201
Alex Elder65ccfe22012-08-09 10:33:26 -07001202static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1203{
1204 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001205
Alex Elder65ccfe22012-08-09 10:33:26 -07001206 return offset & (segment_size - 1);
1207}
1208
1209static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1210 u64 offset, u64 length)
1211{
1212 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1213
1214 offset &= segment_size - 1;
1215
Alex Elderaafb2302012-09-06 16:00:54 -05001216 rbd_assert(length <= U64_MAX - offset);
Alex Elder65ccfe22012-08-09 10:33:26 -07001217 if (offset + length > segment_size)
1218 length = segment_size - offset;
1219
1220 return length;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001221}
1222
1223/*
Josh Durgin029bcbd2011-07-22 11:35:23 -07001224 * returns the size of an object in the image
1225 */
1226static u64 rbd_obj_bytes(struct rbd_image_header *header)
1227{
1228 return 1 << header->obj_order;
1229}
1230
1231/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001232 * bio helpers
1233 */
1234
1235static void bio_chain_put(struct bio *chain)
1236{
1237 struct bio *tmp;
1238
1239 while (chain) {
1240 tmp = chain;
1241 chain = chain->bi_next;
1242 bio_put(tmp);
1243 }
1244}
1245
1246/*
1247 * zeros a bio chain, starting at specific offset
1248 */
1249static void zero_bio_chain(struct bio *chain, int start_ofs)
1250{
Kent Overstreet79886132013-11-23 17:19:00 -08001251 struct bio_vec bv;
1252 struct bvec_iter iter;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001253 unsigned long flags;
1254 void *buf;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001255 int pos = 0;
1256
1257 while (chain) {
Kent Overstreet79886132013-11-23 17:19:00 -08001258 bio_for_each_segment(bv, chain, iter) {
1259 if (pos + bv.bv_len > start_ofs) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001260 int remainder = max(start_ofs - pos, 0);
Kent Overstreet79886132013-11-23 17:19:00 -08001261 buf = bvec_kmap_irq(&bv, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001262 memset(buf + remainder, 0,
Kent Overstreet79886132013-11-23 17:19:00 -08001263 bv.bv_len - remainder);
1264 flush_dcache_page(bv.bv_page);
Dan Carpenter85b5aaa2010-10-11 21:15:11 +02001265 bvec_kunmap_irq(buf, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001266 }
Kent Overstreet79886132013-11-23 17:19:00 -08001267 pos += bv.bv_len;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001268 }
1269
1270 chain = chain->bi_next;
1271 }
1272}
1273
1274/*
Alex Elderb9434c52013-04-19 15:34:50 -05001275 * similar to zero_bio_chain(), zeros data defined by a page array,
1276 * starting at the given byte offset from the start of the array and
1277 * continuing up to the given end offset. The pages array is
1278 * assumed to be big enough to hold all bytes up to the end.
1279 */
1280static void zero_pages(struct page **pages, u64 offset, u64 end)
1281{
1282 struct page **page = &pages[offset >> PAGE_SHIFT];
1283
1284 rbd_assert(end > offset);
1285 rbd_assert(end - offset <= (u64)SIZE_MAX);
1286 while (offset < end) {
1287 size_t page_offset;
1288 size_t length;
1289 unsigned long flags;
1290 void *kaddr;
1291
Geert Uytterhoeven491205a2013-05-13 20:35:37 -05001292 page_offset = offset & ~PAGE_MASK;
1293 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
Alex Elderb9434c52013-04-19 15:34:50 -05001294 local_irq_save(flags);
1295 kaddr = kmap_atomic(*page);
1296 memset(kaddr + page_offset, 0, length);
Alex Eldere2156052013-05-22 20:54:25 -05001297 flush_dcache_page(*page);
Alex Elderb9434c52013-04-19 15:34:50 -05001298 kunmap_atomic(kaddr);
1299 local_irq_restore(flags);
1300
1301 offset += length;
1302 page++;
1303 }
1304}
1305
1306/*
Alex Elderf7760da2012-10-20 22:17:27 -05001307 * Clone a portion of a bio, starting at the given byte offset
1308 * and continuing for the number of bytes indicated.
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001309 */
Alex Elderf7760da2012-10-20 22:17:27 -05001310static struct bio *bio_clone_range(struct bio *bio_src,
1311 unsigned int offset,
1312 unsigned int len,
1313 gfp_t gfpmask)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001314{
Alex Elderf7760da2012-10-20 22:17:27 -05001315 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001316
Kent Overstreet5341a6272013-08-07 14:31:11 -07001317 bio = bio_clone(bio_src, gfpmask);
Alex Elderf7760da2012-10-20 22:17:27 -05001318 if (!bio)
1319 return NULL; /* ENOMEM */
1320
Kent Overstreet5341a6272013-08-07 14:31:11 -07001321 bio_advance(bio, offset);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001322 bio->bi_iter.bi_size = len;
Alex Elder542582f2012-08-09 10:33:25 -07001323
Alex Elderf7760da2012-10-20 22:17:27 -05001324 return bio;
1325}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001326
Alex Elderf7760da2012-10-20 22:17:27 -05001327/*
1328 * Clone a portion of a bio chain, starting at the given byte offset
1329 * into the first bio in the source chain and continuing for the
1330 * number of bytes indicated. The result is another bio chain of
1331 * exactly the given length, or a null pointer on error.
1332 *
1333 * The bio_src and offset parameters are both in-out. On entry they
1334 * refer to the first source bio and the offset into that bio where
1335 * the start of data to be cloned is located.
1336 *
1337 * On return, bio_src is updated to refer to the bio in the source
1338 * chain that contains the first un-cloned byte, and *offset will
1339 * contain the offset of that byte within that bio.
1340 */
1341static struct bio *bio_chain_clone_range(struct bio **bio_src,
1342 unsigned int *offset,
1343 unsigned int len,
1344 gfp_t gfpmask)
1345{
1346 struct bio *bi = *bio_src;
1347 unsigned int off = *offset;
1348 struct bio *chain = NULL;
1349 struct bio **end;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001350
Alex Elderf7760da2012-10-20 22:17:27 -05001351 /* Build up a chain of clone bios up to the limit */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001352
Kent Overstreet4f024f32013-10-11 15:44:27 -07001353 if (!bi || off >= bi->bi_iter.bi_size || !len)
Alex Elderf7760da2012-10-20 22:17:27 -05001354 return NULL; /* Nothing to clone */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001355
Alex Elderf7760da2012-10-20 22:17:27 -05001356 end = &chain;
1357 while (len) {
1358 unsigned int bi_size;
1359 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001360
Alex Elderf5400b72012-11-01 10:17:15 -05001361 if (!bi) {
1362 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
Alex Elderf7760da2012-10-20 22:17:27 -05001363 goto out_err; /* EINVAL; ran out of bio's */
Alex Elderf5400b72012-11-01 10:17:15 -05001364 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001365 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
Alex Elderf7760da2012-10-20 22:17:27 -05001366 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1367 if (!bio)
1368 goto out_err; /* ENOMEM */
1369
1370 *end = bio;
1371 end = &bio->bi_next;
1372
1373 off += bi_size;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001374 if (off == bi->bi_iter.bi_size) {
Alex Elderf7760da2012-10-20 22:17:27 -05001375 bi = bi->bi_next;
1376 off = 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001377 }
Alex Elderf7760da2012-10-20 22:17:27 -05001378 len -= bi_size;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001379 }
Alex Elderf7760da2012-10-20 22:17:27 -05001380 *bio_src = bi;
1381 *offset = off;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001382
Alex Elderf7760da2012-10-20 22:17:27 -05001383 return chain;
1384out_err:
1385 bio_chain_put(chain);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001386
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001387 return NULL;
1388}
1389
Alex Elder926f9b32013-02-11 12:33:24 -06001390/*
1391 * The default/initial value for all object request flags is 0. For
1392 * each flag, once its value is set to 1 it is never reset to 0
1393 * again.
1394 */
Alex Elder6365d332013-02-11 12:33:24 -06001395static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1396{
1397 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
Alex Elder6365d332013-02-11 12:33:24 -06001398 struct rbd_device *rbd_dev;
1399
Alex Elder57acbaa2013-02-11 12:33:24 -06001400 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001401 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
Alex Elder6365d332013-02-11 12:33:24 -06001402 obj_request);
1403 }
1404}
1405
1406static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1407{
1408 smp_mb();
1409 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1410}
1411
Alex Elder57acbaa2013-02-11 12:33:24 -06001412static void obj_request_done_set(struct rbd_obj_request *obj_request)
1413{
1414 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1415 struct rbd_device *rbd_dev = NULL;
1416
1417 if (obj_request_img_data_test(obj_request))
1418 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001419 rbd_warn(rbd_dev, "obj_request %p already marked done",
Alex Elder57acbaa2013-02-11 12:33:24 -06001420 obj_request);
1421 }
1422}
1423
1424static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1425{
1426 smp_mb();
1427 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1428}
1429
Alex Elder5679c592013-02-11 12:33:24 -06001430/*
1431 * This sets the KNOWN flag after (possibly) setting the EXISTS
1432 * flag. The latter is set based on the "exists" value provided.
1433 *
1434 * Note that for our purposes once an object exists it never goes
1435 * away again. It's possible that the responses from two existence
1436 * checks are separated by the creation of the target object, and
1437 * the first ("doesn't exist") response arrives *after* the second
1438 * ("does exist"). In that case we ignore the second one.
1439 */
1440static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1441 bool exists)
1442{
1443 if (exists)
1444 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1445 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1446 smp_mb();
1447}
1448
1449static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1450{
1451 smp_mb();
1452 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1453}
1454
1455static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1456{
1457 smp_mb();
1458 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1459}
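
/*
 * Illustrative aside (not part of rbd.c): a tiny userspace sketch of the
 * "sticky" existence flags described above.  The bit names and helper here
 * are invented for the example; the point is that the exists bit is only
 * ever set, never cleared, so a stale "doesn't exist" response that arrives
 * late cannot undo an earlier "does exist" result.
 */
#include <stdbool.h>
#include <stdio.h>

enum { F_EXISTS = 1 << 0, F_KNOWN = 1 << 1 };

static void existence_set(unsigned long *flags, bool exists)
{
        if (exists)
                *flags |= F_EXISTS;
        *flags |= F_KNOWN;              /* KNOWN is set last, after EXISTS */
}

int main(void)
{
        unsigned long flags = 0;

        existence_set(&flags, true);    /* "does exist" response */
        existence_set(&flags, false);   /* stale "doesn't exist" response */
        printf("known=%d exists=%d\n", !!(flags & F_KNOWN), !!(flags & F_EXISTS));
        return 0;                       /* prints known=1 exists=1 */
}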
1460
Ilya Dryomov96385562014-06-10 13:53:29 +04001461static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1462{
1463 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1464
1465 return obj_request->img_offset <
1466 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1467}
1468
Alex Elderbf0d5f502012-11-22 00:00:08 -06001469static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1470{
Alex Elder37206ee2013-02-20 17:32:08 -06001471 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1472 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001473 kref_get(&obj_request->kref);
1474}
1475
1476static void rbd_obj_request_destroy(struct kref *kref);
1477static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1478{
1479 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001480 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1481 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001482 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1483}
1484
Alex Elder0f2d5be2014-04-26 14:21:44 +04001485static void rbd_img_request_get(struct rbd_img_request *img_request)
1486{
1487 dout("%s: img %p (was %d)\n", __func__, img_request,
1488 atomic_read(&img_request->kref.refcount));
1489 kref_get(&img_request->kref);
1490}
1491
Alex Eldere93f3152013-05-08 22:50:04 -05001492static bool img_request_child_test(struct rbd_img_request *img_request);
1493static void rbd_parent_request_destroy(struct kref *kref);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001494static void rbd_img_request_destroy(struct kref *kref);
1495static void rbd_img_request_put(struct rbd_img_request *img_request)
1496{
1497 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001498 dout("%s: img %p (was %d)\n", __func__, img_request,
1499 atomic_read(&img_request->kref.refcount));
Alex Eldere93f3152013-05-08 22:50:04 -05001500 if (img_request_child_test(img_request))
1501 kref_put(&img_request->kref, rbd_parent_request_destroy);
1502 else
1503 kref_put(&img_request->kref, rbd_img_request_destroy);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001504}
1505
1506static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1507 struct rbd_obj_request *obj_request)
1508{
Alex Elder25dcf952013-01-25 17:08:55 -06001509 rbd_assert(obj_request->img_request == NULL);
1510
Alex Elderb155e862013-04-15 14:50:37 -05001511 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001512 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001513 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001514 rbd_assert(!obj_request_img_data_test(obj_request));
1515 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001516 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001517 img_request->obj_request_count++;
1518 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001519 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1520 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001521}
1522
1523static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1524 struct rbd_obj_request *obj_request)
1525{
1526 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001527
Alex Elder37206ee2013-02-20 17:32:08 -06001528 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1529 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001530 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001531 rbd_assert(img_request->obj_request_count > 0);
1532 img_request->obj_request_count--;
1533 rbd_assert(obj_request->which == img_request->obj_request_count);
1534 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001535 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001536 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001537 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001538 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001539 rbd_obj_request_put(obj_request);
1540}
1541
1542static bool obj_request_type_valid(enum obj_request_type type)
1543{
1544 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001545 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001546 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001547 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001548 return true;
1549 default:
1550 return false;
1551 }
1552}
1553
Alex Elderbf0d5f502012-11-22 00:00:08 -06001554static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1555 struct rbd_obj_request *obj_request)
1556{
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001557 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001558 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1559}
1560
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001561static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1562{
1563 dout("%s %p\n", __func__, obj_request);
1564 ceph_osdc_cancel_request(obj_request->osd_req);
1565}
1566
1567/*
1568 * Wait for an object request to complete. If interrupted, cancel the
1569 * underlying osd request.
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001570 *
1571 * @timeout: in jiffies, 0 means "wait forever"
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001572 */
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001573static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1574 unsigned long timeout)
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001575{
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001576 long ret;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001577
1578 dout("%s %p\n", __func__, obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001579 ret = wait_for_completion_interruptible_timeout(
1580 &obj_request->completion,
1581 ceph_timeout_jiffies(timeout));
1582 if (ret <= 0) {
1583 if (ret == 0)
1584 ret = -ETIMEDOUT;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001585 rbd_obj_request_end(obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001586 } else {
1587 ret = 0;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001588 }
1589
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001590 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1591 return ret;
1592}
1593
1594static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1595{
1596 return __rbd_obj_request_wait(obj_request, 0);
1597}
1598
Alex Elderbf0d5f502012-11-22 00:00:08 -06001599static void rbd_img_request_complete(struct rbd_img_request *img_request)
1600{
Alex Elder55f27e02013-04-10 12:34:25 -05001601
Alex Elder37206ee2013-02-20 17:32:08 -06001602 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001603
1604 /*
1605 * If no error occurred, compute the aggregate transfer
1606 * count for the image request. We could instead use
1607 * atomic64_cmpxchg() to update it as each object request
1608 * completes; not clear which way is better off hand.
1609 */
1610 if (!img_request->result) {
1611 struct rbd_obj_request *obj_request;
1612 u64 xferred = 0;
1613
1614 for_each_obj_request(img_request, obj_request)
1615 xferred += obj_request->xferred;
1616 img_request->xferred = xferred;
1617 }
1618
Alex Elderbf0d5f502012-11-22 00:00:08 -06001619 if (img_request->callback)
1620 img_request->callback(img_request);
1621 else
1622 rbd_img_request_put(img_request);
1623}
1624
Alex Elder0c425242013-02-08 09:55:49 -06001625/*
1626 * The default/initial value for all image request flags is 0. Each
1627 * is conditionally set to 1 at image request initialization time
1628 * and currently never changes thereafter.
1629 */
1630static void img_request_write_set(struct rbd_img_request *img_request)
1631{
1632 set_bit(IMG_REQ_WRITE, &img_request->flags);
1633 smp_mb();
1634}
1635
1636static bool img_request_write_test(struct rbd_img_request *img_request)
1637{
1638 smp_mb();
1639 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1640}
1641
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001642/*
1643 * Set the discard flag when the img_request is a discard request
1644 */
1645static void img_request_discard_set(struct rbd_img_request *img_request)
1646{
1647 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1648 smp_mb();
1649}
1650
1651static bool img_request_discard_test(struct rbd_img_request *img_request)
1652{
1653 smp_mb();
1654 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1655}
1656
Alex Elder9849e982013-01-24 16:13:36 -06001657static void img_request_child_set(struct rbd_img_request *img_request)
1658{
1659 set_bit(IMG_REQ_CHILD, &img_request->flags);
1660 smp_mb();
1661}
1662
Alex Eldere93f3152013-05-08 22:50:04 -05001663static void img_request_child_clear(struct rbd_img_request *img_request)
1664{
1665 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1666 smp_mb();
1667}
1668
Alex Elder9849e982013-01-24 16:13:36 -06001669static bool img_request_child_test(struct rbd_img_request *img_request)
1670{
1671 smp_mb();
1672 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1673}
1674
Alex Elderd0b2e942013-01-24 16:13:36 -06001675static void img_request_layered_set(struct rbd_img_request *img_request)
1676{
1677 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1678 smp_mb();
1679}
1680
Alex Eldera2acd002013-05-08 22:50:04 -05001681static void img_request_layered_clear(struct rbd_img_request *img_request)
1682{
1683 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1684 smp_mb();
1685}
1686
Alex Elderd0b2e942013-01-24 16:13:36 -06001687static bool img_request_layered_test(struct rbd_img_request *img_request)
1688{
1689 smp_mb();
1690 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1691}
1692
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001693static enum obj_operation_type
1694rbd_img_request_op_type(struct rbd_img_request *img_request)
1695{
1696 if (img_request_write_test(img_request))
1697 return OBJ_OP_WRITE;
1698 else if (img_request_discard_test(img_request))
1699 return OBJ_OP_DISCARD;
1700 else
1701 return OBJ_OP_READ;
1702}
1703
Alex Elder6e2a4502013-03-27 09:16:30 -05001704static void
1705rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1706{
Alex Elderb9434c52013-04-19 15:34:50 -05001707 u64 xferred = obj_request->xferred;
1708 u64 length = obj_request->length;
1709
Alex Elder6e2a4502013-03-27 09:16:30 -05001710 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1711 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001712 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001713 /*
Josh Durgin17c1cc12013-08-26 17:55:38 -07001714 * ENOENT means a hole in the image. We zero-fill the entire
1715 * length of the request. A short read also implies zero-fill
1716 * to the end of the request. An error requires the whole
1717 * length of the request to be reported finished with an error
1718 * to the block layer. In each case we update the xferred
1719 * count to indicate the whole request was satisfied.
Alex Elder6e2a4502013-03-27 09:16:30 -05001720 */
Alex Elderb9434c52013-04-19 15:34:50 -05001721 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001722 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001723 if (obj_request->type == OBJ_REQUEST_BIO)
1724 zero_bio_chain(obj_request->bio_list, 0);
1725 else
1726 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001727 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001728 } else if (xferred < length && !obj_request->result) {
1729 if (obj_request->type == OBJ_REQUEST_BIO)
1730 zero_bio_chain(obj_request->bio_list, xferred);
1731 else
1732 zero_pages(obj_request->pages, xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001733 }
Josh Durgin17c1cc12013-08-26 17:55:38 -07001734 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001735 obj_request_done_set(obj_request);
1736}
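
/*
 * Illustrative aside (not part of rbd.c): the zero-fill policy above, applied
 * to a flat buffer instead of a bio chain or page array.  A hole (-ENOENT)
 * zero-fills the whole request, a short read zero-fills the tail, and in
 * every case the full request length is reported as transferred.  The helper
 * name is made up for the example.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void read_done(char *buf, uint64_t length, int result, uint64_t xferred)
{
        if (result == -ENOENT) {                /* hole in the image */
                memset(buf, 0, length);
                result = 0;
        } else if (xferred < length && !result) {
                memset(buf + xferred, 0, length - xferred);     /* short read */
        }
        printf("result=%d xferred=%llu\n", result, (unsigned long long)length);
}

int main(void)
{
        char buf[8] = "AAAAAAA";

        read_done(buf, sizeof(buf), 0, 3);       /* short read: bytes 3..7 zeroed */
        read_done(buf, sizeof(buf), -ENOENT, 0); /* hole: whole buffer zeroed */
        return 0;
}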
1737
Alex Elderbf0d5f502012-11-22 00:00:08 -06001738static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1739{
Alex Elder37206ee2013-02-20 17:32:08 -06001740 dout("%s: obj %p cb %p\n", __func__, obj_request,
1741 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001742 if (obj_request->callback)
1743 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001744 else
1745 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001746}
1747
Alex Elderc47f9372013-02-26 14:23:07 -06001748static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001749{
Alex Elder57acbaa2013-02-11 12:33:24 -06001750 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001751 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001752 bool layered = false;
1753
1754 if (obj_request_img_data_test(obj_request)) {
1755 img_request = obj_request->img_request;
1756 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001757 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001758 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001759
1760 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1761 obj_request, img_request, obj_request->result,
1762 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001763 if (layered && obj_request->result == -ENOENT &&
1764 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001765 rbd_img_parent_read(obj_request);
1766 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001767 rbd_img_obj_request_read_callback(obj_request);
1768 else
1769 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001770}
1771
Alex Elderc47f9372013-02-26 14:23:07 -06001772static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001773{
Sage Weil1b83bef2013-02-25 16:11:12 -08001774 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1775 obj_request->result, obj_request->length);
1776 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001777 * There is no such thing as a successful short write. Set
1778 * it to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001779 */
1780 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001781 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001782}
1783
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001784static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1785{
1786 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1787 obj_request->result, obj_request->length);
1788 /*
1789 * There is no such thing as a successful short discard. Set
1790 * it to our originally-requested length.
1791 */
1792 obj_request->xferred = obj_request->length;
Josh Durgind0265de2014-04-07 16:54:10 -07001793 /* discarding a non-existent object is not a problem */
1794 if (obj_request->result == -ENOENT)
1795 obj_request->result = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001796 obj_request_done_set(obj_request);
1797}
1798
Alex Elderfbfab532013-02-08 09:55:48 -06001799/*
1800 * For a simple stat call there's nothing to do. We'll do more if
1801 * this is part of a write sequence for a layered image.
1802 */
Alex Elderc47f9372013-02-26 14:23:07 -06001803static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001804{
Alex Elder37206ee2013-02-20 17:32:08 -06001805 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001806 obj_request_done_set(obj_request);
1807}
1808
Ilya Dryomov27617132015-07-16 17:36:11 +03001809static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1810{
1811 dout("%s: obj %p\n", __func__, obj_request);
1812
1813 if (obj_request_img_data_test(obj_request))
1814 rbd_osd_copyup_callback(obj_request);
1815 else
1816 obj_request_done_set(obj_request);
1817}
1818
Ilya Dryomov85e084f2016-04-28 16:07:24 +02001819static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001820{
1821 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001822 u16 opcode;
1823
Ilya Dryomov85e084f2016-04-28 16:07:24 +02001824 dout("%s: osd_req %p\n", __func__, osd_req);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001825 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001826 if (obj_request_img_data_test(obj_request)) {
1827 rbd_assert(obj_request->img_request);
1828 rbd_assert(obj_request->which != BAD_WHICH);
1829 } else {
1830 rbd_assert(obj_request->which == BAD_WHICH);
1831 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001832
Sage Weil1b83bef2013-02-25 16:11:12 -08001833 if (osd_req->r_result < 0)
1834 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001835
Alex Elderc47f9372013-02-26 14:23:07 -06001836 /*
1837 * We support a 64-bit length, but ultimately it has to be
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01001838 * passed to the block layer, which just supports a 32-bit
1839 * length field.
Alex Elderc47f9372013-02-26 14:23:07 -06001840 */
Yan, Zheng7665d852016-01-07 16:48:57 +08001841 obj_request->xferred = osd_req->r_ops[0].outdata_len;
Alex Elder8b3e1a52013-01-24 16:13:36 -06001842 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001843
Alex Elder79528732013-04-03 21:32:51 -05001844 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001845 switch (opcode) {
1846 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001847 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001848 break;
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001849 case CEPH_OSD_OP_SETALLOCHINT:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001850 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1851 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001852 /* fall through */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001853 case CEPH_OSD_OP_WRITE:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001854 case CEPH_OSD_OP_WRITEFULL:
Alex Elderc47f9372013-02-26 14:23:07 -06001855 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001856 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001857 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001858 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001859 break;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001860 case CEPH_OSD_OP_DELETE:
1861 case CEPH_OSD_OP_TRUNCATE:
1862 case CEPH_OSD_OP_ZERO:
1863 rbd_osd_discard_callback(obj_request);
1864 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001865 case CEPH_OSD_OP_CALL:
Ilya Dryomov27617132015-07-16 17:36:11 +03001866 rbd_osd_call_callback(obj_request);
1867 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001868 default:
Ilya Dryomov9584d502014-07-11 12:11:20 +04001869 rbd_warn(NULL, "%s: unsupported op %hu",
Alex Elderbf0d5f502012-11-22 00:00:08 -06001870 obj_request->object_name, (unsigned short) opcode);
1871 break;
1872 }
1873
Alex Elder07741302013-02-05 23:41:50 -06001874 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001875 rbd_obj_request_complete(obj_request);
1876}
1877
Alex Elder9d4df012013-04-19 15:34:50 -05001878static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001879{
1880 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001881 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder430c28c2013-04-03 21:32:51 -05001882
Ilya Dryomovbb873b52016-05-26 00:29:52 +02001883 if (img_request)
1884 osd_req->r_snapid = img_request->snap_id;
Alex Elder9d4df012013-04-19 15:34:50 -05001885}
1886
1887static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1888{
Alex Elder9d4df012013-04-19 15:34:50 -05001889 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001890
Ilya Dryomovbb873b52016-05-26 00:29:52 +02001891 osd_req->r_mtime = CURRENT_TIME;
1892 osd_req->r_data_offset = obj_request->offset;
Alex Elder430c28c2013-04-03 21:32:51 -05001893}
1894
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001895/*
1896 * Create an osd request. A read request has one osd op (read).
1897 * A write request has either one (watch) or two (hint+write) osd ops.
1898 * (All rbd data writes are prefixed with an allocation hint op, but
1899 * technically osd watch is a write request, hence this distinction.)
1900 */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001901static struct ceph_osd_request *rbd_osd_req_create(
1902 struct rbd_device *rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001903 enum obj_operation_type op_type,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001904 unsigned int num_ops,
Alex Elder430c28c2013-04-03 21:32:51 -05001905 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001906{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001907 struct ceph_snap_context *snapc = NULL;
1908 struct ceph_osd_client *osdc;
1909 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001910
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001911 if (obj_request_img_data_test(obj_request) &&
1912 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
Alex Elder6365d332013-02-11 12:33:24 -06001913 struct rbd_img_request *img_request = obj_request->img_request;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001914 if (op_type == OBJ_OP_WRITE) {
1915 rbd_assert(img_request_write_test(img_request));
1916 } else {
1917 rbd_assert(img_request_discard_test(img_request));
1918 }
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001919 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001920 }
1921
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001922 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001923
1924 /* Allocate and initialize the request, for the num_ops ops */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001925
1926 osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001927 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
David Disseldorp2224d872016-04-05 11:13:39 +02001928 GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001929 if (!osd_req)
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001930 goto fail;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001931
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001932 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001933 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001934 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001935 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001936
1937 osd_req->r_callback = rbd_osd_req_callback;
1938 osd_req->r_priv = obj_request;
1939
Yan, Zheng76271512016-02-03 21:24:49 +08001940 osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
Ilya Dryomovd30291b2016-04-29 19:54:20 +02001941 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
1942 obj_request->object_name))
1943 goto fail;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001944
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001945 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
1946 goto fail;
1947
Alex Elderbf0d5f502012-11-22 00:00:08 -06001948 return osd_req;
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001949
1950fail:
1951 ceph_osdc_put_request(osd_req);
1952 return NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001953}
1954
Alex Elder0eefd472013-04-19 15:34:50 -05001955/*
Josh Durgind3246fb2014-04-07 16:49:21 -07001956 * Create a copyup osd request based on the information in the object
1957 * request supplied. A copyup request has two or three osd ops: a
1958 * copyup method call, potentially a hint op, and a write, truncate,
1959 * or zero op.
Alex Elder0eefd472013-04-19 15:34:50 -05001960 */
1961static struct ceph_osd_request *
1962rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1963{
1964 struct rbd_img_request *img_request;
1965 struct ceph_snap_context *snapc;
1966 struct rbd_device *rbd_dev;
1967 struct ceph_osd_client *osdc;
1968 struct ceph_osd_request *osd_req;
Josh Durgind3246fb2014-04-07 16:49:21 -07001969 int num_osd_ops = 3;
Alex Elder0eefd472013-04-19 15:34:50 -05001970
1971 rbd_assert(obj_request_img_data_test(obj_request));
1972 img_request = obj_request->img_request;
1973 rbd_assert(img_request);
Josh Durgind3246fb2014-04-07 16:49:21 -07001974 rbd_assert(img_request_write_test(img_request) ||
1975 img_request_discard_test(img_request));
Alex Elder0eefd472013-04-19 15:34:50 -05001976
Josh Durgind3246fb2014-04-07 16:49:21 -07001977 if (img_request_discard_test(img_request))
1978 num_osd_ops = 2;
1979
1980 /* Allocate and initialize the request, for all the ops */
Alex Elder0eefd472013-04-19 15:34:50 -05001981
1982 snapc = img_request->snapc;
1983 rbd_dev = img_request->rbd_dev;
1984 osdc = &rbd_dev->rbd_client->client->osdc;
Josh Durgind3246fb2014-04-07 16:49:21 -07001985 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
David Disseldorp2224d872016-04-05 11:13:39 +02001986 false, GFP_NOIO);
Alex Elder0eefd472013-04-19 15:34:50 -05001987 if (!osd_req)
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001988 goto fail;
Alex Elder0eefd472013-04-19 15:34:50 -05001989
1990 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1991 osd_req->r_callback = rbd_osd_req_callback;
1992 osd_req->r_priv = obj_request;
1993
Yan, Zheng76271512016-02-03 21:24:49 +08001994 osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
Ilya Dryomovd30291b2016-04-29 19:54:20 +02001995 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
1996 obj_request->object_name))
1997 goto fail;
Alex Elder0eefd472013-04-19 15:34:50 -05001998
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001999 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
2000 goto fail;
2001
Alex Elder0eefd472013-04-19 15:34:50 -05002002 return osd_req;
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02002003
2004fail:
2005 ceph_osdc_put_request(osd_req);
2006 return NULL;
Alex Elder0eefd472013-04-19 15:34:50 -05002007}
2008
2009
Alex Elderbf0d5f502012-11-22 00:00:08 -06002010static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2011{
2012 ceph_osdc_put_request(osd_req);
2013}
2014
2015/* object_name is assumed to be a non-null pointer and NUL-terminated */
2016
2017static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2018 u64 offset, u64 length,
2019 enum obj_request_type type)
2020{
2021 struct rbd_obj_request *obj_request;
2022 size_t size;
2023 char *name;
2024
2025 rbd_assert(obj_request_type_valid(type));
2026
2027 size = strlen(object_name) + 1;
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002028 name = kmalloc(size, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002029 if (!name)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002030 return NULL;
2031
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002032 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002033 if (!obj_request) {
2034 kfree(name);
2035 return NULL;
2036 }
2037
Alex Elderbf0d5f502012-11-22 00:00:08 -06002038 obj_request->object_name = memcpy(name, object_name, size);
2039 obj_request->offset = offset;
2040 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06002041 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002042 obj_request->which = BAD_WHICH;
2043 obj_request->type = type;
2044 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06002045 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002046 kref_init(&obj_request->kref);
2047
Alex Elder37206ee2013-02-20 17:32:08 -06002048 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2049 offset, length, (int)type, obj_request);
2050
Alex Elderbf0d5f502012-11-22 00:00:08 -06002051 return obj_request;
2052}
2053
2054static void rbd_obj_request_destroy(struct kref *kref)
2055{
2056 struct rbd_obj_request *obj_request;
2057
2058 obj_request = container_of(kref, struct rbd_obj_request, kref);
2059
Alex Elder37206ee2013-02-20 17:32:08 -06002060 dout("%s: obj %p\n", __func__, obj_request);
2061
Alex Elderbf0d5f502012-11-22 00:00:08 -06002062 rbd_assert(obj_request->img_request == NULL);
2063 rbd_assert(obj_request->which == BAD_WHICH);
2064
2065 if (obj_request->osd_req)
2066 rbd_osd_req_destroy(obj_request->osd_req);
2067
2068 rbd_assert(obj_request_type_valid(obj_request->type));
2069 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06002070 case OBJ_REQUEST_NODATA:
2071 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06002072 case OBJ_REQUEST_BIO:
2073 if (obj_request->bio_list)
2074 bio_chain_put(obj_request->bio_list);
2075 break;
Alex Elder788e2df2013-01-17 12:25:27 -06002076 case OBJ_REQUEST_PAGES:
2077 if (obj_request->pages)
2078 ceph_release_page_vector(obj_request->pages,
2079 obj_request->page_count);
2080 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002081 }
2082
Alex Elderf907ad52013-05-01 12:43:03 -05002083 kfree(obj_request->object_name);
Alex Elder868311b2013-05-01 12:43:03 -05002084 obj_request->object_name = NULL;
2085 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002086}
2087
Alex Elderfb65d2282013-05-08 22:50:04 -05002088/* It's OK to call this for a device with no parent */
2089
2090static void rbd_spec_put(struct rbd_spec *spec);
2091static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2092{
2093 rbd_dev_remove_parent(rbd_dev);
2094 rbd_spec_put(rbd_dev->parent_spec);
2095 rbd_dev->parent_spec = NULL;
2096 rbd_dev->parent_overlap = 0;
2097}
2098
Alex Elderbf0d5f502012-11-22 00:00:08 -06002099/*
Alex Eldera2acd002013-05-08 22:50:04 -05002100 * Parent image reference counting is used to determine when an
2101 * image's parent fields can be safely torn down--after there are no
2102 * more in-flight requests to the parent image. When the last
2103 * reference is dropped, cleaning them up is safe.
2104 */
2105static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2106{
2107 int counter;
2108
2109 if (!rbd_dev->parent_spec)
2110 return;
2111
2112 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2113 if (counter > 0)
2114 return;
2115
2116 /* Last reference; clean up parent data structures */
2117
2118 if (!counter)
2119 rbd_dev_unparent(rbd_dev);
2120 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04002121 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002122}
2123
2124/*
2125 * If an image has a non-zero parent overlap, get a reference to its
2126 * parent.
2127 *
2128 * Returns true if the rbd device has a parent with a non-zero
2129 * overlap and a reference for it was successfully taken, or
2130 * false otherwise.
2131 */
2132static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2133{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002134 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002135
2136 if (!rbd_dev->parent_spec)
2137 return false;
2138
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002139 down_read(&rbd_dev->header_rwsem);
2140 if (rbd_dev->parent_overlap)
2141 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2142 up_read(&rbd_dev->header_rwsem);
Alex Eldera2acd002013-05-08 22:50:04 -05002143
2144 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04002145 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002146
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002147 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002148}
2149
Alex Elderbf0d5f502012-11-22 00:00:08 -06002150/*
2151 * Caller is responsible for filling in the list of object requests
2152 * that comprises the image request, and the Linux request pointer
2153 * (if there is one).
2154 */
Alex Eldercc344fa2013-02-19 12:25:56 -06002155static struct rbd_img_request *rbd_img_request_create(
2156 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06002157 u64 offset, u64 length,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002158 enum obj_operation_type op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07002159 struct ceph_snap_context *snapc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002160{
2161 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002162
Ilya Dryomov7a716aa2014-08-05 11:25:54 +04002163 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002164 if (!img_request)
2165 return NULL;
2166
Alex Elderbf0d5f502012-11-22 00:00:08 -06002167 img_request->rq = NULL;
2168 img_request->rbd_dev = rbd_dev;
2169 img_request->offset = offset;
2170 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06002171 img_request->flags = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002172 if (op_type == OBJ_OP_DISCARD) {
2173 img_request_discard_set(img_request);
2174 img_request->snapc = snapc;
2175 } else if (op_type == OBJ_OP_WRITE) {
Alex Elder0c425242013-02-08 09:55:49 -06002176 img_request_write_set(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07002177 img_request->snapc = snapc;
Alex Elder0c425242013-02-08 09:55:49 -06002178 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002179 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06002180 }
Alex Eldera2acd002013-05-08 22:50:04 -05002181 if (rbd_dev_parent_get(rbd_dev))
Alex Elderd0b2e942013-01-24 16:13:36 -06002182 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002183 spin_lock_init(&img_request->completion_lock);
2184 img_request->next_completion = 0;
2185 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06002186 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002187 img_request->obj_request_count = 0;
2188 INIT_LIST_HEAD(&img_request->obj_requests);
2189 kref_init(&img_request->kref);
2190
Alex Elder37206ee2013-02-20 17:32:08 -06002191 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002192 obj_op_name(op_type), offset, length, img_request);
Alex Elder37206ee2013-02-20 17:32:08 -06002193
Alex Elderbf0d5f502012-11-22 00:00:08 -06002194 return img_request;
2195}
2196
2197static void rbd_img_request_destroy(struct kref *kref)
2198{
2199 struct rbd_img_request *img_request;
2200 struct rbd_obj_request *obj_request;
2201 struct rbd_obj_request *next_obj_request;
2202
2203 img_request = container_of(kref, struct rbd_img_request, kref);
2204
Alex Elder37206ee2013-02-20 17:32:08 -06002205 dout("%s: img %p\n", __func__, img_request);
2206
Alex Elderbf0d5f502012-11-22 00:00:08 -06002207 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2208 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06002209 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002210
Alex Eldera2acd002013-05-08 22:50:04 -05002211 if (img_request_layered_test(img_request)) {
2212 img_request_layered_clear(img_request);
2213 rbd_dev_parent_put(img_request->rbd_dev);
2214 }
2215
Josh Durginbef95452014-04-04 17:47:52 -07002216 if (img_request_write_test(img_request) ||
2217 img_request_discard_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05002218 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002219
Alex Elder1c2a9df2013-05-01 12:43:03 -05002220 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002221}
2222
Alex Eldere93f3152013-05-08 22:50:04 -05002223static struct rbd_img_request *rbd_parent_request_create(
2224 struct rbd_obj_request *obj_request,
2225 u64 img_offset, u64 length)
2226{
2227 struct rbd_img_request *parent_request;
2228 struct rbd_device *rbd_dev;
2229
2230 rbd_assert(obj_request->img_request);
2231 rbd_dev = obj_request->img_request->rbd_dev;
2232
Josh Durgin4e752f02014-04-08 11:12:11 -07002233 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002234 length, OBJ_OP_READ, NULL);
Alex Eldere93f3152013-05-08 22:50:04 -05002235 if (!parent_request)
2236 return NULL;
2237
2238 img_request_child_set(parent_request);
2239 rbd_obj_request_get(obj_request);
2240 parent_request->obj_request = obj_request;
2241
2242 return parent_request;
2243}
2244
2245static void rbd_parent_request_destroy(struct kref *kref)
2246{
2247 struct rbd_img_request *parent_request;
2248 struct rbd_obj_request *orig_request;
2249
2250 parent_request = container_of(kref, struct rbd_img_request, kref);
2251 orig_request = parent_request->obj_request;
2252
2253 parent_request->obj_request = NULL;
2254 rbd_obj_request_put(orig_request);
2255 img_request_child_clear(parent_request);
2256
2257 rbd_img_request_destroy(kref);
2258}
2259
Alex Elder12178572013-02-08 09:55:49 -06002260static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2261{
Alex Elder6365d332013-02-11 12:33:24 -06002262 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06002263 unsigned int xferred;
2264 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002265 bool more;
Alex Elder12178572013-02-08 09:55:49 -06002266
Alex Elder6365d332013-02-11 12:33:24 -06002267 rbd_assert(obj_request_img_data_test(obj_request));
2268 img_request = obj_request->img_request;
2269
Alex Elder12178572013-02-08 09:55:49 -06002270 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2271 xferred = (unsigned int)obj_request->xferred;
2272 result = obj_request->result;
2273 if (result) {
2274 struct rbd_device *rbd_dev = img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002275 enum obj_operation_type op_type;
2276
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002277 if (img_request_discard_test(img_request))
2278 op_type = OBJ_OP_DISCARD;
2279 else if (img_request_write_test(img_request))
2280 op_type = OBJ_OP_WRITE;
2281 else
2282 op_type = OBJ_OP_READ;
Alex Elder12178572013-02-08 09:55:49 -06002283
Ilya Dryomov9584d502014-07-11 12:11:20 +04002284 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002285 obj_op_name(op_type), obj_request->length,
2286 obj_request->img_offset, obj_request->offset);
Ilya Dryomov9584d502014-07-11 12:11:20 +04002287 rbd_warn(rbd_dev, " result %d xferred %x",
Alex Elder12178572013-02-08 09:55:49 -06002288 result, xferred);
2289 if (!img_request->result)
2290 img_request->result = result;
Ilya Dryomov082a75d2015-04-25 15:56:15 +03002291 /*
2292 * Need to end I/O on the entire obj_request worth of
2293 * bytes in case of error.
2294 */
2295 xferred = obj_request->length;
Alex Elder12178572013-02-08 09:55:49 -06002296 }
2297
Alex Elderf1a47392013-04-19 15:34:50 -05002298 /* Image object requests don't own their page array */
2299
2300 if (obj_request->type == OBJ_REQUEST_PAGES) {
2301 obj_request->pages = NULL;
2302 obj_request->page_count = 0;
2303 }
2304
Alex Elder8b3e1a52013-01-24 16:13:36 -06002305 if (img_request_child_test(img_request)) {
2306 rbd_assert(img_request->obj_request != NULL);
2307 more = obj_request->which < img_request->obj_request_count - 1;
2308 } else {
2309 rbd_assert(img_request->rq != NULL);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01002310
2311 more = blk_update_request(img_request->rq, result, xferred);
2312 if (!more)
2313 __blk_mq_end_request(img_request->rq, result);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002314 }
2315
2316 return more;
Alex Elder12178572013-02-08 09:55:49 -06002317}
2318
Alex Elder21692382013-04-05 01:27:12 -05002319static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2320{
2321 struct rbd_img_request *img_request;
2322 u32 which = obj_request->which;
2323 bool more = true;
2324
Alex Elder6365d332013-02-11 12:33:24 -06002325 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05002326 img_request = obj_request->img_request;
2327
2328 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2329 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05002330 rbd_assert(img_request->obj_request_count > 0);
2331 rbd_assert(which != BAD_WHICH);
2332 rbd_assert(which < img_request->obj_request_count);
Alex Elder21692382013-04-05 01:27:12 -05002333
2334 spin_lock_irq(&img_request->completion_lock);
2335 if (which != img_request->next_completion)
2336 goto out;
2337
2338 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05002339 rbd_assert(more);
2340 rbd_assert(which < img_request->obj_request_count);
2341
2342 if (!obj_request_done_test(obj_request))
2343 break;
Alex Elder12178572013-02-08 09:55:49 -06002344 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002345 which++;
2346 }
2347
2348 rbd_assert(more ^ (which == img_request->obj_request_count));
2349 img_request->next_completion = which;
2350out:
2351 spin_unlock_irq(&img_request->completion_lock);
Alex Elder0f2d5be2014-04-26 14:21:44 +04002352 rbd_img_request_put(img_request);
Alex Elder21692382013-04-05 01:27:12 -05002353
2354 if (!more)
2355 rbd_img_request_complete(img_request);
2356}
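
/*
 * Illustrative aside (not part of rbd.c): the in-order completion walk above,
 * reduced to a userspace sketch.  Object requests may finish in any order,
 * but they are ended strictly in "which" order, and next_completion only
 * advances across a contiguous prefix of completed requests.  The array sizes
 * and arrival order are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        bool done[5] = { false };
        unsigned int next_completion = 0;
        unsigned int finish_order[] = { 2, 0, 1, 4, 3 };        /* arrival order */

        for (unsigned int i = 0; i < 5; i++) {
                unsigned int which = finish_order[i];

                done[which] = true;
                if (which != next_completion)
                        continue;       /* an earlier request is still in flight */
                while (next_completion < 5 && done[next_completion])
                        printf("end request %u\n", next_completion++);
        }
        return 0;                       /* ends requests 0, 1, 2, 3, 4 in order */
}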
2357
Alex Elderf1a47392013-04-19 15:34:50 -05002358/*
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002359 * Add individual osd ops to the given ceph_osd_request and prepare
2360 * them for submission. num_ops is the current number of
2361 * osd operations already added to the object request.
2362 */
2363static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2364 struct ceph_osd_request *osd_request,
2365 enum obj_operation_type op_type,
2366 unsigned int num_ops)
2367{
2368 struct rbd_img_request *img_request = obj_request->img_request;
2369 struct rbd_device *rbd_dev = img_request->rbd_dev;
2370 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2371 u64 offset = obj_request->offset;
2372 u64 length = obj_request->length;
2373 u64 img_end;
2374 u16 opcode;
2375
2376 if (op_type == OBJ_OP_DISCARD) {
Josh Durgind3246fb2014-04-07 16:49:21 -07002377 if (!offset && length == object_size &&
2378 (!img_request_layered_test(img_request) ||
2379 !obj_request_overlaps_parent(obj_request))) {
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002380 opcode = CEPH_OSD_OP_DELETE;
2381 } else if ((offset + length == object_size)) {
2382 opcode = CEPH_OSD_OP_TRUNCATE;
2383 } else {
2384 down_read(&rbd_dev->header_rwsem);
2385 img_end = rbd_dev->header.image_size;
2386 up_read(&rbd_dev->header_rwsem);
2387
2388 if (obj_request->img_offset + length == img_end)
2389 opcode = CEPH_OSD_OP_TRUNCATE;
2390 else
2391 opcode = CEPH_OSD_OP_ZERO;
2392 }
2393 } else if (op_type == OBJ_OP_WRITE) {
Ilya Dryomove30b7572015-10-07 17:27:17 +02002394 if (!offset && length == object_size)
2395 opcode = CEPH_OSD_OP_WRITEFULL;
2396 else
2397 opcode = CEPH_OSD_OP_WRITE;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002398 osd_req_op_alloc_hint_init(osd_request, num_ops,
2399 object_size, object_size);
2400 num_ops++;
2401 } else {
2402 opcode = CEPH_OSD_OP_READ;
2403 }
2404
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002405 if (opcode == CEPH_OSD_OP_DELETE)
Yan, Zheng144cba12015-04-27 11:09:54 +08002406 osd_req_op_init(osd_request, num_ops, opcode, 0);
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002407 else
2408 osd_req_op_extent_init(osd_request, num_ops, opcode,
2409 offset, length, 0, 0);
2410
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002411 if (obj_request->type == OBJ_REQUEST_BIO)
2412 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2413 obj_request->bio_list, length);
2414 else if (obj_request->type == OBJ_REQUEST_PAGES)
2415 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2416 obj_request->pages, length,
2417 offset & ~PAGE_MASK, false, false);
2418
2419 /* Discards are also writes */
2420 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2421 rbd_osd_req_format_write(obj_request);
2422 else
2423 rbd_osd_req_format_read(obj_request);
2424}
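
/*
 * Illustrative aside (not part of rbd.c): the discard opcode selection above,
 * as a standalone sketch.  The parent-overlap check that can demote DELETE is
 * omitted for brevity, and the function/parameter names are invented.  A
 * whole-object discard deletes the object, a discard reaching the object
 * boundary or the end of the image truncates it, and anything else is zeroed.
 * (For writes the analogous choice is WRITEFULL for a whole object and WRITE
 * otherwise, preceded by an allocation hint op.)
 */
#include <stdint.h>
#include <stdio.h>

static const char *discard_op(uint64_t offset, uint64_t length,
                              uint64_t object_size,
                              uint64_t img_offset, uint64_t image_size)
{
        if (!offset && length == object_size)
                return "DELETE";        /* whole object, no parent data assumed */
        if (offset + length == object_size)
                return "TRUNCATE";      /* runs to the object boundary */
        if (img_offset + length == image_size)
                return "TRUNCATE";      /* runs to the end of the image */
        return "ZERO";
}

int main(void)
{
        uint64_t obj = 4ULL << 20;      /* 4 MiB objects */
        uint64_t img = 40ULL << 20;     /* 40 MiB image */

        printf("%s\n", discard_op(0, obj, obj, 36ULL << 20, img));                 /* DELETE   */
        printf("%s\n", discard_op(1ULL << 20, 3ULL << 20, obj, 36ULL << 20, img)); /* TRUNCATE */
        printf("%s\n", discard_op(1ULL << 20, 1ULL << 20, obj, 36ULL << 20, img)); /* ZERO     */
        return 0;
}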
2425
2426/*
Alex Elderf1a47392013-04-19 15:34:50 -05002427 * Split up an image request into one or more object requests, each
2428 * to a different object. The "type" parameter indicates whether
2429 * "data_desc" is the pointer to the head of a list of bio
2430 * structures, or the base of a page array. In either case this
2431 * function assumes data_desc describes memory sufficient to hold
2432 * all data described by the image request.
2433 */
2434static int rbd_img_request_fill(struct rbd_img_request *img_request,
2435 enum obj_request_type type,
2436 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002437{
2438 struct rbd_device *rbd_dev = img_request->rbd_dev;
2439 struct rbd_obj_request *obj_request = NULL;
2440 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002441 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002442 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002443 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002444 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002445 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002446 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002447
Alex Elderf1a47392013-04-19 15:34:50 -05002448 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2449 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002450
Alex Elder7da22d22013-01-24 16:13:36 -06002451 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002452 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002453 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002454 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002455
2456 if (type == OBJ_REQUEST_BIO) {
2457 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002458 rbd_assert(img_offset ==
2459 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002460 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002461 pages = data_desc;
2462 }
2463
Alex Elderbf0d5f502012-11-22 00:00:08 -06002464 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002465 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002466 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002467 u64 offset;
2468 u64 length;
2469
Alex Elder7da22d22013-01-24 16:13:36 -06002470 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002471 if (!object_name)
2472 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06002473 offset = rbd_segment_offset(rbd_dev, img_offset);
2474 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002475 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002476 offset, length, type);
Alex Elder78c2a442013-05-01 12:43:04 -05002477 /* object request has its own copy of the object name */
2478 rbd_segment_name_free(object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002479 if (!obj_request)
2480 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002481
Josh Durgin03507db2013-08-27 14:45:46 -07002482 /*
2483 * set obj_request->img_request before creating the
2484 * osd_request so that it gets the right snapc
2485 */
2486 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002487
Alex Elderf1a47392013-04-19 15:34:50 -05002488 if (type == OBJ_REQUEST_BIO) {
2489 unsigned int clone_size;
2490
2491 rbd_assert(length <= (u64)UINT_MAX);
2492 clone_size = (unsigned int)length;
2493 obj_request->bio_list =
2494 bio_chain_clone_range(&bio_list,
2495 &bio_offset,
2496 clone_size,
David Disseldorp2224d872016-04-05 11:13:39 +02002497 GFP_NOIO);
Alex Elderf1a47392013-04-19 15:34:50 -05002498 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002499 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002500 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002501 unsigned int page_count;
2502
2503 obj_request->pages = pages;
2504 page_count = (u32)calc_pages_for(offset, length);
2505 obj_request->page_count = page_count;
2506 if ((offset + length) & ~PAGE_MASK)
2507 page_count--; /* more on last page */
2508 pages += page_count;
2509 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002510
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002511 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2512 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2513 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002514 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002515 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002516
Alex Elder2fa12322013-04-05 01:27:12 -05002517 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002518 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002519 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002520
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002521 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2522
2523 rbd_img_request_get(img_request);
2524
Alex Elder7da22d22013-01-24 16:13:36 -06002525 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002526 resid -= length;
2527 }
2528
2529 return 0;
2530
Alex Elderbf0d5f502012-11-22 00:00:08 -06002531out_unwind:
2532 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002533 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002534
2535 return -ENOMEM;
2536}
2537
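/*
 * Handle completion of the copyup step of a layered write: release
 * the page vector that held the parent data and, if the request
 * succeeded, report the full originally-requested length as the
 * number of bytes transferred.
 */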
Alex Elder3d7efd12013-04-19 15:34:50 -05002538static void
Ilya Dryomov27617132015-07-16 17:36:11 +03002539rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
Alex Elder0eefd472013-04-19 15:34:50 -05002540{
2541 struct rbd_img_request *img_request;
2542 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002543 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002544 u32 page_count;
2545
Ilya Dryomov27617132015-07-16 17:36:11 +03002546 dout("%s: obj %p\n", __func__, obj_request);
2547
Josh Durgind3246fb2014-04-07 16:49:21 -07002548 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2549 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002550 rbd_assert(obj_request_img_data_test(obj_request));
2551 img_request = obj_request->img_request;
2552 rbd_assert(img_request);
2553
2554 rbd_dev = img_request->rbd_dev;
2555 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002556
Alex Elderebda6402013-05-10 16:29:22 -05002557 pages = obj_request->copyup_pages;
2558 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002559 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002560 page_count = obj_request->copyup_page_count;
2561 rbd_assert(page_count);
2562 obj_request->copyup_page_count = 0;
2563 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002564
2565 /*
2566 * We want the transfer count to reflect the size of the
2567 * original write request. There is no such thing as a
2568 * successful short write, so if the request was successful
2569 * we can just set it to the originally-requested length.
2570 */
2571 if (!obj_request->result)
2572 obj_request->xferred = obj_request->length;
2573
Ilya Dryomov27617132015-07-16 17:36:11 +03002574 obj_request_done_set(obj_request);
Alex Elder0eefd472013-04-19 15:34:50 -05002575}
2576
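/*
 * Handle completion of the full-object parent read issued for a
 * layered write.  Unless the parent overlap has dropped to zero (in
 * which case the original request is simply resubmitted), build a
 * new copyup osd request for the original object request, attach the
 * pages holding the parent data to its copyup op, add the original
 * write/discard op(s), and submit it.
 */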
2577static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002578rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2579{
2580 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002581 struct ceph_osd_request *osd_req;
2582 struct ceph_osd_client *osdc;
2583 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002584 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002585 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002586 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002587 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002588 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002589
2590 rbd_assert(img_request_child_test(img_request));
2591
2592 /* First get what we need from the image request */
2593
2594 pages = img_request->copyup_pages;
2595 rbd_assert(pages != NULL);
2596 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002597 page_count = img_request->copyup_page_count;
2598 rbd_assert(page_count);
2599 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002600
2601 orig_request = img_request->obj_request;
2602 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002603 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002604 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002605 parent_length = img_request->length;
2606 rbd_assert(parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002607 rbd_img_request_put(img_request);
2608
Alex Elder91c6feb2013-05-06 17:40:32 -05002609 rbd_assert(orig_request->img_request);
2610 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002611 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002612
Alex Elderbbea1c12013-05-06 17:40:33 -05002613 /*
2614 * If the overlap has become 0 (most likely because the
2615 * image has been flattened) we need to free the pages
2616 * and re-submit the original write request.
2617 */
2618 if (!rbd_dev->parent_overlap) {
2619 struct ceph_osd_client *osdc;
2620
2621 ceph_release_page_vector(pages, page_count);
2622 osdc = &rbd_dev->rbd_client->client->osdc;
2623 img_result = rbd_obj_request_submit(osdc, orig_request);
2624 if (!img_result)
2625 return;
2626 }
2627
2628 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002629 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002630
Alex Elder8785b1d2013-05-09 10:08:49 -05002631 /*
2632	 * The original osd request is of no use to us any more.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002633 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002634 * request. Allocate the new copyup osd request for the
2635 * original request, and release the old one.
2636 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002637 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002638 osd_req = rbd_osd_req_create_copyup(orig_request);
2639 if (!osd_req)
2640 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002641 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002642 orig_request->osd_req = osd_req;
2643 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002644 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002645
Alex Elder0eefd472013-04-19 15:34:50 -05002646 /* Initialize the copyup op */
2647
2648 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002649 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002650 false, false);
2651
Josh Durgind3246fb2014-04-07 16:49:21 -07002652 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002653
Josh Durgind3246fb2014-04-07 16:49:21 -07002654 op_type = rbd_img_request_op_type(orig_request->img_request);
2655 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
Alex Elder0eefd472013-04-19 15:34:50 -05002656
2657 /* All set, send it off. */
2658
Alex Elder0eefd472013-04-19 15:34:50 -05002659 osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderbbea1c12013-05-06 17:40:33 -05002660 img_result = rbd_obj_request_submit(osdc, orig_request);
2661 if (!img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002662 return;
2663out_err:
2664 /* Record the error code and complete the request */
2665
Alex Elderbbea1c12013-05-06 17:40:33 -05002666 orig_request->result = img_result;
Alex Elder0eefd472013-04-19 15:34:50 -05002667 orig_request->xferred = 0;
2668 obj_request_done_set(orig_request);
2669 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002670}
2671
2672/*
2673 * Read from the parent image the range of data that covers the
2674 * entire target of the given object request. This is used for
2675 * satisfying a layered image write request when the target of an
2676 * object request from the image request does not exist.
2677 *
2678 * A page array big enough to hold the returned data is allocated
2679 * and supplied to rbd_img_request_fill() as the "data descriptor."
2680 * When the read completes, this page array will be transferred to
2681 * the original object request for the copyup operation.
2682 *
2683 * If an error occurs, record it as the result of the original
2684 * object request and mark it done so it gets completed.
2685 */
2686static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2687{
2688 struct rbd_img_request *img_request = NULL;
2689 struct rbd_img_request *parent_request = NULL;
2690 struct rbd_device *rbd_dev;
2691 u64 img_offset;
2692 u64 length;
2693 struct page **pages = NULL;
2694 u32 page_count;
2695 int result;
2696
2697 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderb91f09f2013-05-10 16:29:22 -05002698 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder3d7efd12013-04-19 15:34:50 -05002699
2700 img_request = obj_request->img_request;
2701 rbd_assert(img_request != NULL);
2702 rbd_dev = img_request->rbd_dev;
2703 rbd_assert(rbd_dev->parent != NULL);
2704
2705 /*
2706 * Determine the byte range covered by the object in the
2707 * child image to which the original request was to be sent.
2708 */
2709 img_offset = obj_request->img_offset - obj_request->offset;
2710 length = (u64)1 << rbd_dev->header.obj_order;
2711
2712 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002713 * There is no defined parent data beyond the parent
2714 * overlap, so limit what we read at that boundary if
2715 * necessary.
2716 */
2717 if (img_offset + length > rbd_dev->parent_overlap) {
2718 rbd_assert(img_offset < rbd_dev->parent_overlap);
2719 length = rbd_dev->parent_overlap - img_offset;
2720 }
2721
2722 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002723 * Allocate a page array big enough to receive the data read
2724 * from the parent.
2725 */
2726 page_count = (u32)calc_pages_for(0, length);
2727 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2728 if (IS_ERR(pages)) {
2729 result = PTR_ERR(pages);
2730 pages = NULL;
2731 goto out_err;
2732 }
2733
2734 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002735 parent_request = rbd_parent_request_create(obj_request,
2736 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002737 if (!parent_request)
2738 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002739
2740 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2741 if (result)
2742 goto out_err;
2743 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002744 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002745
2746 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2747 result = rbd_img_request_submit(parent_request);
2748 if (!result)
2749 return 0;
2750
2751 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002752 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002753 parent_request->obj_request = NULL;
2754 rbd_obj_request_put(obj_request);
2755out_err:
2756 if (pages)
2757 ceph_release_page_vector(pages, page_count);
2758 if (parent_request)
2759 rbd_img_request_put(parent_request);
2760 obj_request->result = result;
2761 obj_request->xferred = 0;
2762 obj_request_done_set(obj_request);
2763
2764 return result;
2765}
2766
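/*
 * Handle completion of the STAT request issued by
 * rbd_img_obj_exists_submit().  Record whether the target object
 * exists (-ENOENT simply means "no", anything else is an error) and
 * resubmit the original object request.
 */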
Alex Elderc5b5ef62013-02-11 12:33:24 -06002767static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2768{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002769 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002770 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002771 int result;
2772
2773 rbd_assert(!obj_request_img_data_test(obj_request));
2774
2775 /*
2776 * All we need from the object request is the original
2777 * request and the result of the STAT op. Grab those, then
2778 * we're done with the request.
2779 */
2780 orig_request = obj_request->obj_request;
2781 obj_request->obj_request = NULL;
Alex Elder912c3172013-05-13 20:35:38 -05002782 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002783 rbd_assert(orig_request);
2784 rbd_assert(orig_request->img_request);
2785
2786 result = obj_request->result;
2787 obj_request->result = 0;
2788
2789 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2790 obj_request, orig_request, result,
2791 obj_request->xferred, obj_request->length);
2792 rbd_obj_request_put(obj_request);
2793
Alex Elder638f5ab2013-05-06 17:40:33 -05002794 /*
2795 * If the overlap has become 0 (most likely because the
2796 * image has been flattened) we need to free the pages
2797 * and re-submit the original write request.
2798 */
2799 rbd_dev = orig_request->img_request->rbd_dev;
2800 if (!rbd_dev->parent_overlap) {
2801 struct ceph_osd_client *osdc;
2802
Alex Elder638f5ab2013-05-06 17:40:33 -05002803 osdc = &rbd_dev->rbd_client->client->osdc;
2804 result = rbd_obj_request_submit(osdc, orig_request);
2805 if (!result)
2806 return;
2807 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002808
2809 /*
2810 * Our only purpose here is to determine whether the object
2811 * exists, and we don't want to treat the non-existence as
2812 * an error. If something else comes back, transfer the
2813 * error to the original request and complete it now.
2814 */
2815 if (!result) {
2816 obj_request_existence_set(orig_request, true);
2817 } else if (result == -ENOENT) {
2818 obj_request_existence_set(orig_request, false);
2819 } else if (result) {
2820 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002821 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002822 }
2823
2824 /*
2825 * Resubmit the original request now that we have recorded
2826 * whether the target object exists.
2827 */
Alex Elderb454e362013-04-19 15:34:50 -05002828 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002829out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002830 if (orig_request->result)
2831 rbd_obj_request_complete(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002832}
2833
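/*
 * Issue a STAT request for the target object of a layered write so
 * we can learn whether it already exists.  Completion is handled by
 * rbd_img_obj_exists_callback().
 */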
2834static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2835{
2836 struct rbd_obj_request *stat_request;
2837 struct rbd_device *rbd_dev;
2838 struct ceph_osd_client *osdc;
2839 struct page **pages = NULL;
2840 u32 page_count;
2841 size_t size;
2842 int ret;
2843
2844 /*
2845 * The response data for a STAT call consists of:
2846 * le64 length;
2847 * struct {
2848 * le32 tv_sec;
2849 * le32 tv_nsec;
2850 * } mtime;
2851 */
2852 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2853 page_count = (u32)calc_pages_for(0, size);
2854 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2855 if (IS_ERR(pages))
2856 return PTR_ERR(pages);
2857
2858 ret = -ENOMEM;
2859 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2860 OBJ_REQUEST_PAGES);
2861 if (!stat_request)
2862 goto out;
2863
2864 rbd_obj_request_get(obj_request);
2865 stat_request->obj_request = obj_request;
2866 stat_request->pages = pages;
2867 stat_request->page_count = page_count;
2868
2869 rbd_assert(obj_request->img_request);
2870 rbd_dev = obj_request->img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002871 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02002872 stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002873 if (!stat_request->osd_req)
2874 goto out;
2875 stat_request->callback = rbd_img_obj_exists_callback;
2876
Yan, Zheng144cba12015-04-27 11:09:54 +08002877 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002878 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2879 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002880 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002881
2882 osdc = &rbd_dev->rbd_client->client->osdc;
2883 ret = rbd_obj_request_submit(osdc, stat_request);
2884out:
2885 if (ret)
2886 rbd_obj_request_put(obj_request);
2887
2888 return ret;
2889}
2890
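/*
 * Return true if the given object request can be submitted directly
 * to the osd, i.e. it needs none of the copyup machinery used for
 * layered writes.
 */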
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002891static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002892{
2893 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002894 struct rbd_device *rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002895
2896 rbd_assert(obj_request_img_data_test(obj_request));
2897
2898 img_request = obj_request->img_request;
2899 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002900 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002901
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002902 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002903 if (!img_request_write_test(img_request) &&
2904 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002905 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002906
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002907 /* Non-layered writes */
2908 if (!img_request_layered_test(img_request))
2909 return true;
2910
2911 /*
2912 * Layered writes outside of the parent overlap range don't
2913 * share any data with the parent.
2914 */
2915 if (!obj_request_overlaps_parent(obj_request))
2916 return true;
2917
2918 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002919 * Entire-object layered writes - we will overwrite whatever
2920 * parent data there is anyway.
2921 */
2922 if (!obj_request->offset &&
2923 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2924 return true;
2925
2926 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002927 * If the object is known to already exist, its parent data has
2928 * already been copied.
2929 */
2930 if (obj_request_known_test(obj_request) &&
2931 obj_request_exists_test(obj_request))
2932 return true;
2933
2934 return false;
2935}
2936
2937static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2938{
2939 if (img_obj_request_simple(obj_request)) {
Alex Elderb454e362013-04-19 15:34:50 -05002940 struct rbd_device *rbd_dev;
2941 struct ceph_osd_client *osdc;
2942
2943 rbd_dev = obj_request->img_request->rbd_dev;
2944 osdc = &rbd_dev->rbd_client->client->osdc;
2945
2946 return rbd_obj_request_submit(osdc, obj_request);
2947 }
2948
2949 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002950 * It's a layered write. The target object might exist but
2951 * we may not know that yet. If we know it doesn't exist,
2952 * start by reading the data for the full target object from
2953 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002954 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002955 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002956 return rbd_img_obj_parent_read_full(obj_request);
2957
2958 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002959
2960 return rbd_img_obj_exists_submit(obj_request);
2961}
2962
Alex Elderbf0d5f502012-11-22 00:00:08 -06002963static int rbd_img_request_submit(struct rbd_img_request *img_request)
2964{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002965 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002966 struct rbd_obj_request *next_obj_request;
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002967 int ret = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002968
Alex Elder37206ee2013-02-20 17:32:08 -06002969 dout("%s: img %p\n", __func__, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002970
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002971 rbd_img_request_get(img_request);
2972 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderb454e362013-04-19 15:34:50 -05002973 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002974 if (ret)
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002975 goto out_put_ireq;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002976 }
2977
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002978out_put_ireq:
2979 rbd_img_request_put(img_request);
2980 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002981}
2982
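/*
 * Handle completion of a parent image read issued by
 * rbd_img_parent_read().  Transfer the result to the original object
 * request, arranging for anything beyond the parent overlap to be
 * zeroed, then complete it.
 */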
Alex Elder8b3e1a52013-01-24 16:13:36 -06002983static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2984{
2985 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002986 struct rbd_device *rbd_dev;
2987 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05002988 u64 img_xferred;
2989 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002990
2991 rbd_assert(img_request_child_test(img_request));
2992
Alex Elder02c74fb2013-05-06 17:40:33 -05002993 /* First get what we need from the image request and release it */
2994
Alex Elder8b3e1a52013-01-24 16:13:36 -06002995 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05002996 img_xferred = img_request->xferred;
2997 img_result = img_request->result;
2998 rbd_img_request_put(img_request);
2999
3000 /*
3001 * If the overlap has become 0 (most likely because the
3002 * image has been flattened) we need to re-submit the
3003 * original request.
3004 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003005 rbd_assert(obj_request);
3006 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05003007 rbd_dev = obj_request->img_request->rbd_dev;
3008 if (!rbd_dev->parent_overlap) {
3009 struct ceph_osd_client *osdc;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003010
Alex Elder02c74fb2013-05-06 17:40:33 -05003011 osdc = &rbd_dev->rbd_client->client->osdc;
3012 img_result = rbd_obj_request_submit(osdc, obj_request);
3013 if (!img_result)
3014 return;
3015 }
3016
3017 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003018 if (obj_request->result)
3019 goto out;
3020
3021 /*
3022 * We need to zero anything beyond the parent overlap
3023 * boundary. Since rbd_img_obj_request_read_callback()
3024 * will zero anything beyond the end of a short read, an
3025 * easy way to do this is to pretend the data from the
3026 * parent came up short--ending at the overlap boundary.
3027 */
3028 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3029 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003030 if (obj_end > rbd_dev->parent_overlap) {
3031 u64 xferred = 0;
3032
3033 if (obj_request->img_offset < rbd_dev->parent_overlap)
3034 xferred = rbd_dev->parent_overlap -
3035 obj_request->img_offset;
3036
Alex Elder02c74fb2013-05-06 17:40:33 -05003037 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003038 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05003039 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003040 }
3041out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06003042 rbd_img_obj_request_read_callback(obj_request);
3043 rbd_obj_request_complete(obj_request);
3044}
3045
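/*
 * An object read that came back -ENOENT on a layered image is
 * satisfied by reading the corresponding range from the parent
 * image instead.
 */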
3046static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3047{
Alex Elder8b3e1a52013-01-24 16:13:36 -06003048 struct rbd_img_request *img_request;
3049 int result;
3050
3051 rbd_assert(obj_request_img_data_test(obj_request));
3052 rbd_assert(obj_request->img_request != NULL);
3053 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003054 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003055
Alex Elder8b3e1a52013-01-24 16:13:36 -06003056 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003057 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003058 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003059 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003060 result = -ENOMEM;
3061 if (!img_request)
3062 goto out_err;
3063
Alex Elder5b2ab722013-05-06 17:40:33 -05003064 if (obj_request->type == OBJ_REQUEST_BIO)
3065 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3066 obj_request->bio_list);
3067 else
3068 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3069 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003070 if (result)
3071 goto out_err;
3072
3073 img_request->callback = rbd_img_parent_read_callback;
3074 result = rbd_img_request_submit(img_request);
3075 if (result)
3076 goto out_err;
3077
3078 return;
3079out_err:
3080 if (img_request)
3081 rbd_img_request_put(img_request);
3082 obj_request->result = result;
3083 obj_request->xferred = 0;
3084 obj_request_done_set(obj_request);
3085}
3086
Ilya Dryomov922dab62016-05-26 01:15:02 +02003087static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
3088static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
3089
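/*
 * Watch notification callback: refresh the device's header
 * information and acknowledge the notification.
 */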
3090static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3091 u64 notifier_id, void *data, size_t data_len)
Alex Elderb8d70032012-11-30 17:53:04 -06003092{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003093 struct rbd_device *rbd_dev = arg;
Alex Elder21692382013-04-05 01:27:12 -05003094 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06003095 int ret;
3096
Ilya Dryomov922dab62016-05-26 01:15:02 +02003097 dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
3098 cookie, notify_id);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003099
3100 /*
3101 * Until adequate refresh error handling is in place, there is
3102 * not much we can do here, except warn.
3103 *
3104 * See http://tracker.ceph.com/issues/5040
3105 */
Alex Eldere627db02013-05-06 07:40:30 -05003106 ret = rbd_dev_refresh(rbd_dev);
3107 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003108 rbd_warn(rbd_dev, "refresh failed: %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003109
Ilya Dryomov922dab62016-05-26 01:15:02 +02003110 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3111 &rbd_dev->header_oloc, notify_id, cookie,
3112 NULL, 0);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003113 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003114 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003115}
3116
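/*
 * Watch error callback: tear down the broken watch, try to
 * re-establish it, and refresh the header.
 */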
Ilya Dryomov922dab62016-05-26 01:15:02 +02003117static void rbd_watch_errcb(void *arg, u64 cookie, int err)
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003118{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003119 struct rbd_device *rbd_dev = arg;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003120 int ret;
3121
Ilya Dryomov922dab62016-05-26 01:15:02 +02003122 rbd_warn(rbd_dev, "encountered watch error: %d", err);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003123
Ilya Dryomov922dab62016-05-26 01:15:02 +02003124 __rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003125
Ilya Dryomov922dab62016-05-26 01:15:02 +02003126 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003127 if (ret) {
Ilya Dryomov922dab62016-05-26 01:15:02 +02003128 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3129 return;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003130 }
3131
Ilya Dryomov922dab62016-05-26 01:15:02 +02003132 ret = rbd_dev_refresh(rbd_dev);
3133 if (ret)
3134		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003135}
3136
3137/*
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003138 * Initiate a watch request, synchronously.
Alex Elder9969ebc2013-01-18 12:31:10 -06003139 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003140static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003141{
3142 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov922dab62016-05-26 01:15:02 +02003143 struct ceph_osd_linger_request *handle;
Alex Elder9969ebc2013-01-18 12:31:10 -06003144
Ilya Dryomov922dab62016-05-26 01:15:02 +02003145 rbd_assert(!rbd_dev->watch_handle);
Alex Elder9969ebc2013-01-18 12:31:10 -06003146
Ilya Dryomov922dab62016-05-26 01:15:02 +02003147 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3148 &rbd_dev->header_oloc, rbd_watch_cb,
3149 rbd_watch_errcb, rbd_dev);
3150 if (IS_ERR(handle))
3151 return PTR_ERR(handle);
Alex Elder9969ebc2013-01-18 12:31:10 -06003152
Ilya Dryomov922dab62016-05-26 01:15:02 +02003153 rbd_dev->watch_handle = handle;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003154 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003155}
3156
Ilya Dryomovc525f032016-04-28 16:07:26 +02003157static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003158{
Ilya Dryomov922dab62016-05-26 01:15:02 +02003159 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3160 int ret;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003161
Ilya Dryomov922dab62016-05-26 01:15:02 +02003162 if (!rbd_dev->watch_handle)
3163 return;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003164
Ilya Dryomov922dab62016-05-26 01:15:02 +02003165 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3166 if (ret)
3167 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003168
Ilya Dryomov922dab62016-05-26 01:15:02 +02003169 rbd_dev->watch_handle = NULL;
Ilya Dryomovc525f032016-04-28 16:07:26 +02003170}
3171
3172/*
3173 * Tear down a watch request, synchronously.
3174 */
3175static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3176{
3177 __rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomov811c6682016-04-15 16:22:16 +02003178
3179 dout("%s flushing notifies\n", __func__);
3180 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02003181}
3182
Alex Elder36be9a72013-01-19 00:30:28 -06003183/*
Alex Elderf40eb342013-04-25 15:09:42 -05003184 * Synchronous osd object method call. Returns the number of bytes
3185 * returned in the inbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003186 */
3187static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3188 const char *object_name,
3189 const char *class_name,
3190 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003191 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003192 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003193 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003194 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003195{
Alex Elder21692382013-04-05 01:27:12 -05003196 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06003197 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06003198 struct page **pages;
3199 u32 page_count;
3200 int ret;
3201
3202 /*
Alex Elder6010a452013-04-05 01:27:11 -05003203 * Method calls are ultimately read operations. The result
3204 * should be placed into the inbound buffer provided. They
3205 * also supply outbound data--parameters for the object
3206 * method. Currently if this is present it will be a
3207 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003208 */
Alex Elder57385b52013-04-21 12:14:45 -05003209 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06003210 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3211 if (IS_ERR(pages))
3212 return PTR_ERR(pages);
3213
3214 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05003215 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06003216 OBJ_REQUEST_PAGES);
3217 if (!obj_request)
3218 goto out;
3219
3220 obj_request->pages = pages;
3221 obj_request->page_count = page_count;
3222
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003223 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003224 obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06003225 if (!obj_request->osd_req)
3226 goto out;
3227
Alex Elderc99d2d42013-04-05 01:27:11 -05003228 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05003229 class_name, method_name);
3230 if (outbound_size) {
3231 struct ceph_pagelist *pagelist;
3232
3233 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3234 if (!pagelist)
3235 goto out;
3236
3237 ceph_pagelist_init(pagelist);
3238 ceph_pagelist_append(pagelist, outbound, outbound_size);
3239 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3240 pagelist);
3241 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05003242 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3243 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05003244 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003245 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003246
Alex Elder36be9a72013-01-19 00:30:28 -06003247 ret = rbd_obj_request_submit(osdc, obj_request);
3248 if (ret)
3249 goto out;
3250 ret = rbd_obj_request_wait(obj_request);
3251 if (ret)
3252 goto out;
3253
3254 ret = obj_request->result;
3255 if (ret < 0)
3256 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05003257
3258 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3259 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003260 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06003261out:
3262 if (obj_request)
3263 rbd_obj_request_put(obj_request);
3264 else
3265 ceph_release_page_vector(pages, page_count);
3266
3267 return ret;
3268}
3269
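/*
 * Workqueue function that services a single block layer request.
 * It translates the request into an rbd image request (read, write
 * or discard), fills it from the request's bio chain and submits it;
 * any failure is reported back via blk_mq_end_request().
 */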
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003270static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003271{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003272 struct request *rq = blk_mq_rq_from_pdu(work);
3273 struct rbd_device *rbd_dev = rq->q->queuedata;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003274 struct rbd_img_request *img_request;
Josh Durgin4e752f02014-04-08 11:12:11 -07003275 struct ceph_snap_context *snapc = NULL;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003276 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3277 u64 length = blk_rq_bytes(rq);
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003278 enum obj_operation_type op_type;
Josh Durgin4e752f02014-04-08 11:12:11 -07003279 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003280 int result;
3281
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003282 if (rq->cmd_type != REQ_TYPE_FS) {
3283 dout("%s: non-fs request type %d\n", __func__,
3284 (int) rq->cmd_type);
3285 result = -EIO;
3286 goto err;
3287 }
3288
Mike Christiec2df40d2016-06-05 14:32:17 -05003289 if (req_op(rq) == REQ_OP_DISCARD)
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003290 op_type = OBJ_OP_DISCARD;
Mike Christiec2df40d2016-06-05 14:32:17 -05003291 else if (req_op(rq) == REQ_OP_WRITE)
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003292 op_type = OBJ_OP_WRITE;
3293 else
3294 op_type = OBJ_OP_READ;
3295
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003296 /* Ignore/skip any zero-length requests */
3297
3298 if (!length) {
3299 dout("%s: zero-length request\n", __func__);
3300 result = 0;
3301 goto err_rq;
3302 }
3303
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003304 /* Only reads are allowed to a read-only device */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003305
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003306 if (op_type != OBJ_OP_READ) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003307 if (rbd_dev->mapping.read_only) {
3308 result = -EROFS;
3309 goto err_rq;
3310 }
3311 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3312 }
3313
3314 /*
3315 * Quit early if the mapped snapshot no longer exists. It's
3316 * still possible the snapshot will have disappeared by the
3317 * time our request arrives at the osd, but there's no sense in
3318 * sending it if we already know.
3319 */
3320 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3321 dout("request for non-existent snapshot");
3322 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3323 result = -ENXIO;
3324 goto err_rq;
3325 }
3326
3327 if (offset && length > U64_MAX - offset + 1) {
3328 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3329 length);
3330 result = -EINVAL;
3331 goto err_rq; /* Shouldn't happen */
3332 }
3333
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003334 blk_mq_start_request(rq);
3335
Josh Durgin4e752f02014-04-08 11:12:11 -07003336 down_read(&rbd_dev->header_rwsem);
3337 mapping_size = rbd_dev->mapping.size;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003338 if (op_type != OBJ_OP_READ) {
Josh Durgin4e752f02014-04-08 11:12:11 -07003339 snapc = rbd_dev->header.snapc;
3340 ceph_get_snap_context(snapc);
3341 }
3342 up_read(&rbd_dev->header_rwsem);
3343
3344 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003345 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07003346 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003347 result = -EIO;
3348 goto err_rq;
3349 }
3350
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003351 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07003352 snapc);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003353 if (!img_request) {
3354 result = -ENOMEM;
3355 goto err_rq;
3356 }
3357 img_request->rq = rq;
Ilya Dryomov70b16db2015-11-27 19:23:24 +01003358 snapc = NULL; /* img_request consumes a ref */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003359
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003360 if (op_type == OBJ_OP_DISCARD)
3361 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3362 NULL);
3363 else
3364 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3365 rq->bio);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003366 if (result)
3367 goto err_img_request;
3368
3369 result = rbd_img_request_submit(img_request);
3370 if (result)
3371 goto err_img_request;
3372
3373 return;
3374
3375err_img_request:
3376 rbd_img_request_put(img_request);
3377err_rq:
3378 if (result)
3379 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003380 obj_op_name(op_type), length, offset, result);
SF Markus Elfringe96a6502014-11-02 15:20:59 +01003381 ceph_put_snap_context(snapc);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003382err:
3383 blk_mq_end_request(rq, result);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003384}
3385
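/*
 * blk-mq ->queue_rq() handler: defer the real work to rbd_wq so
 * request processing may sleep.
 */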
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003386static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3387 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003388{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003389 struct request *rq = bd->rq;
3390 struct work_struct *work = blk_mq_rq_to_pdu(rq);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003391
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003392 queue_work(rbd_wq, work);
3393 return BLK_MQ_RQ_QUEUE_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06003394}
3395
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003396static void rbd_free_disk(struct rbd_device *rbd_dev)
3397{
3398 struct gendisk *disk = rbd_dev->disk;
3399
3400 if (!disk)
3401 return;
3402
Alex Eldera0cab922013-04-25 23:15:08 -05003403 rbd_dev->disk = NULL;
3404 if (disk->flags & GENHD_FL_UP) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003405 del_gendisk(disk);
Alex Eldera0cab922013-04-25 23:15:08 -05003406 if (disk->queue)
3407 blk_cleanup_queue(disk->queue);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003408 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05003409 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003410 put_disk(disk);
3411}
3412
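/*
 * Synchronously read @length bytes from the named object starting
 * at @offset into @buf.  Returns the number of bytes read or a
 * negative error code.
 */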
Alex Elder788e2df2013-01-17 12:25:27 -06003413static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3414 const char *object_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003415 u64 offset, u64 length, void *buf)
Alex Elder788e2df2013-01-17 12:25:27 -06003416
3417{
Alex Elder21692382013-04-05 01:27:12 -05003418 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder788e2df2013-01-17 12:25:27 -06003419 struct rbd_obj_request *obj_request;
Alex Elder788e2df2013-01-17 12:25:27 -06003420 struct page **pages = NULL;
3421 u32 page_count;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003422 size_t size;
Alex Elder788e2df2013-01-17 12:25:27 -06003423 int ret;
3424
3425 page_count = (u32) calc_pages_for(offset, length);
3426 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3427 if (IS_ERR(pages))
Jan Karaa8d42052014-10-22 09:17:24 +02003428 return PTR_ERR(pages);
Alex Elder788e2df2013-01-17 12:25:27 -06003429
3430 ret = -ENOMEM;
3431 obj_request = rbd_obj_request_create(object_name, offset, length,
Alex Elder36be9a72013-01-19 00:30:28 -06003432 OBJ_REQUEST_PAGES);
Alex Elder788e2df2013-01-17 12:25:27 -06003433 if (!obj_request)
3434 goto out;
3435
3436 obj_request->pages = pages;
3437 obj_request->page_count = page_count;
3438
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003439 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003440 obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06003441 if (!obj_request->osd_req)
3442 goto out;
3443
Alex Elderc99d2d42013-04-05 01:27:11 -05003444 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3445 offset, length, 0, 0);
Alex Elder406e2c92013-04-15 14:50:36 -05003446 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
Alex Eldera4ce40a2013-04-05 01:27:12 -05003447 obj_request->pages,
Alex Elder44cd1882013-04-05 01:27:12 -05003448 obj_request->length,
3449 obj_request->offset & ~PAGE_MASK,
3450 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003451 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003452
Alex Elder788e2df2013-01-17 12:25:27 -06003453 ret = rbd_obj_request_submit(osdc, obj_request);
3454 if (ret)
3455 goto out;
3456 ret = rbd_obj_request_wait(obj_request);
3457 if (ret)
3458 goto out;
3459
3460 ret = obj_request->result;
3461 if (ret < 0)
3462 goto out;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003463
3464 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3465 size = (size_t) obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003466 ceph_copy_from_page_vector(pages, buf, 0, size);
Alex Elder7097f8d2013-04-30 00:44:33 -05003467 rbd_assert(size <= (size_t)INT_MAX);
3468 ret = (int)size;
Alex Elder788e2df2013-01-17 12:25:27 -06003469out:
3470 if (obj_request)
3471 rbd_obj_request_put(obj_request);
3472 else
3473 ceph_release_page_vector(pages, page_count);
3474
3475 return ret;
3476}
3477
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003478/*
Alex Elder662518b2013-05-06 09:51:29 -05003479 * Read the complete header for the given rbd device. On successful
3480 * return, the rbd_dev->header field will contain up-to-date
3481 * information about the image.
Alex Elder4156d992012-08-02 11:29:46 -05003482 */
Alex Elder99a41eb2013-05-06 09:51:30 -05003483static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d992012-08-02 11:29:46 -05003484{
3485 struct rbd_image_header_ondisk *ondisk = NULL;
3486 u32 snap_count = 0;
3487 u64 names_size = 0;
3488 u32 want_count;
3489 int ret;
3490
3491 /*
3492 * The complete header will include an array of its 64-bit
3493 * snapshot ids, followed by the names of those snapshots as
3494 * a contiguous block of NUL-terminated strings. Note that
3495 * the number of snapshots could change by the time we read
3496 * it in, in which case we re-read it.
3497 */
3498 do {
3499 size_t size;
3500
3501 kfree(ondisk);
3502
3503 size = sizeof (*ondisk);
3504 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3505 size += names_size;
3506 ondisk = kmalloc(size, GFP_KERNEL);
3507 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05003508 return -ENOMEM;
Alex Elder4156d992012-08-02 11:29:46 -05003509
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003510 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003511 0, size, ondisk);
Alex Elder4156d992012-08-02 11:29:46 -05003512 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05003513 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05003514 if ((size_t)ret < size) {
Alex Elder4156d992012-08-02 11:29:46 -05003515 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003516 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3517 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05003518 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003519 }
3520 if (!rbd_dev_ondisk_valid(ondisk)) {
3521 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003522 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05003523 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003524 }
3525
3526 names_size = le64_to_cpu(ondisk->snap_names_len);
3527 want_count = snap_count;
3528 snap_count = le32_to_cpu(ondisk->snap_count);
3529 } while (snap_count != want_count);
3530
Alex Elder662518b2013-05-06 09:51:29 -05003531 ret = rbd_header_from_disk(rbd_dev, ondisk);
3532out:
Alex Elder4156d992012-08-02 11:29:46 -05003533 kfree(ondisk);
3534
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003535 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003536}
3537
Alex Elder15228ed2013-05-01 12:43:03 -05003538/*
3539 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3540 * has disappeared from the (just updated) snapshot context.
3541 */
3542static void rbd_exists_validate(struct rbd_device *rbd_dev)
3543{
3544 u64 snap_id;
3545
3546 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3547 return;
3548
3549 snap_id = rbd_dev->spec->snap_id;
3550 if (snap_id == CEPH_NOSNAP)
3551 return;
3552
3553 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3554 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3555}
3556
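/*
 * Propagate a change in the mapped image size to the block device's
 * capacity.
 */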
Josh Durgin98752012013-08-29 17:26:31 -07003557static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3558{
3559 sector_t size;
Josh Durgin98752012013-08-29 17:26:31 -07003560
3561 /*
Ilya Dryomov811c6682016-04-15 16:22:16 +02003562 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3563 * try to update its size. If REMOVING is set, updating size
3564 * is just useless work since the device can't be opened.
Josh Durgin98752012013-08-29 17:26:31 -07003565 */
Ilya Dryomov811c6682016-04-15 16:22:16 +02003566 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3567 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
Josh Durgin98752012013-08-29 17:26:31 -07003568 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3569 dout("setting size to %llu sectors", (unsigned long long)size);
3570 set_capacity(rbd_dev->disk, size);
3571 revalidate_disk(rbd_dev->disk);
3572 }
3573}
3574
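/*
 * Re-read the image header and bring the mapping up to date,
 * revalidating the parent overlap and the mapped snapshot's EXISTS
 * flag, and resizing the block device if the image size changed.
 */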
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003575static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05003576{
Alex Eldere627db02013-05-06 07:40:30 -05003577 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05003578 int ret;
3579
Alex Eldercfbf6372013-05-31 17:40:45 -05003580 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05003581 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04003582
3583 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003584 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003585 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05003586
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003587 /*
3588 * If there is a parent, see if it has disappeared due to the
3589 * mapped image getting flattened.
3590 */
3591 if (rbd_dev->parent) {
3592 ret = rbd_dev_v2_parent_info(rbd_dev);
3593 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003594 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003595 }
3596
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003597 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003598 rbd_dev->mapping.size = rbd_dev->header.image_size;
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003599 } else {
3600 /* validate mapped snapshot's EXISTS flag */
3601 rbd_exists_validate(rbd_dev);
3602 }
Alex Elder15228ed2013-05-01 12:43:03 -05003603
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003604out:
Alex Eldercfbf6372013-05-31 17:40:45 -05003605 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003606 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07003607 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05003608
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003609 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05003610}
3611
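/*
 * blk-mq ->init_request(): each request embeds a work_struct that
 * runs rbd_queue_workfn().
 */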
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003612static int rbd_init_request(void *data, struct request *rq,
3613 unsigned int hctx_idx, unsigned int request_idx,
3614 unsigned int numa_node)
3615{
3616 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3617
3618 INIT_WORK(work, rbd_queue_workfn);
3619 return 0;
3620}
3621
3622static struct blk_mq_ops rbd_mq_ops = {
3623 .queue_rq = rbd_queue_rq,
3624 .map_queue = blk_mq_map_queue,
3625 .init_request = rbd_init_request,
3626};
3627
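/*
 * Allocate and set up the gendisk and blk-mq request queue for the
 * mapped image, with I/O limits sized to the rbd object size.
 */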
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003628static int rbd_init_disk(struct rbd_device *rbd_dev)
3629{
3630 struct gendisk *disk;
3631 struct request_queue *q;
Alex Elder593a9e72012-02-07 12:03:37 -06003632 u64 segment_size;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003633 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003634
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003635 /* create gendisk info */
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003636 disk = alloc_disk(single_major ?
3637 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3638 RBD_MINORS_PER_MAJOR);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003639 if (!disk)
Alex Elder1fcdb8a2012-08-29 17:11:06 -05003640 return -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003641
Alex Elderf0f8cef2012-01-29 13:57:44 -06003642 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
Alex Elderde71a292012-07-03 16:01:19 -05003643 rbd_dev->dev_id);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003644 disk->major = rbd_dev->major;
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003645 disk->first_minor = rbd_dev->minor;
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003646 if (single_major)
3647 disk->flags |= GENHD_FL_EXT_DEVT;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003648 disk->fops = &rbd_bd_ops;
3649 disk->private_data = rbd_dev;
3650
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003651 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3652 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003653 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003654 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003655 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003656 rbd_dev->tag_set.nr_hw_queues = 1;
3657 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3658
3659 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3660 if (err)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003661 goto out_disk;
Josh Durgin029bcbd2011-07-22 11:35:23 -07003662
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003663 q = blk_mq_init_queue(&rbd_dev->tag_set);
3664 if (IS_ERR(q)) {
3665 err = PTR_ERR(q);
3666 goto out_tag_set;
3667 }
3668
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03003669 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3670 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06003671
Josh Durgin029bcbd2011-07-22 11:35:23 -07003672 /* set io sizes to object size */
Alex Elder593a9e72012-02-07 12:03:37 -06003673 segment_size = rbd_obj_bytes(&rbd_dev->header);
3674 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
Ilya Dryomov0d9fde42015-10-07 16:09:35 +02003675 q->limits.max_sectors = queue_max_hw_sectors(q);
Ilya Dryomovd3834fe2015-06-12 19:19:02 +03003676 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
Alex Elder593a9e72012-02-07 12:03:37 -06003677 blk_queue_max_segment_size(q, segment_size);
3678 blk_queue_io_min(q, segment_size);
3679 blk_queue_io_opt(q, segment_size);
Josh Durgin029bcbd2011-07-22 11:35:23 -07003680
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003681 /* enable the discard support */
3682 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3683 q->limits.discard_granularity = segment_size;
3684 q->limits.discard_alignment = segment_size;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06003685 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
Josh Durginb76f8232014-04-07 16:52:03 -07003686 q->limits.discard_zeroes_data = 1;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003687
Ronny Hegewaldbae818e2015-10-15 18:50:46 +00003688 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3689 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3690
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003691 disk->queue = q;
3692
3693 q->queuedata = rbd_dev;
3694
3695 rbd_dev->disk = disk;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003696
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003697 return 0;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003698out_tag_set:
3699 blk_mq_free_tag_set(&rbd_dev->tag_set);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003700out_disk:
3701 put_disk(disk);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003702 return err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003703}
3704
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003705/*
3706 sysfs
3707*/
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003708
Alex Elder593a9e72012-02-07 12:03:37 -06003709static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3710{
3711 return container_of(dev, struct rbd_device, dev);
3712}
3713
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003714static ssize_t rbd_size_show(struct device *dev,
3715 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003716{
Alex Elder593a9e72012-02-07 12:03:37 -06003717 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003718
Alex Elderfc71d832013-04-26 15:44:36 -05003719 return sprintf(buf, "%llu\n",
3720 (unsigned long long)rbd_dev->mapping.size);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003721}
3722
Alex Elder34b13182012-07-13 20:35:12 -05003723/*
3724 * Note this shows the features for whatever's mapped, which is not
3725 * necessarily the base image.
3726 */
3727static ssize_t rbd_features_show(struct device *dev,
3728 struct device_attribute *attr, char *buf)
3729{
3730 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3731
3732 return sprintf(buf, "0x%016llx\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003733 (unsigned long long)rbd_dev->mapping.features);
Alex Elder34b13182012-07-13 20:35:12 -05003734}
3735
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003736static ssize_t rbd_major_show(struct device *dev,
3737 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003738{
Alex Elder593a9e72012-02-07 12:03:37 -06003739 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003740
Alex Elderfc71d832013-04-26 15:44:36 -05003741 if (rbd_dev->major)
3742 return sprintf(buf, "%d\n", rbd_dev->major);
3743
3744 return sprintf(buf, "(none)\n");
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003745}
Alex Elderfc71d832013-04-26 15:44:36 -05003746
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003747static ssize_t rbd_minor_show(struct device *dev,
3748 struct device_attribute *attr, char *buf)
3749{
3750 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3751
3752 return sprintf(buf, "%d\n", rbd_dev->minor);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003753}
3754
3755static ssize_t rbd_client_id_show(struct device *dev,
3756 struct device_attribute *attr, char *buf)
3757{
Alex Elder593a9e72012-02-07 12:03:37 -06003758 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003759
Alex Elder1dbb4392012-01-24 10:08:37 -06003760 return sprintf(buf, "client%lld\n",
3761 ceph_client_id(rbd_dev->rbd_client->client));
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003762}
3763
3764static ssize_t rbd_pool_show(struct device *dev,
3765 struct device_attribute *attr, char *buf)
3766{
Alex Elder593a9e72012-02-07 12:03:37 -06003767 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003768
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003769 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003770}
3771
Alex Elder9bb2f332012-07-12 10:46:35 -05003772static ssize_t rbd_pool_id_show(struct device *dev,
3773 struct device_attribute *attr, char *buf)
3774{
3775 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3776
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003777 return sprintf(buf, "%llu\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003778 (unsigned long long) rbd_dev->spec->pool_id);
Alex Elder9bb2f332012-07-12 10:46:35 -05003779}
3780
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003781static ssize_t rbd_name_show(struct device *dev,
3782 struct device_attribute *attr, char *buf)
3783{
Alex Elder593a9e72012-02-07 12:03:37 -06003784 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003785
Alex Eldera92ffdf2012-10-30 19:40:33 -05003786 if (rbd_dev->spec->image_name)
3787 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3788
3789 return sprintf(buf, "(unknown)\n");
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003790}
3791
Alex Elder589d30e2012-07-10 20:30:11 -05003792static ssize_t rbd_image_id_show(struct device *dev,
3793 struct device_attribute *attr, char *buf)
3794{
3795 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3796
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003797 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05003798}
3799
Alex Elder34b13182012-07-13 20:35:12 -05003800/*
3801 * Shows the name of the currently-mapped snapshot (or
3802 * RBD_SNAP_HEAD_NAME for the base image).
3803 */
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003804static ssize_t rbd_snap_show(struct device *dev,
3805 struct device_attribute *attr,
3806 char *buf)
3807{
Alex Elder593a9e72012-02-07 12:03:37 -06003808 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003809
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003810 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003811}
3812
Alex Elder86b00e02012-10-25 23:34:42 -05003813/*
Ilya Dryomovff961282014-07-22 21:53:07 +04003814 * For a v2 image, shows the chain of parent images, separated by empty
3815 * lines. For v1 images or if there is no parent, shows "(no parent
3816 * image)".
Alex Elder86b00e02012-10-25 23:34:42 -05003817 */
3818static ssize_t rbd_parent_show(struct device *dev,
Ilya Dryomovff961282014-07-22 21:53:07 +04003819 struct device_attribute *attr,
3820 char *buf)
Alex Elder86b00e02012-10-25 23:34:42 -05003821{
3822 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Ilya Dryomovff961282014-07-22 21:53:07 +04003823 ssize_t count = 0;
Alex Elder86b00e02012-10-25 23:34:42 -05003824
Ilya Dryomovff961282014-07-22 21:53:07 +04003825 if (!rbd_dev->parent)
Alex Elder86b00e02012-10-25 23:34:42 -05003826 return sprintf(buf, "(no parent image)\n");
3827
Ilya Dryomovff961282014-07-22 21:53:07 +04003828 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3829 struct rbd_spec *spec = rbd_dev->parent_spec;
Alex Elder86b00e02012-10-25 23:34:42 -05003830
Ilya Dryomovff961282014-07-22 21:53:07 +04003831 count += sprintf(&buf[count], "%s"
3832 "pool_id %llu\npool_name %s\n"
3833 "image_id %s\nimage_name %s\n"
3834 "snap_id %llu\nsnap_name %s\n"
3835 "overlap %llu\n",
3836 !count ? "" : "\n", /* first? */
3837 spec->pool_id, spec->pool_name,
3838 spec->image_id, spec->image_name ?: "(unknown)",
3839 spec->snap_id, spec->snap_name,
3840 rbd_dev->parent_overlap);
3841 }
Alex Elder86b00e02012-10-25 23:34:42 -05003842
Ilya Dryomovff961282014-07-22 21:53:07 +04003843 return count;
Alex Elder86b00e02012-10-25 23:34:42 -05003844}
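
/*
 * Example of what the parent attribute might look like for a clone with
 * a single parent (all values below are illustrative):
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1028b4567890
 *   image_name base-img
 *   snap_id 4
 *   snap_name snap1
 *   overlap 4194304
 *
 * A longer chain repeats this block once per ancestor, with a blank
 * line between blocks, as produced by the loop above.
 */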
3845
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003846static ssize_t rbd_image_refresh(struct device *dev,
3847 struct device_attribute *attr,
3848 const char *buf,
3849 size_t size)
3850{
Alex Elder593a9e72012-02-07 12:03:37 -06003851 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05003852 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003853
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003854 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05003855 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003856 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05003857
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003858 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003859}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003860
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003861static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
Alex Elder34b13182012-07-13 20:35:12 -05003862static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003863static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003864static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003865static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3866static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
Alex Elder9bb2f332012-07-12 10:46:35 -05003867static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003868static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
Alex Elder589d30e2012-07-10 20:30:11 -05003869static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003870static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3871static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
Alex Elder86b00e02012-10-25 23:34:42 -05003872static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003873
3874static struct attribute *rbd_attrs[] = {
3875 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05003876 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003877 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003878 &dev_attr_minor.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003879 &dev_attr_client_id.attr,
3880 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05003881 &dev_attr_pool_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003882 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05003883 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003884 &dev_attr_current_snap.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05003885 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003886 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003887 NULL
3888};
3889
3890static struct attribute_group rbd_attr_group = {
3891 .attrs = rbd_attrs,
3892};
3893
3894static const struct attribute_group *rbd_attr_groups[] = {
3895 &rbd_attr_group,
3896 NULL
3897};
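
/*
 * These attributes are exposed under the device's sysfs directory,
 * e.g. (illustrative paths, documented in
 * Documentation/ABI/testing/sysfs-bus-rbd):
 *
 *   /sys/bus/rbd/devices/<dev-id>/size
 *   /sys/bus/rbd/devices/<dev-id>/features
 *   /sys/bus/rbd/devices/<dev-id>/pool
 *   /sys/bus/rbd/devices/<dev-id>/current_snap
 *   /sys/bus/rbd/devices/<dev-id>/refresh	(write-only)
 */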
3898
Ilya Dryomov6cac4692015-10-16 20:11:25 +02003899static void rbd_dev_release(struct device *dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003900
3901static struct device_type rbd_device_type = {
3902 .name = "rbd",
3903 .groups = rbd_attr_groups,
Ilya Dryomov6cac4692015-10-16 20:11:25 +02003904 .release = rbd_dev_release,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003905};
3906
Alex Elder8b8fb992012-10-26 17:25:24 -05003907static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3908{
3909 kref_get(&spec->kref);
3910
3911 return spec;
3912}
3913
3914static void rbd_spec_free(struct kref *kref);
3915static void rbd_spec_put(struct rbd_spec *spec)
3916{
3917 if (spec)
3918 kref_put(&spec->kref, rbd_spec_free);
3919}
3920
3921static struct rbd_spec *rbd_spec_alloc(void)
3922{
3923 struct rbd_spec *spec;
3924
3925 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3926 if (!spec)
3927 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04003928
3929 spec->pool_id = CEPH_NOPOOL;
3930 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05003931 kref_init(&spec->kref);
3932
Alex Elder8b8fb992012-10-26 17:25:24 -05003933 return spec;
3934}
3935
3936static void rbd_spec_free(struct kref *kref)
3937{
3938 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3939
3940 kfree(spec->pool_name);
3941 kfree(spec->image_id);
3942 kfree(spec->image_name);
3943 kfree(spec->snap_name);
3944 kfree(spec);
3945}
3946
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003947static void rbd_dev_release(struct device *dev)
3948{
3949 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3950 bool need_put = !!rbd_dev->opts;
3951
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003952 ceph_oid_destroy(&rbd_dev->header_oid);
Ilya Dryomov6b6dddb2016-08-05 16:15:38 +02003953 ceph_oloc_destroy(&rbd_dev->header_oloc);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003954
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003955 rbd_put_client(rbd_dev->rbd_client);
3956 rbd_spec_put(rbd_dev->spec);
3957 kfree(rbd_dev->opts);
3958 kfree(rbd_dev);
3959
3960 /*
3961 * This is racy, but way better than dropping the module reference
3962 * outside of the release callback. The race window is pretty small, so
3963 * doing something similar to dm (dm-builtin.c) is overkill.
3964 */
3965 if (need_put)
3966 module_put(THIS_MODULE);
3967}
3968
Alex Eldercc344fa2013-02-19 12:25:56 -06003969static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
Ilya Dryomovd1475432015-06-22 13:24:48 +03003970 struct rbd_spec *spec,
3971 struct rbd_options *opts)
Alex Elderc53d5892012-10-25 23:34:42 -05003972{
3973 struct rbd_device *rbd_dev;
3974
3975 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3976 if (!rbd_dev)
3977 return NULL;
3978
3979 spin_lock_init(&rbd_dev->lock);
Alex Elder6d292902013-01-14 12:43:31 -06003980 rbd_dev->flags = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05003981 atomic_set(&rbd_dev->parent_ref, 0);
Alex Elderc53d5892012-10-25 23:34:42 -05003982 INIT_LIST_HEAD(&rbd_dev->node);
Alex Elderc53d5892012-10-25 23:34:42 -05003983 init_rwsem(&rbd_dev->header_rwsem);
3984
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003985 ceph_oid_init(&rbd_dev->header_oid);
Ilya Dryomov922dab62016-05-26 01:15:02 +02003986 ceph_oloc_init(&rbd_dev->header_oloc);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003987
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003988 rbd_dev->dev.bus = &rbd_bus_type;
3989 rbd_dev->dev.type = &rbd_device_type;
3990 rbd_dev->dev.parent = &rbd_root_dev;
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02003991 device_initialize(&rbd_dev->dev);
3992
Alex Elderc53d5892012-10-25 23:34:42 -05003993 rbd_dev->rbd_client = rbdc;
Ilya Dryomovd1475432015-06-22 13:24:48 +03003994 rbd_dev->spec = spec;
3995 rbd_dev->opts = opts;
Alex Elderc53d5892012-10-25 23:34:42 -05003996
Alex Elder0903e872012-11-14 12:25:19 -06003997 /* Initialize the layout used for all rbd requests */
3998
Yan, Zheng76271512016-02-03 21:24:49 +08003999 rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
4000 rbd_dev->layout.stripe_count = 1;
4001 rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
4002 rbd_dev->layout.pool_id = spec->pool_id;
Yan, Zheng30c156d2016-02-14 11:24:31 +08004003 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
Alex Elder0903e872012-11-14 12:25:19 -06004004
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004005 /*
4006 * If this is a mapping rbd_dev (as opposed to a parent one),
4007 * pin our module. We have a ref from do_rbd_add(), so use
4008 * __module_get().
4009 */
4010 if (rbd_dev->opts)
4011 __module_get(THIS_MODULE);
4012
Alex Elderc53d5892012-10-25 23:34:42 -05004013 return rbd_dev;
4014}
4015
4016static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4017{
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004018 if (rbd_dev)
4019 put_device(&rbd_dev->dev);
Alex Elderc53d5892012-10-25 23:34:42 -05004020}
4021
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004022/*
Alex Elder9d475de2012-07-03 16:01:19 -05004023 * Get the size and object order for an image snapshot or, if
4024 * snap_id is CEPH_NOSNAP, get this information for the base
4025 * image.
4026 */
4027static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4028 u8 *order, u64 *snap_size)
4029{
4030 __le64 snapid = cpu_to_le64(snap_id);
4031 int ret;
4032 struct {
4033 u8 order;
4034 __le64 size;
4035 } __attribute__ ((packed)) size_buf = { 0 };
4036
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004037 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder9d475de2012-07-03 16:01:19 -05004038 "rbd", "get_size",
Alex Elder41579762013-04-21 12:14:45 -05004039 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004040 &size_buf, sizeof (size_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004041 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder9d475de2012-07-03 16:01:19 -05004042 if (ret < 0)
4043 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004044 if (ret < sizeof (size_buf))
4045 return -ERANGE;
Alex Elder9d475de2012-07-03 16:01:19 -05004046
Josh Durginc3545572013-08-28 17:08:10 -07004047 if (order) {
Alex Elderc86f86e2013-04-25 15:09:41 -05004048 *order = size_buf.order;
Josh Durginc3545572013-08-28 17:08:10 -07004049 dout(" order %u", (unsigned int)*order);
4050 }
Alex Elder9d475de2012-07-03 16:01:19 -05004051 *snap_size = le64_to_cpu(size_buf.size);
4052
Josh Durginc3545572013-08-28 17:08:10 -07004053 dout(" snap_id 0x%016llx snap_size = %llu\n",
4054 (unsigned long long)snap_id,
Alex Elder57385b52013-04-21 12:14:45 -05004055 (unsigned long long)*snap_size);
Alex Elder9d475de2012-07-03 16:01:19 -05004056
4057 return 0;
4058}
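
/*
 * Example (illustrative values): a 1 GiB image created with the default
 * 4 MiB objects decodes to order 22 and size 1073741824, since the
 * object size is 1 << order.
 */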
4059
4060static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4061{
4062 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4063 &rbd_dev->header.obj_order,
4064 &rbd_dev->header.image_size);
4065}
4066
Alex Elder1e130192012-07-03 16:01:19 -05004067static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4068{
4069 void *reply_buf;
4070 int ret;
4071 void *p;
4072
4073 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4074 if (!reply_buf)
4075 return -ENOMEM;
4076
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004077 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder41579762013-04-21 12:14:45 -05004078 "rbd", "get_object_prefix", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004079 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004080 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder1e130192012-07-03 16:01:19 -05004081 if (ret < 0)
4082 goto out;
4083
4084 p = reply_buf;
4085 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
Alex Elder57385b52013-04-21 12:14:45 -05004086 p + ret, NULL, GFP_NOIO);
4087 ret = 0;
Alex Elder1e130192012-07-03 16:01:19 -05004088
4089 if (IS_ERR(rbd_dev->header.object_prefix)) {
4090 ret = PTR_ERR(rbd_dev->header.object_prefix);
4091 rbd_dev->header.object_prefix = NULL;
4092 } else {
4093 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4094 }
Alex Elder1e130192012-07-03 16:01:19 -05004095out:
4096 kfree(reply_buf);
4097
4098 return ret;
4099}
4100
Alex Elderb1b54022012-07-03 16:01:19 -05004101static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4102 u64 *snap_features)
4103{
4104 __le64 snapid = cpu_to_le64(snap_id);
4105 struct {
4106 __le64 features;
4107 __le64 incompat;
Alex Elder41579762013-04-21 12:14:45 -05004108 } __attribute__ ((packed)) features_buf = { 0 };
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004109 u64 unsup;
Alex Elderb1b54022012-07-03 16:01:19 -05004110 int ret;
4111
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004112 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elderb1b54022012-07-03 16:01:19 -05004113 "rbd", "get_features",
Alex Elder41579762013-04-21 12:14:45 -05004114 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004115 &features_buf, sizeof (features_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004116 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderb1b54022012-07-03 16:01:19 -05004117 if (ret < 0)
4118 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004119 if (ret < sizeof (features_buf))
4120 return -ERANGE;
Alex Elderd8891402012-10-09 13:50:17 -07004121
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004122 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4123 if (unsup) {
4124 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4125 unsup);
Alex Elderb8f5c6e2012-11-01 08:39:26 -05004126 return -ENXIO;
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004127 }
Alex Elderd8891402012-10-09 13:50:17 -07004128
Alex Elderb1b54022012-07-03 16:01:19 -05004129 *snap_features = le64_to_cpu(features_buf.features);
4130
4131 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
Alex Elder57385b52013-04-21 12:14:45 -05004132 (unsigned long long)snap_id,
4133 (unsigned long long)*snap_features,
4134 (unsigned long long)le64_to_cpu(features_buf.incompat));
Alex Elderb1b54022012-07-03 16:01:19 -05004135
4136 return 0;
4137}
4138
4139static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4140{
4141 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4142 &rbd_dev->header.features);
4143}
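
/*
 * Example (illustrative): an image with only layering enabled would
 * report features 0x0000000000000001 (assuming RBD_FEATURE_LAYERING is
 * bit 0); any incompat bit outside RBD_FEATURES_SUPPORTED makes the
 * probe fail with -ENXIO above.
 */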
4144
Alex Elder86b00e02012-10-25 23:34:42 -05004145static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4146{
4147 struct rbd_spec *parent_spec;
4148 size_t size;
4149 void *reply_buf = NULL;
4150 __le64 snapid;
4151 void *p;
4152 void *end;
Alex Elder642a2532013-05-06 17:40:33 -05004153 u64 pool_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004154 char *image_id;
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004155 u64 snap_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004156 u64 overlap;
Alex Elder86b00e02012-10-25 23:34:42 -05004157 int ret;
4158
4159 parent_spec = rbd_spec_alloc();
4160 if (!parent_spec)
4161 return -ENOMEM;
4162
4163 size = sizeof (__le64) + /* pool_id */
4164 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4165 sizeof (__le64) + /* snap_id */
4166 sizeof (__le64); /* overlap */
4167 reply_buf = kmalloc(size, GFP_KERNEL);
4168 if (!reply_buf) {
4169 ret = -ENOMEM;
4170 goto out_err;
4171 }
4172
Ilya Dryomov4d9b67c2014-07-24 10:42:13 +04004173 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004174 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder86b00e02012-10-25 23:34:42 -05004175 "rbd", "get_parent",
Alex Elder41579762013-04-21 12:14:45 -05004176 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004177 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004178 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder86b00e02012-10-25 23:34:42 -05004179 if (ret < 0)
4180 goto out_err;
4181
Alex Elder86b00e02012-10-25 23:34:42 -05004182 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004183 end = reply_buf + ret;
4184 ret = -ERANGE;
Alex Elder642a2532013-05-06 17:40:33 -05004185 ceph_decode_64_safe(&p, end, pool_id, out_err);
Alex Elder392a9da2013-05-06 17:40:33 -05004186 if (pool_id == CEPH_NOPOOL) {
4187 /*
4188 * Either the parent never existed, or we have
4189 * record of it but the image got flattened so it no
4190 * longer has a parent. When the parent of a
4191 * layered image disappears we immediately set the
4192 * overlap to 0. The effect of this is that all new
4193 * requests will be treated as if the image had no
4194 * parent.
4195 */
4196 if (rbd_dev->parent_overlap) {
4197 rbd_dev->parent_overlap = 0;
Alex Elder392a9da2013-05-06 17:40:33 -05004198 rbd_dev_parent_put(rbd_dev);
4199 pr_info("%s: clone image has been flattened\n",
4200 rbd_dev->disk->disk_name);
4201 }
4202
Alex Elder86b00e02012-10-25 23:34:42 -05004203 goto out; /* No parent? No problem. */
Alex Elder392a9da2013-05-06 17:40:33 -05004204 }
Alex Elder86b00e02012-10-25 23:34:42 -05004205
Alex Elder0903e872012-11-14 12:25:19 -06004206 /* The ceph file layout needs to fit pool id in 32 bits */
4207
4208 ret = -EIO;
Alex Elder642a2532013-05-06 17:40:33 -05004209 if (pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04004210 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
Alex Elder642a2532013-05-06 17:40:33 -05004211 (unsigned long long)pool_id, U32_MAX);
Alex Elder57385b52013-04-21 12:14:45 -05004212 goto out_err;
Alex Elderc0cd10db2013-04-26 09:43:47 -05004213 }
Alex Elder0903e872012-11-14 12:25:19 -06004214
Alex Elder979ed482012-11-01 08:39:26 -05004215 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elder86b00e02012-10-25 23:34:42 -05004216 if (IS_ERR(image_id)) {
4217 ret = PTR_ERR(image_id);
4218 goto out_err;
4219 }
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004220 ceph_decode_64_safe(&p, end, snap_id, out_err);
Alex Elder86b00e02012-10-25 23:34:42 -05004221 ceph_decode_64_safe(&p, end, overlap, out_err);
4222
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004223 /*
4224 * The parent won't change (except when the clone is
4225 * flattened, which was handled above). So we only need to
4226 * record the parent spec if we have not already done so.
4227 */
4228 if (!rbd_dev->parent_spec) {
4229 parent_spec->pool_id = pool_id;
4230 parent_spec->image_id = image_id;
4231 parent_spec->snap_id = snap_id;
Alex Elder70cf49c2013-05-06 17:40:33 -05004232 rbd_dev->parent_spec = parent_spec;
4233 parent_spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovfbba11b2014-06-27 21:46:33 +04004234 } else {
4235 kfree(image_id);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004236 }
4237
4238 /*
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004239 * We always update the parent overlap. If it's zero we issue
4240 * a warning, as we will proceed as if there was no parent.
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004241 */
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004242 if (!overlap) {
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004243 if (parent_spec) {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004244 /* refresh, careful to warn just once */
4245 if (rbd_dev->parent_overlap)
4246 rbd_warn(rbd_dev,
4247 "clone now standalone (overlap became 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004248 } else {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004249 /* initial probe */
4250 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004251 }
Alex Elder70cf49c2013-05-06 17:40:33 -05004252 }
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004253 rbd_dev->parent_overlap = overlap;
4254
Alex Elder86b00e02012-10-25 23:34:42 -05004255out:
4256 ret = 0;
4257out_err:
4258 kfree(reply_buf);
4259 rbd_spec_put(parent_spec);
4260
4261 return ret;
4262}
4263
Alex Eldercc070d52013-04-21 12:14:45 -05004264static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4265{
4266 struct {
4267 __le64 stripe_unit;
4268 __le64 stripe_count;
4269 } __attribute__ ((packed)) striping_info_buf = { 0 };
4270 size_t size = sizeof (striping_info_buf);
4271 void *p;
4272 u64 obj_size;
4273 u64 stripe_unit;
4274 u64 stripe_count;
4275 int ret;
4276
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004277 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Eldercc070d52013-04-21 12:14:45 -05004278 "rbd", "get_stripe_unit_count", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004279 (char *)&striping_info_buf, size);
Alex Eldercc070d52013-04-21 12:14:45 -05004280 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4281 if (ret < 0)
4282 return ret;
4283 if (ret < size)
4284 return -ERANGE;
4285
4286 /*
4287 * We don't actually support the "fancy striping" feature
4288 * (STRIPINGV2) yet, but if the striping sizes are the
4289 * defaults the behavior is the same as before. So find
4290 * out, and only fail if the image has non-default values.
4291 */
4292 ret = -EINVAL;
4293 obj_size = (u64)1 << rbd_dev->header.obj_order;
4294 p = &striping_info_buf;
4295 stripe_unit = ceph_decode_64(&p);
4296 if (stripe_unit != obj_size) {
4297 rbd_warn(rbd_dev, "unsupported stripe unit "
4298 "(got %llu want %llu)",
4299 stripe_unit, obj_size);
4300 return -EINVAL;
4301 }
4302 stripe_count = ceph_decode_64(&p);
4303 if (stripe_count != 1) {
4304 rbd_warn(rbd_dev, "unsupported stripe count "
4305 "(got %llu want 1)", stripe_count);
4306 return -EINVAL;
4307 }
Alex Elder500d0c02013-04-26 09:43:47 -05004308 rbd_dev->header.stripe_unit = stripe_unit;
4309 rbd_dev->header.stripe_count = stripe_count;
Alex Eldercc070d52013-04-21 12:14:45 -05004310
4311 return 0;
4312}
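
/*
 * Example (illustrative): with default 4 MiB objects the only accepted
 * reply is stripe_unit = 4194304 and stripe_count = 1; any other
 * combination is rejected above because fancy striping isn't
 * implemented.
 */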
4313
Alex Elder9e15b772012-10-30 19:40:33 -05004314static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4315{
4316 size_t image_id_size;
4317 char *image_id;
4318 void *p;
4319 void *end;
4320 size_t size;
4321 void *reply_buf = NULL;
4322 size_t len = 0;
4323 char *image_name = NULL;
4324 int ret;
4325
4326 rbd_assert(!rbd_dev->spec->image_name);
4327
Alex Elder69e7a022012-11-01 08:39:26 -05004328 len = strlen(rbd_dev->spec->image_id);
4329 image_id_size = sizeof (__le32) + len;
Alex Elder9e15b772012-10-30 19:40:33 -05004330 image_id = kmalloc(image_id_size, GFP_KERNEL);
4331 if (!image_id)
4332 return NULL;
4333
4334 p = image_id;
Alex Elder41579762013-04-21 12:14:45 -05004335 end = image_id + image_id_size;
Alex Elder57385b52013-04-21 12:14:45 -05004336 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
Alex Elder9e15b772012-10-30 19:40:33 -05004337
4338 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4339 reply_buf = kmalloc(size, GFP_KERNEL);
4340 if (!reply_buf)
4341 goto out;
4342
Alex Elder36be9a72013-01-19 00:30:28 -06004343 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
Alex Elder9e15b772012-10-30 19:40:33 -05004344 "rbd", "dir_get_name",
4345 image_id, image_id_size,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004346 reply_buf, size);
Alex Elder9e15b772012-10-30 19:40:33 -05004347 if (ret < 0)
4348 goto out;
4349 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004350 end = reply_buf + ret;
4351
Alex Elder9e15b772012-10-30 19:40:33 -05004352 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4353 if (IS_ERR(image_name))
4354 image_name = NULL;
4355 else
4356 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4357out:
4358 kfree(reply_buf);
4359 kfree(image_id);
4360
4361 return image_name;
4362}
4363
Alex Elder2ad3d712013-04-30 00:44:33 -05004364static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4365{
4366 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4367 const char *snap_name;
4368 u32 which = 0;
4369
4370 /* Skip over names until we find the one we are looking for */
4371
4372 snap_name = rbd_dev->header.snap_names;
4373 while (which < snapc->num_snaps) {
4374 if (!strcmp(name, snap_name))
4375 return snapc->snaps[which];
4376 snap_name += strlen(snap_name) + 1;
4377 which++;
4378 }
4379 return CEPH_NOSNAP;
4380}
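
/*
 * Example of the v1 layout the loop above relies on (illustrative):
 * with snapc->snaps[] = { 12, 7 }, snap_names would be "newest\0older\0",
 * i.e. NUL-separated names stored in the same order as the ids.
 */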
4381
4382static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4383{
4384 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4385 u32 which;
4386 bool found = false;
4387 u64 snap_id;
4388
4389 for (which = 0; !found && which < snapc->num_snaps; which++) {
4390 const char *snap_name;
4391
4392 snap_id = snapc->snaps[which];
4393 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
Josh Durginefadc982013-08-29 19:16:42 -07004394 if (IS_ERR(snap_name)) {
4395 /* ignore no-longer existing snapshots */
4396 if (PTR_ERR(snap_name) == -ENOENT)
4397 continue;
4398 else
4399 break;
4400 }
Alex Elder2ad3d712013-04-30 00:44:33 -05004401 found = !strcmp(name, snap_name);
4402 kfree(snap_name);
4403 }
4404 return found ? snap_id : CEPH_NOSNAP;
4405}
4406
4407/*
4408 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4409 * no snapshot by that name is found, or if an error occurs.
4410 */
4411static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4412{
4413 if (rbd_dev->image_format == 1)
4414 return rbd_v1_snap_id_by_name(rbd_dev, name);
4415
4416 return rbd_v2_snap_id_by_name(rbd_dev, name);
4417}
4418
Alex Elder9e15b772012-10-30 19:40:33 -05004419/*
Ilya Dryomov04077592014-07-23 17:11:20 +04004420 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05004421 */
Ilya Dryomov04077592014-07-23 17:11:20 +04004422static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4423{
4424 struct rbd_spec *spec = rbd_dev->spec;
4425
4426 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4427 rbd_assert(spec->image_id && spec->image_name);
4428 rbd_assert(spec->snap_name);
4429
4430 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4431 u64 snap_id;
4432
4433 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4434 if (snap_id == CEPH_NOSNAP)
4435 return -ENOENT;
4436
4437 spec->snap_id = snap_id;
4438 } else {
4439 spec->snap_id = CEPH_NOSNAP;
4440 }
4441
4442 return 0;
4443}
4444
4445/*
4446 * A parent image will have all ids but none of the names.
4447 *
4448 * All names in an rbd spec are dynamically allocated. It's OK if we
4449 * can't figure out the name for an image id.
4450 */
4451static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05004452{
Alex Elder2e9f7f12013-04-26 09:43:48 -05004453 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4454 struct rbd_spec *spec = rbd_dev->spec;
4455 const char *pool_name;
4456 const char *image_name;
4457 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004458 int ret;
4459
Ilya Dryomov04077592014-07-23 17:11:20 +04004460 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4461 rbd_assert(spec->image_id);
4462 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05004463
Alex Elder2e9f7f12013-04-26 09:43:48 -05004464 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05004465
Alex Elder2e9f7f12013-04-26 09:43:48 -05004466 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4467 if (!pool_name) {
4468 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05004469 return -EIO;
4470 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05004471 pool_name = kstrdup(pool_name, GFP_KERNEL);
4472 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05004473 return -ENOMEM;
4474
4475 /* Fetch the image name; tolerate failure here */
4476
Alex Elder2e9f7f12013-04-26 09:43:48 -05004477 image_name = rbd_dev_image_name(rbd_dev);
4478 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05004479 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05004480
Ilya Dryomov04077592014-07-23 17:11:20 +04004481 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05004482
Alex Elder2e9f7f12013-04-26 09:43:48 -05004483 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07004484 if (IS_ERR(snap_name)) {
4485 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004486 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05004487 }
4488
4489 spec->pool_name = pool_name;
4490 spec->image_name = image_name;
4491 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004492
4493 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04004494
Alex Elder9e15b772012-10-30 19:40:33 -05004495out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05004496 kfree(image_name);
4497 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004498 return ret;
4499}
4500
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004501static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05004502{
4503 size_t size;
4504 int ret;
4505 void *reply_buf;
4506 void *p;
4507 void *end;
4508 u64 seq;
4509 u32 snap_count;
4510 struct ceph_snap_context *snapc;
4511 u32 i;
4512
4513 /*
4514 * We'll need room for the seq value (maximum snapshot id),
4515 * snapshot count, and array of that many snapshot ids.
4516 * For now we have a fixed upper limit on the number we're
4517 * prepared to receive.
4518 */
4519 size = sizeof (__le64) + sizeof (__le32) +
4520 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4521 reply_buf = kzalloc(size, GFP_KERNEL);
4522 if (!reply_buf)
4523 return -ENOMEM;
4524
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004525 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder41579762013-04-21 12:14:45 -05004526 "rbd", "get_snapcontext", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004527 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004528 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05004529 if (ret < 0)
4530 goto out;
4531
Alex Elder35d489f2012-07-03 16:01:19 -05004532 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004533 end = reply_buf + ret;
4534 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05004535 ceph_decode_64_safe(&p, end, seq, out);
4536 ceph_decode_32_safe(&p, end, snap_count, out);
4537
4538 /*
4539 * Make sure the reported number of snapshot ids wouldn't go
4540 * beyond the end of our buffer. But before checking that,
4541 * make sure the computed size of the snapshot context we
4542 * allocate is representable in a size_t.
4543 */
4544 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4545 / sizeof (u64)) {
4546 ret = -EINVAL;
4547 goto out;
4548 }
4549 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4550 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05004551 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05004552
Alex Elder812164f82013-04-30 00:44:32 -05004553 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05004554 if (!snapc) {
4555 ret = -ENOMEM;
4556 goto out;
4557 }
Alex Elder35d489f2012-07-03 16:01:19 -05004558 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05004559 for (i = 0; i < snap_count; i++)
4560 snapc->snaps[i] = ceph_decode_64(&p);
4561
Alex Elder49ece552013-05-06 08:37:00 -05004562 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05004563 rbd_dev->header.snapc = snapc;
4564
4565 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05004566 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05004567out:
4568 kfree(reply_buf);
4569
Alex Elder57385b52013-04-21 12:14:45 -05004570 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05004571}
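
/*
 * Example of the reply decoded above (illustrative): seq = 9,
 * snap_count = 2, followed by the ids { 9, 5 }, i.e. a __le64 seq,
 * a __le32 count and then count __le64 snapshot ids.
 */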
4572
Alex Elder54cac612013-04-30 00:44:33 -05004573static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4574 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004575{
4576 size_t size;
4577 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05004578 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004579 int ret;
4580 void *p;
4581 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004582 char *snap_name;
4583
4584 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4585 reply_buf = kmalloc(size, GFP_KERNEL);
4586 if (!reply_buf)
4587 return ERR_PTR(-ENOMEM);
4588
Alex Elder54cac612013-04-30 00:44:33 -05004589 snapid = cpu_to_le64(snap_id);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004590 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004591 "rbd", "get_snapshot_name",
Alex Elder54cac612013-04-30 00:44:33 -05004592 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004593 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004594 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05004595 if (ret < 0) {
4596 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004597 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05004598 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004599
4600 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004601 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05004602 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05004603 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004604 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004605
Alex Elderf40eb342013-04-25 15:09:42 -05004606 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05004607 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004608out:
4609 kfree(reply_buf);
4610
Alex Elderf40eb342013-04-25 15:09:42 -05004611 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004612}
4613
Alex Elder2df3fac2013-05-06 09:51:30 -05004614static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05004615{
Alex Elder2df3fac2013-05-06 09:51:30 -05004616 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05004617 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05004618
Josh Durgin1617e402013-06-12 14:43:10 -07004619 ret = rbd_dev_v2_image_size(rbd_dev);
4620 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004621 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07004622
Alex Elder2df3fac2013-05-06 09:51:30 -05004623 if (first_time) {
4624 ret = rbd_dev_v2_header_onetime(rbd_dev);
4625 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004626 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05004627 }
4628
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004629 ret = rbd_dev_v2_snap_context(rbd_dev);
Ilya Dryomovd194cd12015-08-31 18:22:10 +03004630 if (ret && first_time) {
4631 kfree(rbd_dev->header.object_prefix);
4632 rbd_dev->header.object_prefix = NULL;
4633 }
Alex Elder117973f2012-08-31 17:29:55 -05004634
4635 return ret;
4636}
4637
Ilya Dryomova720ae02014-07-23 17:11:19 +04004638static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4639{
4640 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4641
4642 if (rbd_dev->image_format == 1)
4643 return rbd_dev_v1_header_info(rbd_dev);
4644
4645 return rbd_dev_v2_header_info(rbd_dev);
4646}
4647
Alex Elder1ddbe942012-01-29 13:57:44 -06004648/*
Alex Elder499afd52012-02-02 08:13:29 -06004649 * Get a unique rbd identifier for the given new rbd_dev, and add
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004650 * the rbd_dev to the global list.
Alex Elder1ddbe942012-01-29 13:57:44 -06004651 */
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004652static int rbd_dev_id_get(struct rbd_device *rbd_dev)
Alex Elderb7f23c32012-01-29 13:57:43 -06004653{
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004654 int new_dev_id;
4655
Ilya Dryomov9b60e702013-12-13 15:28:57 +02004656 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4657 0, minor_to_rbd_dev_id(1 << MINORBITS),
4658 GFP_KERNEL);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004659 if (new_dev_id < 0)
4660 return new_dev_id;
4661
4662 rbd_dev->dev_id = new_dev_id;
Alex Elder499afd52012-02-02 08:13:29 -06004663
4664 spin_lock(&rbd_dev_list_lock);
4665 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4666 spin_unlock(&rbd_dev_list_lock);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004667
Ilya Dryomov70eebd22013-12-13 15:28:56 +02004668 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004669
4670 return 0;
Alex Elder1ddbe942012-01-29 13:57:44 -06004671}
Alex Elderb7f23c32012-01-29 13:57:43 -06004672
Alex Elder1ddbe942012-01-29 13:57:44 -06004673/*
Alex Elder499afd52012-02-02 08:13:29 -06004674 * Remove an rbd_dev from the global list, and record that its
4675 * identifier is no longer in use.
Alex Elder1ddbe942012-01-29 13:57:44 -06004676 */
Alex Eldere2839302012-08-29 17:11:06 -05004677static void rbd_dev_id_put(struct rbd_device *rbd_dev)
Alex Elder1ddbe942012-01-29 13:57:44 -06004678{
Alex Elder499afd52012-02-02 08:13:29 -06004679 spin_lock(&rbd_dev_list_lock);
4680 list_del_init(&rbd_dev->node);
4681 spin_unlock(&rbd_dev_list_lock);
Alex Elderb7f23c32012-01-29 13:57:43 -06004682
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004683 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4684
4685 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
Alex Elderb7f23c32012-01-29 13:57:43 -06004686}
4687
Alex Eldera725f65e2012-02-02 08:13:30 -06004688/*
Alex Eldere28fff262012-02-02 08:13:30 -06004689 * Skips over white space at *buf, and updates *buf to point to the
4690 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06004691 * the token (string of non-white space characters) found. Note
4692 * that *buf must be terminated with '\0'.
Alex Eldere28fff262012-02-02 08:13:30 -06004693 */
4694static inline size_t next_token(const char **buf)
4695{
4696 /*
4697 * These are the characters that produce nonzero for
4698 * isspace() in the "C" and "POSIX" locales.
4699 */
4700 const char *spaces = " \f\n\r\t\v";
4701
4702 *buf += strspn(*buf, spaces); /* Find start of token */
4703
4704 return strcspn(*buf, spaces); /* Return token length */
4705}
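
/*
 * For example, with *buf pointing at "  rbd foo" next_token() advances
 * *buf past the two spaces and returns 3, the length of "rbd".
 */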
4706
4707/*
Alex Elderea3352f2012-07-09 21:04:23 -05004708 * Finds the next token in *buf, dynamically allocates a buffer big
4709 * enough to hold a copy of it, and copies the token into the new
4710 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4711 * that a duplicate buffer is created even for a zero-length token.
4712 *
4713 * Returns a pointer to the newly-allocated duplicate, or a null
4714 * pointer if memory for the duplicate was not available. If
4715 * the lenp argument is a non-null pointer, the length of the token
4716 * (not including the '\0') is returned in *lenp.
4717 *
4718 * If successful, the *buf pointer will be updated to point beyond
4719 * the end of the found token.
4720 *
4721 * Note: uses GFP_KERNEL for allocation.
4722 */
4723static inline char *dup_token(const char **buf, size_t *lenp)
4724{
4725 char *dup;
4726 size_t len;
4727
4728 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05004729 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05004730 if (!dup)
4731 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05004732 *(dup + len) = '\0';
4733 *buf += len;
4734
4735 if (lenp)
4736 *lenp = len;
4737
4738 return dup;
4739}
4740
4741/*
Alex Elder859c31d2012-10-25 23:34:42 -05004742 * Parse the options provided for an "rbd add" (i.e., rbd image
4743 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4744 * and the data written is passed here via a NUL-terminated buffer.
4745 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05004746 *
Alex Elder859c31d2012-10-25 23:34:42 -05004747 * The information extracted from these options is recorded in
4748 * the other parameters which return dynamically-allocated
4749 * structures:
4750 * ceph_opts
4751 * The address of a pointer that will refer to a ceph options
4752 * structure. Caller must release the returned pointer using
4753 * ceph_destroy_options() when it is no longer needed.
4754 * rbd_opts
4755 * Address of an rbd options pointer. Fully initialized by
4756 * this function; caller must release with kfree().
4757 * spec
4758 * Address of an rbd image specification pointer. Fully
4759 * initialized by this function based on parsed options.
4760 * Caller must release with rbd_spec_put().
4761 *
4762 * The options passed take this form:
4763 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4764 * where:
4765 * <mon_addrs>
4766 * A comma-separated list of one or more monitor addresses.
4767 * A monitor address is an ip address, optionally followed
4768 * by a port number (separated by a colon).
4769 * I.e.: ip1[:port1][,ip2[:port2]...]
4770 * <options>
4771 * A comma-separated list of ceph and/or rbd options.
4772 * <pool_name>
4773 * The name of the rados pool containing the rbd image.
4774 * <image_name>
4775 * The name of the image in that pool to map.
4776 * <snap_name>
4777 * An optional snapshot name. If provided, the mapping will
4778 * present data from the image at the time that snapshot was
4779 * created. The image head is used if no snapshot name is
4780 * provided. Snapshot mappings are always read-only.
Alex Eldera725f65e2012-02-02 08:13:30 -06004781 */
Alex Elder859c31d2012-10-25 23:34:42 -05004782static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05004783 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05004784 struct rbd_options **opts,
4785 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06004786{
Alex Elderd22f76e2012-07-12 10:46:35 -05004787 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05004788 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05004789 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05004790 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05004791 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05004792 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004793 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05004794 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05004795 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06004796
4797 /* The first four tokens are required */
4798
Alex Elder7ef32142012-02-02 08:13:30 -06004799 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05004800 if (!len) {
4801 rbd_warn(NULL, "no monitor address(es) provided");
4802 return -EINVAL;
4803 }
Alex Elder0ddebc02012-10-25 23:34:41 -05004804 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05004805 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06004806 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06004807
Alex Elderdc79b112012-10-25 23:34:41 -05004808 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05004809 options = dup_token(&buf, NULL);
4810 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05004811 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004812 if (!*options) {
4813 rbd_warn(NULL, "no options provided");
4814 goto out_err;
4815 }
Alex Eldera725f65e2012-02-02 08:13:30 -06004816
Alex Elder859c31d2012-10-25 23:34:42 -05004817 spec = rbd_spec_alloc();
4818 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05004819 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05004820
4821 spec->pool_name = dup_token(&buf, NULL);
4822 if (!spec->pool_name)
4823 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004824 if (!*spec->pool_name) {
4825 rbd_warn(NULL, "no pool name provided");
4826 goto out_err;
4827 }
Alex Eldere28fff262012-02-02 08:13:30 -06004828
Alex Elder69e7a022012-11-01 08:39:26 -05004829 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05004830 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004831 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004832 if (!*spec->image_name) {
4833 rbd_warn(NULL, "no image name provided");
4834 goto out_err;
4835 }
Alex Eldere28fff262012-02-02 08:13:30 -06004836
Alex Elderf28e5652012-10-25 23:34:41 -05004837 /*
4838 * Snapshot name is optional; default is to use "-"
4839 * (indicating the head/no snapshot).
4840 */
Alex Elder3feeb8942012-08-31 17:29:52 -05004841 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05004842 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05004843 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4844 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05004845 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05004846 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05004847 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05004848 }
Alex Elderecb4dc22013-04-26 09:43:47 -05004849 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4850 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004851 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05004852 *(snap_name + len) = '\0';
4853 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05004854
Alex Elder0ddebc02012-10-25 23:34:41 -05004855 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06004856
Alex Elder4e9afeb2012-10-25 23:34:41 -05004857 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4858 if (!rbd_opts)
4859 goto out_mem;
4860
4861 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004862 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05004863
Alex Elder859c31d2012-10-25 23:34:42 -05004864 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05004865 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05004866 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05004867 if (IS_ERR(copts)) {
4868 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05004869 goto out_err;
4870 }
Alex Elder859c31d2012-10-25 23:34:42 -05004871 kfree(options);
4872
4873 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004874 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05004875 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05004876
Alex Elderdc79b112012-10-25 23:34:41 -05004877 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05004878out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05004879 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05004880out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05004881 kfree(rbd_opts);
4882 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05004883 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05004884
Alex Elderdc79b112012-10-25 23:34:41 -05004885 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06004886}
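
/*
 * Example add request (illustrative; exact option names come from the
 * libceph and rbd option parsers):
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo -" \
 *	> /sys/bus/rbd/add
 *
 * maps the head of image "foo" in pool "rbd" via the monitor at
 * 1.2.3.4:6789; the trailing "-" (RBD_SNAP_HEAD_NAME) means no
 * snapshot.
 */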
4887
Alex Elder589d30e2012-07-10 20:30:11 -05004888/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004889 * Return pool id (>= 0) or a negative error code.
4890 */
4891static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4892{
Ilya Dryomova319bf52015-05-15 12:02:17 +03004893 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004894 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004895 int tries = 0;
4896 int ret;
4897
4898again:
4899 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4900 if (ret == -ENOENT && tries++ < 1) {
Ilya Dryomovd0b19702016-04-28 16:07:27 +02004901 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
4902 &newest_epoch);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004903 if (ret < 0)
4904 return ret;
4905
4906 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
Ilya Dryomov7cca78c2016-04-28 16:07:28 +02004907 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004908 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03004909 newest_epoch,
4910 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004911 goto again;
4912 } else {
4913 /* the osdmap we have is new enough */
4914 return -ENOENT;
4915 }
4916 }
4917
4918 return ret;
4919}
4920
4921/*
Alex Elder589d30e2012-07-10 20:30:11 -05004922 * An rbd format 2 image has a unique identifier, distinct from the
4923 * name given to it by the user. Internally, that identifier is
4924 * what's used to specify the names of objects related to the image.
4925 *
4926 * A special "rbd id" object is used to map an rbd image name to its
4927 * id. If that object doesn't exist, then there is no v2 rbd image
4928 * with the supplied name.
4929 *
4930 * This function will record the given rbd_dev's image_id field if
4931 * it can be determined, and in that case will return 0. If any
4932 * errors occur a negative errno will be returned and the rbd_dev's
4933 * image_id field will be unchanged (and should be NULL).
4934 */
4935static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4936{
4937 int ret;
4938 size_t size;
4939 char *object_name;
4940 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05004941 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05004942
Alex Elder589d30e2012-07-10 20:30:11 -05004943 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05004944 * When probing a parent image, the image id is already
4945 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05004946 * need to fetch the image id again in this case. We
4947 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05004948 */
Alex Elderc0fba362013-04-25 23:15:08 -05004949 if (rbd_dev->spec->image_id) {
4950 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4951
Alex Elder2c0d0a12012-10-30 19:40:33 -05004952 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05004953 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05004954
4955 /*
Alex Elder589d30e2012-07-10 20:30:11 -05004956 * First, see if the format 2 image id file exists, and if
4957 * so, get the image's persistent id from it.
4958 */
Alex Elder69e7a022012-11-01 08:39:26 -05004959 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05004960 object_name = kmalloc(size, GFP_NOIO);
4961 if (!object_name)
4962 return -ENOMEM;
Alex Elder0d7dbfc2012-10-25 23:34:41 -05004963 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05004964 dout("rbd id object name is %s\n", object_name);
4965
4966 /* Response will be an encoded string, which includes a length */
4967
4968 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4969 response = kzalloc(size, GFP_NOIO);
4970 if (!response) {
4971 ret = -ENOMEM;
4972 goto out;
4973 }
4974
Alex Elderc0fba362013-04-25 23:15:08 -05004975 /* If it doesn't exist we'll assume it's a format 1 image */
4976
Alex Elder36be9a72013-01-19 00:30:28 -06004977 ret = rbd_obj_method_sync(rbd_dev, object_name,
Alex Elder41579762013-04-21 12:14:45 -05004978 "rbd", "get_id", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004979 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004980 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05004981 if (ret == -ENOENT) {
4982 image_id = kstrdup("", GFP_KERNEL);
4983 ret = image_id ? 0 : -ENOMEM;
4984 if (!ret)
4985 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04004986 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05004987 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05004988
Alex Elderc0fba362013-04-25 23:15:08 -05004989 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05004990 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08004991 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05004992 if (!ret)
4993 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05004994 }
4995
4996 if (!ret) {
4997 rbd_dev->spec->image_id = image_id;
4998 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05004999 }
5000out:
5001 kfree(response);
5002 kfree(object_name);
5003
5004 return ret;
5005}
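
/*
 * Example (illustrative): for an image named "foo" the id object is
 * "rbd_id.foo" (RBD_ID_PREFIX plus the image name); a reply decoding to
 * an id such as "1028b4567890" marks the image as format 2, while a
 * missing id object (-ENOENT) means format 1.
 */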
5006
Alex Elder3abef3b2013-05-13 20:35:37 -05005007/*
5008 * Undo whatever state changes are made by v1 or v2 header info
5009 * call.
5010 */
Alex Elder6fd48b32013-04-28 23:32:34 -05005011static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5012{
5013 struct rbd_image_header *header;
5014
Ilya Dryomove69b8d42015-01-19 12:06:14 +03005015 rbd_dev_parent_put(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005016
5017 /* Free dynamic fields from the header, then zero it out */
5018
5019 header = &rbd_dev->header;
Alex Elder812164f82013-04-30 00:44:32 -05005020 ceph_put_snap_context(header->snapc);
Alex Elder6fd48b32013-04-28 23:32:34 -05005021 kfree(header->snap_sizes);
5022 kfree(header->snap_names);
5023 kfree(header->object_prefix);
5024 memset(header, 0, sizeof (*header));
5025}
5026
Alex Elder2df3fac2013-05-06 09:51:30 -05005027static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
Alex Eldera30b71b2012-07-10 20:30:11 -05005028{
5029 int ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005030
Alex Elder1e130192012-07-03 16:01:19 -05005031 ret = rbd_dev_v2_object_prefix(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005032 if (ret)
Alex Elder1e130192012-07-03 16:01:19 -05005033 goto out_err;
Alex Elderb1b54022012-07-03 16:01:19 -05005034
Alex Elder2df3fac2013-05-06 09:51:30 -05005035 /*
5036 * Get and check the features for the image. Currently the
5037 * features are assumed to never change.
5038 */
Alex Elderb1b54022012-07-03 16:01:19 -05005039 ret = rbd_dev_v2_features(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005040 if (ret)
Alex Elderb1b54022012-07-03 16:01:19 -05005041 goto out_err;
Alex Elder35d489f2012-07-03 16:01:19 -05005042
Alex Eldercc070d52013-04-21 12:14:45 -05005043 /* If the image supports fancy striping, get its parameters */
5044
5045 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5046 ret = rbd_dev_v2_striping_info(rbd_dev);
5047 if (ret < 0)
5048 goto out_err;
5049 }
Alex Elder2df3fac2013-05-06 09:51:30 -05005050 /* No support for crypto or compression in format 2 images */
Alex Eldera30b71b2012-07-10 20:30:11 -05005051
Alex Elder35152972012-08-31 17:29:55 -05005052 return 0;
Alex Elder9d475de2012-07-03 16:01:19 -05005053out_err:
Alex Elder642a2532013-05-06 17:40:33 -05005054 rbd_dev->header.features = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005055 kfree(rbd_dev->header.object_prefix);
5056 rbd_dev->header.object_prefix = NULL;
Alex Elder9d475de2012-07-03 16:01:19 -05005057
5058 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005059}
5060
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005061/*
5062 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5063 * rbd_dev_image_probe() recursion depth, which means it's also the
5064 * length of the already discovered part of the parent chain.
5065 */
5066static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
Alex Elder83a06262012-10-30 15:47:17 -05005067{
Alex Elder2f82ee52012-10-30 19:40:33 -05005068 struct rbd_device *parent = NULL;
Alex Elder124afba2013-04-26 15:44:36 -05005069 int ret;
5070
5071 if (!rbd_dev->parent_spec)
5072 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005073
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005074 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5075 pr_info("parent chain is too long (%d)\n", depth);
5076 ret = -EINVAL;
5077 goto out_err;
5078 }
5079
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005080 parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
5081 NULL);
5082 if (!parent) {
5083 ret = -ENOMEM;
Alex Elder124afba2013-04-26 15:44:36 -05005084 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005085 }
5086
5087 /*
5088 * Images related by parent/child relationships always share
5089 * rbd_client and spec/parent_spec, so bump their refcounts.
5090 */
5091 __rbd_get_client(rbd_dev->rbd_client);
5092 rbd_spec_get(rbd_dev->parent_spec);
Alex Elder124afba2013-04-26 15:44:36 -05005093
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005094 ret = rbd_dev_image_probe(parent, depth);
Alex Elder124afba2013-04-26 15:44:36 -05005095 if (ret < 0)
5096 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005097
Alex Elder124afba2013-04-26 15:44:36 -05005098 rbd_dev->parent = parent;
Alex Eldera2acd002013-05-08 22:50:04 -05005099 atomic_set(&rbd_dev->parent_ref, 1);
Alex Elder124afba2013-04-26 15:44:36 -05005100 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005101
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005102out_err:
5103 rbd_dev_unparent(rbd_dev);
Markus Elfring1761b222015-11-23 20:16:45 +01005104 rbd_dev_destroy(parent);
Alex Elder124afba2013-04-26 15:44:36 -05005105 return ret;
5106}
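/*
 * Probing therefore recurses once per ancestor until an image with no
 * parent_spec is reached or the chain grows past
 * RBD_MAX_PARENT_CHAIN_LEN; on failure, rbd_dev_unparent() and
 * rbd_dev_destroy() unwind whatever was built for this level.
 */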
5107
Ilya Dryomov811c6682016-04-15 16:22:16 +02005108/*
5109 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5110 * upon return.
5111 */
Alex Elder200a6a82013-04-28 23:32:34 -05005112static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
Alex Elder124afba2013-04-26 15:44:36 -05005113{
Alex Elder83a06262012-10-30 15:47:17 -05005114 int ret;
Alex Elder83a06262012-10-30 15:47:17 -05005115
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005116 /* Get an id and fill in device name. */
Alex Elder83a06262012-10-30 15:47:17 -05005117
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005118 ret = rbd_dev_id_get(rbd_dev);
5119 if (ret)
Ilya Dryomov811c6682016-04-15 16:22:16 +02005120 goto err_out_unlock;
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005121
Alex Elder83a06262012-10-30 15:47:17 -05005122 BUILD_BUG_ON(DEV_NAME_LEN
5123 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5124 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5125
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005126 /* Record our major and minor device numbers. */
Alex Elder83a06262012-10-30 15:47:17 -05005127
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005128 if (!single_major) {
5129 ret = register_blkdev(0, rbd_dev->name);
5130 if (ret < 0)
5131 goto err_out_id;
5132
5133 rbd_dev->major = ret;
5134 rbd_dev->minor = 0;
5135 } else {
5136 rbd_dev->major = rbd_major;
5137 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5138 }
Alex Elder83a06262012-10-30 15:47:17 -05005139
5140 /* Set up the blkdev mapping. */
5141
5142 ret = rbd_init_disk(rbd_dev);
5143 if (ret)
5144 goto err_out_blkdev;
5145
Alex Elderf35a4de2013-05-06 09:51:29 -05005146 ret = rbd_dev_mapping_set(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005147 if (ret)
5148 goto err_out_disk;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04005149
Alex Elderf35a4de2013-05-06 09:51:29 -05005150 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
Josh Durgin22001f62013-09-30 20:10:04 -07005151 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
Alex Elderf35a4de2013-05-06 09:51:29 -05005152
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005153 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5154 ret = device_add(&rbd_dev->dev);
Alex Elderf35a4de2013-05-06 09:51:29 -05005155 if (ret)
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005156 goto err_out_mapping;
Alex Elder83a06262012-10-30 15:47:17 -05005157
Alex Elder83a06262012-10-30 15:47:17 -05005158 /* Everything's ready. Announce the disk to the world. */
5159
Alex Elder129b79d2013-04-26 15:44:36 -05005160 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005161 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005162
Ilya Dryomov811c6682016-04-15 16:22:16 +02005163 add_disk(rbd_dev->disk);
Alex Elder83a06262012-10-30 15:47:17 -05005164 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5165 (unsigned long long) rbd_dev->mapping.size);
5166
5167 return ret;
Alex Elder2f82ee52012-10-30 19:40:33 -05005168
Alex Elderf35a4de2013-05-06 09:51:29 -05005169err_out_mapping:
5170 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005171err_out_disk:
5172 rbd_free_disk(rbd_dev);
5173err_out_blkdev:
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005174 if (!single_major)
5175 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Elder83a06262012-10-30 15:47:17 -05005176err_out_id:
5177 rbd_dev_id_put(rbd_dev);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005178err_out_unlock:
5179 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005180 return ret;
5181}
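/*
 * Setup order above: device id, blkdev major/minor, gendisk, mapping
 * size, device_add(), then add_disk().  header_rwsem is dropped just
 * before add_disk(), presumably so that I/O triggered by add_disk()
 * (e.g. partition scanning) does not have to wait on the semaphore;
 * the error labels unwind the steps in reverse order.
 */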
5182
Alex Elder332bb122013-04-27 09:59:30 -05005183static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5184{
5185 struct rbd_spec *spec = rbd_dev->spec;
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005186 int ret;
Alex Elder332bb122013-04-27 09:59:30 -05005187
5188 /* Record the header object name for this rbd image. */
5189
5190 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5191
Yan, Zheng76271512016-02-03 21:24:49 +08005192 rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
Alex Elder332bb122013-04-27 09:59:30 -05005193 if (rbd_dev->image_format == 1)
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005194 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5195 spec->image_name, RBD_SUFFIX);
Alex Elder332bb122013-04-27 09:59:30 -05005196 else
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005197 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5198 RBD_HEADER_PREFIX, spec->image_id);
Alex Elder332bb122013-04-27 09:59:30 -05005199
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005200 return ret;
Alex Elder332bb122013-04-27 09:59:30 -05005201}
5202
Alex Elder200a6a82013-04-28 23:32:34 -05005203static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5204{
Alex Elder6fd48b32013-04-28 23:32:34 -05005205 rbd_dev_unprobe(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005206 rbd_dev->image_format = 0;
5207 kfree(rbd_dev->spec->image_id);
5208 rbd_dev->spec->image_id = NULL;
5209
Alex Elder200a6a82013-04-28 23:32:34 -05005210 rbd_dev_destroy(rbd_dev);
5211}
5212
Alex Eldera30b71b2012-07-10 20:30:11 -05005213/*
5214 * Probe for the existence of the header object for the given rbd
Alex Elder1f3ef782013-05-06 17:40:33 -05005215 * device. If this image is the one being mapped (i.e., not a
5216 * parent), initiate a watch on its header object before using that
5217 * object to get detailed information about the rbd image.
Alex Eldera30b71b2012-07-10 20:30:11 -05005218 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005219static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
Alex Eldera30b71b2012-07-10 20:30:11 -05005220{
5221 int ret;
5222
5223 /*
Alex Elder3abef3b2013-05-13 20:35:37 -05005224 * Get the id from the image id object. Unless there's an
5225 * error, rbd_dev->spec->image_id will be filled in with
5226 * a dynamically-allocated string, and rbd_dev->image_format
5227 * will be set to either 1 or 2.
Alex Eldera30b71b2012-07-10 20:30:11 -05005228 */
5229 ret = rbd_dev_image_id(rbd_dev);
5230 if (ret)
Alex Elderc0fba362013-04-25 23:15:08 -05005231 return ret;
Alex Elderc0fba362013-04-25 23:15:08 -05005232
Alex Elder332bb122013-04-27 09:59:30 -05005233 ret = rbd_dev_header_name(rbd_dev);
5234 if (ret)
5235 goto err_out_format;
5236
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005237 if (!depth) {
Ilya Dryomovfca27062013-12-16 18:02:40 +02005238 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005239 if (ret) {
5240 if (ret == -ENOENT)
5241 pr_info("image %s/%s does not exist\n",
5242 rbd_dev->spec->pool_name,
5243 rbd_dev->spec->image_name);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005244 goto err_out_format;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005245 }
Alex Elder1f3ef782013-05-06 17:40:33 -05005246 }
Alex Elderb644de22013-04-27 09:59:31 -05005247
Ilya Dryomova720ae02014-07-23 17:11:19 +04005248 ret = rbd_dev_header_info(rbd_dev);
Alex Elder5655c4d2013-04-25 23:15:08 -05005249 if (ret)
Alex Elderb644de22013-04-27 09:59:31 -05005250 goto err_out_watch;
Alex Elder83a06262012-10-30 15:47:17 -05005251
Ilya Dryomov04077592014-07-23 17:11:20 +04005252 /*
5253 * If this image is the one being mapped, we have pool name and
5254 * id, image name and id, and snap name - need to fill snap id.
5255 * Otherwise this is a parent image, identified by pool, image
5256 * and snap ids - need to fill in names for those ids.
5257 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005258 if (!depth)
Ilya Dryomov04077592014-07-23 17:11:20 +04005259 ret = rbd_spec_fill_snap_id(rbd_dev);
5260 else
5261 ret = rbd_spec_fill_names(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005262 if (ret) {
5263 if (ret == -ENOENT)
5264 pr_info("snap %s/%s@%s does not exist\n",
5265 rbd_dev->spec->pool_name,
5266 rbd_dev->spec->image_name,
5267 rbd_dev->spec->snap_name);
Alex Elder33dca392013-04-30 00:44:33 -05005268 goto err_out_probe;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005269 }
Alex Elder9bb81c92013-04-27 09:59:30 -05005270
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005271 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5272 ret = rbd_dev_v2_parent_info(rbd_dev);
5273 if (ret)
5274 goto err_out_probe;
5275
5276 /*
5277 * Need to warn users if this image is the one being
5278 * mapped and has a parent.
5279 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005280 if (!depth && rbd_dev->parent_spec)
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005281 rbd_warn(rbd_dev,
5282 "WARNING: kernel layering is EXPERIMENTAL!");
5283 }
5284
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005285 ret = rbd_dev_probe_parent(rbd_dev, depth);
Alex Elder30d60ba2013-05-06 09:51:30 -05005286 if (ret)
5287 goto err_out_probe;
Alex Elder83a06262012-10-30 15:47:17 -05005288
Alex Elder30d60ba2013-05-06 09:51:30 -05005289 dout("discovered format %u image, header name is %s\n",
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005290 rbd_dev->image_format, rbd_dev->header_oid.name);
Alex Elder30d60ba2013-05-06 09:51:30 -05005291 return 0;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005292
Alex Elder6fd48b32013-04-28 23:32:34 -05005293err_out_probe:
5294 rbd_dev_unprobe(rbd_dev);
Alex Elderb644de22013-04-27 09:59:31 -05005295err_out_watch:
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005296 if (!depth)
Ilya Dryomovfca27062013-12-16 18:02:40 +02005297 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder332bb122013-04-27 09:59:30 -05005298err_out_format:
5299 rbd_dev->image_format = 0;
Alex Elder5655c4d2013-04-25 23:15:08 -05005300 kfree(rbd_dev->spec->image_id);
5301 rbd_dev->spec->image_id = NULL;
Alex Elder5655c4d2013-04-25 23:15:08 -05005302 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005303}
5304
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005305static ssize_t do_rbd_add(struct bus_type *bus,
5306 const char *buf,
5307 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005308{
Alex Eldercb8627c2012-07-09 21:04:23 -05005309 struct rbd_device *rbd_dev = NULL;
Alex Elderdc79b112012-10-25 23:34:41 -05005310 struct ceph_options *ceph_opts = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005311 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05005312 struct rbd_spec *spec = NULL;
Alex Elder9d3997f2012-10-25 23:34:42 -05005313 struct rbd_client *rbdc;
Alex Elder51344a32013-05-06 07:40:30 -05005314 bool read_only;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005315 int rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005316
5317 if (!try_module_get(THIS_MODULE))
5318 return -ENODEV;
5319
Alex Eldera725f65e2012-02-02 08:13:30 -06005320 /* parse add command */
Alex Elder859c31d2012-10-25 23:34:42 -05005321 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
Alex Elderdc79b112012-10-25 23:34:41 -05005322 if (rc < 0)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005323 goto out;
Alex Eldera725f65e2012-02-02 08:13:30 -06005324
Alex Elder9d3997f2012-10-25 23:34:42 -05005325 rbdc = rbd_get_client(ceph_opts);
5326 if (IS_ERR(rbdc)) {
5327 rc = PTR_ERR(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005328 goto err_out_args;
Alex Elder9d3997f2012-10-25 23:34:42 -05005329 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005330
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005331 /* pick the pool */
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005332 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005333 if (rc < 0) {
5334 if (rc == -ENOENT)
5335 pr_info("pool %s does not exist\n", spec->pool_name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005336 goto err_out_client;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005337 }
Alex Elderc0cd10db2013-04-26 09:43:47 -05005338 spec->pool_id = (u64)rc;
Alex Elder859c31d2012-10-25 23:34:42 -05005339
Ilya Dryomovd1475432015-06-22 13:24:48 +03005340 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005341 if (!rbd_dev) {
5342 rc = -ENOMEM;
Alex Elderbd4ba652012-10-25 23:34:42 -05005343 goto err_out_client;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005344 }
Alex Elderc53d5892012-10-25 23:34:42 -05005345 rbdc = NULL; /* rbd_dev now owns this */
5346 spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovd1475432015-06-22 13:24:48 +03005347 rbd_opts = NULL; /* rbd_dev now owns this */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005348
Ilya Dryomov811c6682016-04-15 16:22:16 +02005349 down_write(&rbd_dev->header_rwsem);
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005350 rc = rbd_dev_image_probe(rbd_dev, 0);
Alex Eldera30b71b2012-07-10 20:30:11 -05005351 if (rc < 0)
Alex Elderc53d5892012-10-25 23:34:42 -05005352 goto err_out_rbd_dev;
Alex Elder05fd6f62012-08-29 17:11:07 -05005353
Alex Elder7ce4eef2013-05-06 17:40:33 -05005354 /* If we are mapping a snapshot it must be marked read-only */
5355
Ilya Dryomovd1475432015-06-22 13:24:48 +03005356 read_only = rbd_dev->opts->read_only;
Alex Elder7ce4eef2013-05-06 17:40:33 -05005357 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5358 read_only = true;
5359 rbd_dev->mapping.read_only = read_only;
5360
Alex Elderb536f692013-04-28 23:32:34 -05005361 rc = rbd_dev_device_setup(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005362 if (rc) {
Ilya Dryomove37180c2013-12-16 18:02:41 +02005363 /*
5364 * rbd_dev_header_unwatch_sync() can't be moved into
5365 * rbd_dev_image_release() without refactoring, see
5366 * commit 1f3ef78861ac.
5367 */
5368 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005369 rbd_dev_image_release(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005370 goto out;
Alex Elder3abef3b2013-05-13 20:35:37 -05005371 }
Alex Elderb536f692013-04-28 23:32:34 -05005372
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005373 rc = count;
5374out:
5375 module_put(THIS_MODULE);
5376 return rc;
Alex Elder3abef3b2013-05-13 20:35:37 -05005377
Alex Elderc53d5892012-10-25 23:34:42 -05005378err_out_rbd_dev:
Ilya Dryomov811c6682016-04-15 16:22:16 +02005379 up_write(&rbd_dev->header_rwsem);
Alex Elderc53d5892012-10-25 23:34:42 -05005380 rbd_dev_destroy(rbd_dev);
Alex Elderbd4ba652012-10-25 23:34:42 -05005381err_out_client:
Alex Elder9d3997f2012-10-25 23:34:42 -05005382 rbd_put_client(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005383err_out_args:
Alex Elder859c31d2012-10-25 23:34:42 -05005384 rbd_spec_put(spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03005385 kfree(rbd_opts);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005386 goto out;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005387}
5388
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005389static ssize_t rbd_add(struct bus_type *bus,
5390 const char *buf,
5391 size_t count)
5392{
5393 if (single_major)
5394 return -EINVAL;
5395
5396 return do_rbd_add(bus, buf, count);
5397}
5398
5399static ssize_t rbd_add_single_major(struct bus_type *bus,
5400 const char *buf,
5401 size_t count)
5402{
5403 return do_rbd_add(bus, buf, count);
5404}
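/*
 * A rough usage sketch (the authoritative token format is whatever
 * rbd_add_parse_args() accepts): mapping an image means writing a
 * "<mon_addrs> <options> <pool> <image> [<snap>]" string to the add
 * attribute, for example something like
 *
 *   echo "192.168.0.1:6789 name=admin,secret=<key> rbd foo" \
 *           > /sys/bus/rbd/add
 *
 * (or to add_single_major when single_major is set), which lands in
 * do_rbd_add() above.
 */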
5405
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005406static void rbd_dev_device_release(struct rbd_device *rbd_dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005407{
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005408 rbd_free_disk(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005409 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005410 device_del(&rbd_dev->dev);
Alex Elder6d80b132013-05-06 07:40:30 -05005411 rbd_dev_mapping_clear(rbd_dev);
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005412 if (!single_major)
5413 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Eldere2839302012-08-29 17:11:06 -05005414 rbd_dev_id_put(rbd_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005415}
5416
Alex Elder05a46af2013-04-26 15:44:36 -05005417static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5418{
Alex Elderad945fc2013-04-26 15:44:36 -05005419 while (rbd_dev->parent) {
Alex Elder05a46af2013-04-26 15:44:36 -05005420 struct rbd_device *first = rbd_dev;
5421 struct rbd_device *second = first->parent;
5422 struct rbd_device *third;
5423
5424 /*
5425 * Follow to the parent with no grandparent and
5426 * remove it.
5427 */
5428 while (second && (third = second->parent)) {
5429 first = second;
5430 second = third;
5431 }
Alex Elderad945fc2013-04-26 15:44:36 -05005432 rbd_assert(second);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005433 rbd_dev_image_release(second);
Alex Elderad945fc2013-04-26 15:44:36 -05005434 first->parent = NULL;
5435 first->parent_overlap = 0;
5436
5437 rbd_assert(first->parent_spec);
Alex Elder05a46af2013-04-26 15:44:36 -05005438 rbd_spec_put(first->parent_spec);
5439 first->parent_spec = NULL;
Alex Elder05a46af2013-04-26 15:44:36 -05005440 }
5441}
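/*
 * Note that the loop above always removes the deepest ancestor first:
 * it walks down to the parent with no grandparent, releases that image
 * and drops its spec, then restarts from the mapped device until no
 * parent remains.
 */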
5442
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005443static ssize_t do_rbd_remove(struct bus_type *bus,
5444 const char *buf,
5445 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005446{
5447 struct rbd_device *rbd_dev = NULL;
Alex Elder751cc0e2013-05-31 15:17:01 -05005448 struct list_head *tmp;
5449 int dev_id;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005450 unsigned long ul;
Alex Elder82a442d2013-05-31 17:40:44 -05005451 bool already = false;
Alex Elder0d8189e2013-04-27 09:59:30 -05005452 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005453
Jingoo Hanbb8e0e82013-09-11 14:20:07 -07005454 ret = kstrtoul(buf, 10, &ul);
Alex Elder0d8189e2013-04-27 09:59:30 -05005455 if (ret)
5456 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005457
5458 /* convert to int; abort if we lost anything in the conversion */
Alex Elder751cc0e2013-05-31 15:17:01 -05005459 dev_id = (int)ul;
5460 if (dev_id != ul)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005461 return -EINVAL;
5462
Alex Elder751cc0e2013-05-31 15:17:01 -05005463 ret = -ENOENT;
5464 spin_lock(&rbd_dev_list_lock);
5465 list_for_each(tmp, &rbd_dev_list) {
5466 rbd_dev = list_entry(tmp, struct rbd_device, node);
5467 if (rbd_dev->dev_id == dev_id) {
5468 ret = 0;
5469 break;
5470 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005471 }
Alex Elder751cc0e2013-05-31 15:17:01 -05005472 if (!ret) {
5473 spin_lock_irq(&rbd_dev->lock);
5474 if (rbd_dev->open_count)
5475 ret = -EBUSY;
5476 else
Alex Elder82a442d2013-05-31 17:40:44 -05005477 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5478 &rbd_dev->flags);
Alex Elder751cc0e2013-05-31 15:17:01 -05005479 spin_unlock_irq(&rbd_dev->lock);
5480 }
5481 spin_unlock(&rbd_dev_list_lock);
Alex Elder82a442d2013-05-31 17:40:44 -05005482 if (ret < 0 || already)
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005483 return ret;
Alex Elder751cc0e2013-05-31 15:17:01 -05005484
Ilya Dryomovfca27062013-12-16 18:02:40 +02005485 rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomovfca27062013-12-16 18:02:40 +02005486
Josh Durgin98752012013-08-29 17:26:31 -07005487 /*
5488 * Don't free anything from rbd_dev->disk until after all
5489 * notifies are completely processed. Otherwise
5490 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5491 * in a potential use after free of rbd_dev->disk or rbd_dev.
5492 */
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005493 rbd_dev_device_release(rbd_dev);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005494 rbd_dev_image_release(rbd_dev);
Alex Elderaafb2302012-09-06 16:00:54 -05005495
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005496 return count;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005497}
5498
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005499static ssize_t rbd_remove(struct bus_type *bus,
5500 const char *buf,
5501 size_t count)
5502{
5503 if (single_major)
5504 return -EINVAL;
5505
5506 return do_rbd_remove(bus, buf, count);
5507}
5508
5509static ssize_t rbd_remove_single_major(struct bus_type *bus,
5510 const char *buf,
5511 size_t count)
5512{
5513 return do_rbd_remove(bus, buf, count);
5514}
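/*
 * A rough usage sketch: unmapping is the mirror of the add path - write
 * the device id (the N in /dev/rbdN) to the remove attribute, e.g.
 *
 *   echo 0 > /sys/bus/rbd/remove
 *
 * do_rbd_remove() refuses with -EBUSY while the device is still open.
 */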
5515
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005516/*
5517 * create control files in sysfs
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005518 * /sys/bus/rbd/...
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005519 */
5520static int rbd_sysfs_init(void)
5521{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005522 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005523
Alex Elderfed4c142012-02-07 12:03:36 -06005524 ret = device_register(&rbd_root_dev);
Alex Elder21079782012-01-24 10:08:36 -06005525 if (ret < 0)
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005526 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005527
Alex Elderfed4c142012-02-07 12:03:36 -06005528 ret = bus_register(&rbd_bus_type);
5529 if (ret < 0)
5530 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005531
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005532 return ret;
5533}
5534
5535static void rbd_sysfs_cleanup(void)
5536{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005537 bus_unregister(&rbd_bus_type);
Alex Elderfed4c142012-02-07 12:03:36 -06005538 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005539}
5540
Alex Elder1c2a9df2013-05-01 12:43:03 -05005541static int rbd_slab_init(void)
5542{
5543 rbd_assert(!rbd_img_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005544 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
Alex Elder868311b2013-05-01 12:43:03 -05005545 if (!rbd_img_request_cache)
5546 return -ENOMEM;
5547
5548 rbd_assert(!rbd_obj_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005549 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
Alex Elder78c2a442013-05-01 12:43:04 -05005550 if (!rbd_obj_request_cache)
5551 goto out_err;
5552
5553 rbd_assert(!rbd_segment_name_cache);
5554 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02005555 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
Alex Elder78c2a442013-05-01 12:43:04 -05005556 if (rbd_segment_name_cache)
Alex Elder1c2a9df2013-05-01 12:43:03 -05005557 return 0;
Alex Elder78c2a442013-05-01 12:43:04 -05005558out_err:
Julia Lawall13bf2832015-09-13 14:15:26 +02005559 kmem_cache_destroy(rbd_obj_request_cache);
5560 rbd_obj_request_cache = NULL;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005561
Alex Elder868311b2013-05-01 12:43:03 -05005562 kmem_cache_destroy(rbd_img_request_cache);
5563 rbd_img_request_cache = NULL;
5564
Alex Elder1c2a9df2013-05-01 12:43:03 -05005565 return -ENOMEM;
5566}
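/*
 * Three slab caches back the I/O path: image requests, object requests
 * and segment names.  A failure partway through frees whatever was
 * already created and leaves the pointers NULL.
 */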
5567
5568static void rbd_slab_exit(void)
5569{
Alex Elder78c2a442013-05-01 12:43:04 -05005570 rbd_assert(rbd_segment_name_cache);
5571 kmem_cache_destroy(rbd_segment_name_cache);
5572 rbd_segment_name_cache = NULL;
5573
Alex Elder868311b2013-05-01 12:43:03 -05005574 rbd_assert(rbd_obj_request_cache);
5575 kmem_cache_destroy(rbd_obj_request_cache);
5576 rbd_obj_request_cache = NULL;
5577
Alex Elder1c2a9df2013-05-01 12:43:03 -05005578 rbd_assert(rbd_img_request_cache);
5579 kmem_cache_destroy(rbd_img_request_cache);
5580 rbd_img_request_cache = NULL;
5581}
5582
Alex Eldercc344fa2013-02-19 12:25:56 -06005583static int __init rbd_init(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005584{
5585 int rc;
5586
Alex Elder1e32d342013-01-30 11:13:33 -06005587 if (!libceph_compatible(NULL)) {
5588 rbd_warn(NULL, "libceph incompatibility (quitting)");
Alex Elder1e32d342013-01-30 11:13:33 -06005589 return -EINVAL;
5590 }
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005591
Alex Elder1c2a9df2013-05-01 12:43:03 -05005592 rc = rbd_slab_init();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005593 if (rc)
5594 return rc;
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005595
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005596 /*
5597 * The number of active work items is limited by the number of
Ilya Dryomovf77303b2015-04-22 18:28:13 +03005598 * rbd devices * queue depth, so leave @max_active at default.
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005599 */
5600 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5601 if (!rbd_wq) {
5602 rc = -ENOMEM;
5603 goto err_out_slab;
5604 }
5605
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005606 if (single_major) {
5607 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5608 if (rbd_major < 0) {
5609 rc = rbd_major;
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005610 goto err_out_wq;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005611 }
5612 }
5613
Alex Elder1c2a9df2013-05-01 12:43:03 -05005614 rc = rbd_sysfs_init();
5615 if (rc)
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005616 goto err_out_blkdev;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005617
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005618 if (single_major)
5619 pr_info("loaded (major %d)\n", rbd_major);
5620 else
5621 pr_info("loaded\n");
5622
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005623 return 0;
5624
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005625err_out_blkdev:
5626 if (single_major)
5627 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005628err_out_wq:
5629 destroy_workqueue(rbd_wq);
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005630err_out_slab:
5631 rbd_slab_exit();
Alex Elder1c2a9df2013-05-01 12:43:03 -05005632 return rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005633}
5634
Alex Eldercc344fa2013-02-19 12:25:56 -06005635static void __exit rbd_exit(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005636{
Ilya Dryomovffe312c2014-05-20 15:46:04 +04005637 ida_destroy(&rbd_dev_id_ida);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005638 rbd_sysfs_cleanup();
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005639 if (single_major)
5640 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005641 destroy_workqueue(rbd_wq);
Alex Elder1c2a9df2013-05-01 12:43:03 -05005642 rbd_slab_exit();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005643}
5644
5645module_init(rbd_init);
5646module_exit(rbd_exit);
5647
Alex Elderd552c612013-05-31 20:13:09 -05005648MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005649MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5650MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005651/* following authorship retained from original osdblk.c */
5652MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5653
Ilya Dryomov90da2582013-12-13 15:28:56 +02005654MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005655MODULE_LICENSE("GPL");