
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

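/*
 * In single-major mode every image shares one major number and each
 * device id claims a contiguous block of minor numbers; the shift
 * below leaves room for 2^RBD_SINGLE_MAJOR_PART_SHIFT partitions per
 * mapped image.
 */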
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

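/*
 * sysfs bus attributes: writing to "add"/"remove" maps and unmaps
 * images.  The *_single_major variants are only made visible when the
 * single_major module parameter is set (see rbd_bus_is_visible()).
 */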
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

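/*
 * Open callback for the block device: fail with -EROFS when a
 * read-only mapping is opened for write, and with -ENOENT when the
 * device is in the process of being removed.
 */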
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

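/*
 * Handle BLKROSET: update the mapping's read-only flag.  Making a
 * snapshot mapping writable is refused, and the change is rejected
 * while other openers hold the device.
 */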
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots don't allow writes */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false

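/*
 * Parse a single token from the rbd map options string and update the
 * rbd_options structure accordingly.  This is the extra-token callback
 * handed to ceph_parse_options() for options libceph does not consume.
 */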
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

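/* Human-readable name of an object operation, for log and error messages. */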
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

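/*
 * Sanity-check a format 1 on-disk image header before trusting it:
 * verify the magic text, the object order, and that the snapshot
 * metadata sizes fit in a size_t.
 */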
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

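/*
 * Look up the image size at the given snapshot id (CEPH_NOSNAP means
 * the mapped head).  Format 1 answers from the cached header; format 2
 * queries the OSDs via _rbd_dev_v2_snap_size().
 */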
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

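/*
 * Record the size and feature bits of the snapshot (or head) being
 * mapped in rbd_dev->mapping.
 */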
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

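/*
 * Build the object name of the segment containing the given image
 * offset: "<object_prefix>.<segment>", with the segment number printed
 * as 12 hex digits for format 1 images and 16 for format 2.
 */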
Alex Elder98571b52013-01-20 14:44:42 -06001176static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001177{
Alex Elder65ccfe22012-08-09 10:33:26 -07001178 char *name;
1179 u64 segment;
1180 int ret;
Josh Durgin3a96d5c2013-06-12 19:15:06 -07001181 char *name_format;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001182
Alex Elder78c2a442013-05-01 12:43:04 -05001183 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
Alex Elder65ccfe22012-08-09 10:33:26 -07001184 if (!name)
1185 return NULL;
1186 segment = offset >> rbd_dev->header.obj_order;
Josh Durgin3a96d5c2013-06-12 19:15:06 -07001187 name_format = "%s.%012llx";
1188 if (rbd_dev->image_format == 2)
1189 name_format = "%s.%016llx";
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02001190 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
Alex Elder65ccfe22012-08-09 10:33:26 -07001191 rbd_dev->header.object_prefix, segment);
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02001192 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
Alex Elder65ccfe22012-08-09 10:33:26 -07001193 pr_err("error formatting segment name for #%llu (%d)\n",
1194 segment, ret);
Himangi Saraogi7d5079a2014-07-24 03:17:07 +05301195 rbd_segment_name_free(name);
Alex Elder65ccfe22012-08-09 10:33:26 -07001196 name = NULL;
1197 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001198
Alex Elder65ccfe22012-08-09 10:33:26 -07001199 return name;
1200}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001201
Alex Elder65ccfe22012-08-09 10:33:26 -07001202static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1203{
1204 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001205
Alex Elder65ccfe22012-08-09 10:33:26 -07001206 return offset & (segment_size - 1);
1207}
1208
1209static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1210 u64 offset, u64 length)
1211{
1212 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1213
1214 offset &= segment_size - 1;
1215
Alex Elderaafb2302012-09-06 16:00:54 -05001216 rbd_assert(length <= U64_MAX - offset);
Alex Elder65ccfe22012-08-09 10:33:26 -07001217 if (offset + length > segment_size)
1218 length = segment_size - offset;
1219
1220 return length;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001221}
1222
1223/*
Josh Durgin029bcbd2011-07-22 11:35:23 -07001224 * returns the size of an object in the image
1225 */
1226static u64 rbd_obj_bytes(struct rbd_image_header *header)
1227{
1228 return 1 << header->obj_order;
1229}
1230
1231/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001232 * bio helpers
1233 */
1234
1235static void bio_chain_put(struct bio *chain)
1236{
1237 struct bio *tmp;
1238
1239 while (chain) {
1240 tmp = chain;
1241 chain = chain->bi_next;
1242 bio_put(tmp);
1243 }
1244}
1245
1246/*
1247 * zeros a bio chain, starting at specific offset
1248 */
1249static void zero_bio_chain(struct bio *chain, int start_ofs)
1250{
Kent Overstreet79886132013-11-23 17:19:00 -08001251 struct bio_vec bv;
1252 struct bvec_iter iter;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001253 unsigned long flags;
1254 void *buf;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001255 int pos = 0;
1256
1257 while (chain) {
Kent Overstreet79886132013-11-23 17:19:00 -08001258 bio_for_each_segment(bv, chain, iter) {
1259 if (pos + bv.bv_len > start_ofs) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001260 int remainder = max(start_ofs - pos, 0);
Kent Overstreet79886132013-11-23 17:19:00 -08001261 buf = bvec_kmap_irq(&bv, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001262 memset(buf + remainder, 0,
Kent Overstreet79886132013-11-23 17:19:00 -08001263 bv.bv_len - remainder);
1264 flush_dcache_page(bv.bv_page);
Dan Carpenter85b5aaa2010-10-11 21:15:11 +02001265 bvec_kunmap_irq(buf, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001266 }
Kent Overstreet79886132013-11-23 17:19:00 -08001267 pos += bv.bv_len;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001268 }
1269
1270 chain = chain->bi_next;
1271 }
1272}
1273
1274/*
Alex Elderb9434c52013-04-19 15:34:50 -05001275 * similar to zero_bio_chain(), zeros data defined by a page array,
1276 * starting at the given byte offset from the start of the array and
1277 * continuing up to the given end offset. The pages array is
1278 * assumed to be big enough to hold all bytes up to the end.
1279 */
1280static void zero_pages(struct page **pages, u64 offset, u64 end)
1281{
1282 struct page **page = &pages[offset >> PAGE_SHIFT];
1283
1284 rbd_assert(end > offset);
1285 rbd_assert(end - offset <= (u64)SIZE_MAX);
1286 while (offset < end) {
1287 size_t page_offset;
1288 size_t length;
1289 unsigned long flags;
1290 void *kaddr;
1291
Geert Uytterhoeven491205a2013-05-13 20:35:37 -05001292 page_offset = offset & ~PAGE_MASK;
1293 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
Alex Elderb9434c52013-04-19 15:34:50 -05001294 local_irq_save(flags);
1295 kaddr = kmap_atomic(*page);
1296 memset(kaddr + page_offset, 0, length);
Alex Eldere2156052013-05-22 20:54:25 -05001297 flush_dcache_page(*page);
Alex Elderb9434c52013-04-19 15:34:50 -05001298 kunmap_atomic(kaddr);
1299 local_irq_restore(flags);
1300
1301 offset += length;
1302 page++;
1303 }
1304}
1305
1306/*
Alex Elderf7760da2012-10-20 22:17:27 -05001307 * Clone a portion of a bio, starting at the given byte offset
1308 * and continuing for the number of bytes indicated.
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001309 */
Alex Elderf7760da2012-10-20 22:17:27 -05001310static struct bio *bio_clone_range(struct bio *bio_src,
1311 unsigned int offset,
1312 unsigned int len,
1313 gfp_t gfpmask)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001314{
Alex Elderf7760da2012-10-20 22:17:27 -05001315 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001316
Kent Overstreet5341a6272013-08-07 14:31:11 -07001317 bio = bio_clone(bio_src, gfpmask);
Alex Elderf7760da2012-10-20 22:17:27 -05001318 if (!bio)
1319 return NULL; /* ENOMEM */
1320
Kent Overstreet5341a6272013-08-07 14:31:11 -07001321 bio_advance(bio, offset);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001322 bio->bi_iter.bi_size = len;
Alex Elder542582f2012-08-09 10:33:25 -07001323
Alex Elderf7760da2012-10-20 22:17:27 -05001324 return bio;
1325}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001326
Alex Elderf7760da2012-10-20 22:17:27 -05001327/*
1328 * Clone a portion of a bio chain, starting at the given byte offset
1329 * into the first bio in the source chain and continuing for the
1330 * number of bytes indicated. The result is another bio chain of
1331 * exactly the given length, or a null pointer on error.
1332 *
1333 * The bio_src and offset parameters are both in-out. On entry they
1334 * refer to the first source bio and the offset into that bio where
1335 * the start of data to be cloned is located.
1336 *
1337 * On return, bio_src is updated to refer to the bio in the source
1338 * chain that contains the first un-cloned byte, and *offset will
1339 * contain the offset of that byte within that bio.
1340 */
1341static struct bio *bio_chain_clone_range(struct bio **bio_src,
1342 unsigned int *offset,
1343 unsigned int len,
1344 gfp_t gfpmask)
1345{
1346 struct bio *bi = *bio_src;
1347 unsigned int off = *offset;
1348 struct bio *chain = NULL;
1349 struct bio **end;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001350
Alex Elderf7760da2012-10-20 22:17:27 -05001351 /* Build up a chain of clone bios up to the limit */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001352
Kent Overstreet4f024f32013-10-11 15:44:27 -07001353 if (!bi || off >= bi->bi_iter.bi_size || !len)
Alex Elderf7760da2012-10-20 22:17:27 -05001354 return NULL; /* Nothing to clone */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001355
Alex Elderf7760da2012-10-20 22:17:27 -05001356 end = &chain;
1357 while (len) {
1358 unsigned int bi_size;
1359 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001360
Alex Elderf5400b72012-11-01 10:17:15 -05001361 if (!bi) {
1362 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
Alex Elderf7760da2012-10-20 22:17:27 -05001363 goto out_err; /* EINVAL; ran out of bio's */
Alex Elderf5400b72012-11-01 10:17:15 -05001364 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001365 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
Alex Elderf7760da2012-10-20 22:17:27 -05001366 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1367 if (!bio)
1368 goto out_err; /* ENOMEM */
1369
1370 *end = bio;
1371 end = &bio->bi_next;
1372
1373 off += bi_size;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001374 if (off == bi->bi_iter.bi_size) {
Alex Elderf7760da2012-10-20 22:17:27 -05001375 bi = bi->bi_next;
1376 off = 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001377 }
Alex Elderf7760da2012-10-20 22:17:27 -05001378 len -= bi_size;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001379 }
Alex Elderf7760da2012-10-20 22:17:27 -05001380 *bio_src = bi;
1381 *offset = off;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001382
Alex Elderf7760da2012-10-20 22:17:27 -05001383 return chain;
1384out_err:
1385 bio_chain_put(chain);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001386
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001387 return NULL;
1388}
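/*
 * Minimal usage sketch (illustrative only, not part of the driver):
 * the hypothetical split_first_piece() below shows how a caller is
 * expected to drive bio_chain_clone_range(), passing &bio and &off
 * back in so that a later call resumes at the first un-cloned byte.
 */
#if 0
static struct bio *split_first_piece(struct bio *bio, unsigned int piece_len)
{
	unsigned int off = 0;
	struct bio *piece;

	/* clone the first piece_len bytes; bio/off now point past them */
	piece = bio_chain_clone_range(&bio, &off, piece_len, GFP_NOIO);
	if (!piece)
		return NULL;	/* ENOMEM, or the source chain ran out */

	/* a second call starting from (bio, off) would continue the split */
	return piece;
}
#endif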
1389
Alex Elder926f9b32013-02-11 12:33:24 -06001390/*
1391 * The default/initial value for all object request flags is 0. For
1392 * each flag, once its value is set to 1 it is never reset to 0
1393 * again.
1394 */
Alex Elder6365d332013-02-11 12:33:24 -06001395static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1396{
1397 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
Alex Elder6365d332013-02-11 12:33:24 -06001398 struct rbd_device *rbd_dev;
1399
Alex Elder57acbaa2013-02-11 12:33:24 -06001400 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001401 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
Alex Elder6365d332013-02-11 12:33:24 -06001402 obj_request);
1403 }
1404}
1405
1406static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1407{
1408 smp_mb();
1409 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1410}
1411
Alex Elder57acbaa2013-02-11 12:33:24 -06001412static void obj_request_done_set(struct rbd_obj_request *obj_request)
1413{
1414 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1415 struct rbd_device *rbd_dev = NULL;
1416
1417 if (obj_request_img_data_test(obj_request))
1418 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001419 rbd_warn(rbd_dev, "obj_request %p already marked done",
Alex Elder57acbaa2013-02-11 12:33:24 -06001420 obj_request);
1421 }
1422}
1423
1424static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1425{
1426 smp_mb();
1427 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1428}
1429
Alex Elder5679c592013-02-11 12:33:24 -06001430/*
1431 * This sets the KNOWN flag after (possibly) setting the EXISTS
1432 * flag. The latter is set based on the "exists" value provided.
1433 *
1434 * Note that for our purposes once an object exists it never goes
1435 * away again. It's possible that the responses from two existence
1436 * checks are separated by the creation of the target object, and
1437 * the first ("doesn't exist") response arrives *after* the second
1438 * ("does exist"). In that case we ignore the second one.
1439 */
1440static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1441 bool exists)
1442{
1443 if (exists)
1444 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1445 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1446 smp_mb();
1447}
1448
1449static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1450{
1451 smp_mb();
1452 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1453}
1454
1455static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1456{
1457 smp_mb();
1458 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1459}
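/*
 * Illustrative summary of the flag combinations above: !KNOWN means
 * no existence check has completed yet (EXISTS is meaningless);
 * KNOWN && !EXISTS means a check reported the object absent; and
 * KNOWN && EXISTS means some check saw the object, an answer that
 * sticks even if a later (stale) reply says otherwise, because
 * OBJ_REQ_EXISTS is never cleared.
 */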
1460
Ilya Dryomov96385562014-06-10 13:53:29 +04001461static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1462{
1463 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1464
1465 return obj_request->img_offset <
1466 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1467}
1468
Alex Elderbf0d5f502012-11-22 00:00:08 -06001469static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1470{
Alex Elder37206ee2013-02-20 17:32:08 -06001471 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1472 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001473 kref_get(&obj_request->kref);
1474}
1475
1476static void rbd_obj_request_destroy(struct kref *kref);
1477static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1478{
1479 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001480 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1481 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001482 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1483}
1484
Alex Elder0f2d5be2014-04-26 14:21:44 +04001485static void rbd_img_request_get(struct rbd_img_request *img_request)
1486{
1487 dout("%s: img %p (was %d)\n", __func__, img_request,
1488 atomic_read(&img_request->kref.refcount));
1489 kref_get(&img_request->kref);
1490}
1491
Alex Eldere93f3152013-05-08 22:50:04 -05001492static bool img_request_child_test(struct rbd_img_request *img_request);
1493static void rbd_parent_request_destroy(struct kref *kref);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001494static void rbd_img_request_destroy(struct kref *kref);
1495static void rbd_img_request_put(struct rbd_img_request *img_request)
1496{
1497 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001498 dout("%s: img %p (was %d)\n", __func__, img_request,
1499 atomic_read(&img_request->kref.refcount));
Alex Eldere93f3152013-05-08 22:50:04 -05001500 if (img_request_child_test(img_request))
1501 kref_put(&img_request->kref, rbd_parent_request_destroy);
1502 else
1503 kref_put(&img_request->kref, rbd_img_request_destroy);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001504}
1505
1506static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1507 struct rbd_obj_request *obj_request)
1508{
Alex Elder25dcf952013-01-25 17:08:55 -06001509 rbd_assert(obj_request->img_request == NULL);
1510
Alex Elderb155e862013-04-15 14:50:37 -05001511 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001512 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001513 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001514 rbd_assert(!obj_request_img_data_test(obj_request));
1515 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001516 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001517 img_request->obj_request_count++;
1518 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001519 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1520 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001521}
1522
1523static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1524 struct rbd_obj_request *obj_request)
1525{
1526 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001527
Alex Elder37206ee2013-02-20 17:32:08 -06001528 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1529 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001530 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001531 rbd_assert(img_request->obj_request_count > 0);
1532 img_request->obj_request_count--;
1533 rbd_assert(obj_request->which == img_request->obj_request_count);
1534 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001535 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001536 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001537 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001538 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001539 rbd_obj_request_put(obj_request);
1540}
1541
1542static bool obj_request_type_valid(enum obj_request_type type)
1543{
1544 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001545 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001546 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001547 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001548 return true;
1549 default:
1550 return false;
1551 }
1552}
1553
Alex Elderbf0d5f502012-11-22 00:00:08 -06001554static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1555 struct rbd_obj_request *obj_request)
1556{
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001557 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001558 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1559}
1560
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001561static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1562{
1563 dout("%s %p\n", __func__, obj_request);
1564 ceph_osdc_cancel_request(obj_request->osd_req);
1565}
1566
1567/*
1568 * Wait for an object request to complete. If interrupted, cancel the
1569 * underlying osd request.
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001570 *
1571 * @timeout: in jiffies, 0 means "wait forever"
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001572 */
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001573static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1574 unsigned long timeout)
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001575{
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001576 long ret;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001577
1578 dout("%s %p\n", __func__, obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001579 ret = wait_for_completion_interruptible_timeout(
1580 &obj_request->completion,
1581 ceph_timeout_jiffies(timeout));
1582 if (ret <= 0) {
1583 if (ret == 0)
1584 ret = -ETIMEDOUT;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001585 rbd_obj_request_end(obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001586 } else {
1587 ret = 0;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001588 }
1589
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001590 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1591 return ret;
1592}
1593
1594static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1595{
1596 return __rbd_obj_request_wait(obj_request, 0);
1597}
1598
1599static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1600 unsigned long timeout)
1601{
1602 return __rbd_obj_request_wait(obj_request, timeout);
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001603}
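/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * that has submitted an object request can bound the wait; 0 means
 * "wait forever", a timeout yields -ETIMEDOUT, and an interrupt
 * yields -ERESTARTSYS, both of which cancel the underlying osd
 * request.  The helper name and the 5 second timeout below are
 * hypothetical.
 */
#if 0
static int rbd_obj_request_submit_and_wait(struct ceph_osd_client *osdc,
					   struct rbd_obj_request *obj_request)
{
	int ret = rbd_obj_request_submit(osdc, obj_request);

	if (ret)
		return ret;

	return rbd_obj_request_wait_timeout(obj_request, 5 * HZ);
}
#endif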
1604
Alex Elderbf0d5f502012-11-22 00:00:08 -06001605static void rbd_img_request_complete(struct rbd_img_request *img_request)
1606{
Alex Elder55f27e02013-04-10 12:34:25 -05001607
Alex Elder37206ee2013-02-20 17:32:08 -06001608 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001609
1610 /*
1611 * If no error occurred, compute the aggregate transfer
1612 * count for the image request. We could instead use
1613 * atomic64_cmpxchg() to update it as each object request
1614 * completes; it's not clear offhand which way is better.
1615 */
1616 if (!img_request->result) {
1617 struct rbd_obj_request *obj_request;
1618 u64 xferred = 0;
1619
1620 for_each_obj_request(img_request, obj_request)
1621 xferred += obj_request->xferred;
1622 img_request->xferred = xferred;
1623 }
1624
Alex Elderbf0d5f502012-11-22 00:00:08 -06001625 if (img_request->callback)
1626 img_request->callback(img_request);
1627 else
1628 rbd_img_request_put(img_request);
1629}
1630
Alex Elder0c425242013-02-08 09:55:49 -06001631/*
1632 * The default/initial value for all image request flags is 0. Each
1633 * is conditionally set to 1 at image request initialization time
1634 * and currently never changes thereafter.
1635 */
1636static void img_request_write_set(struct rbd_img_request *img_request)
1637{
1638 set_bit(IMG_REQ_WRITE, &img_request->flags);
1639 smp_mb();
1640}
1641
1642static bool img_request_write_test(struct rbd_img_request *img_request)
1643{
1644 smp_mb();
1645 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1646}
1647
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001648/*
1649 * Set the discard flag when the img_request is an discard request
1650 */
1651static void img_request_discard_set(struct rbd_img_request *img_request)
1652{
1653 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1654 smp_mb();
1655}
1656
1657static bool img_request_discard_test(struct rbd_img_request *img_request)
1658{
1659 smp_mb();
1660 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1661}
1662
Alex Elder9849e982013-01-24 16:13:36 -06001663static void img_request_child_set(struct rbd_img_request *img_request)
1664{
1665 set_bit(IMG_REQ_CHILD, &img_request->flags);
1666 smp_mb();
1667}
1668
Alex Eldere93f3152013-05-08 22:50:04 -05001669static void img_request_child_clear(struct rbd_img_request *img_request)
1670{
1671 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1672 smp_mb();
1673}
1674
Alex Elder9849e982013-01-24 16:13:36 -06001675static bool img_request_child_test(struct rbd_img_request *img_request)
1676{
1677 smp_mb();
1678 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1679}
1680
Alex Elderd0b2e942013-01-24 16:13:36 -06001681static void img_request_layered_set(struct rbd_img_request *img_request)
1682{
1683 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1684 smp_mb();
1685}
1686
Alex Eldera2acd002013-05-08 22:50:04 -05001687static void img_request_layered_clear(struct rbd_img_request *img_request)
1688{
1689 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1690 smp_mb();
1691}
1692
Alex Elderd0b2e942013-01-24 16:13:36 -06001693static bool img_request_layered_test(struct rbd_img_request *img_request)
1694{
1695 smp_mb();
1696 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1697}
1698
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001699static enum obj_operation_type
1700rbd_img_request_op_type(struct rbd_img_request *img_request)
1701{
1702 if (img_request_write_test(img_request))
1703 return OBJ_OP_WRITE;
1704 else if (img_request_discard_test(img_request))
1705 return OBJ_OP_DISCARD;
1706 else
1707 return OBJ_OP_READ;
1708}
1709
Alex Elder6e2a4502013-03-27 09:16:30 -05001710static void
1711rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1712{
Alex Elderb9434c52013-04-19 15:34:50 -05001713 u64 xferred = obj_request->xferred;
1714 u64 length = obj_request->length;
1715
Alex Elder6e2a4502013-03-27 09:16:30 -05001716 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1717 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001718 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001719 /*
Josh Durgin17c1cc12013-08-26 17:55:38 -07001720 * ENOENT means a hole in the image. We zero-fill the entire
1721 * length of the request. A short read also implies zero-fill
1722 * to the end of the request. An error requires the whole
1723 * length of the request to be reported finished with an error
1724 * to the block layer. In each case we update the xferred
1725 * count to indicate the whole request was satisfied.
Alex Elder6e2a4502013-03-27 09:16:30 -05001726 */
Alex Elderb9434c52013-04-19 15:34:50 -05001727 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001728 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001729 if (obj_request->type == OBJ_REQUEST_BIO)
1730 zero_bio_chain(obj_request->bio_list, 0);
1731 else
1732 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001733 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001734 } else if (xferred < length && !obj_request->result) {
1735 if (obj_request->type == OBJ_REQUEST_BIO)
1736 zero_bio_chain(obj_request->bio_list, xferred);
1737 else
1738 zero_pages(obj_request->pages, xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001739 }
Josh Durgin17c1cc12013-08-26 17:55:38 -07001740 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001741 obj_request_done_set(obj_request);
1742}
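/*
 * Worked example (illustrative): for a 4 MiB bio-backed read that hit
 * a hole (-ENOENT), the whole 4 MiB is zero-filled; if only 1 MiB was
 * transferred, the remaining 3 MiB is zero-filled.  Either way
 * obj_request->xferred is reported as the full 4 MiB so the block
 * layer sees the request as satisfied.
 */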
1743
Alex Elderbf0d5f502012-11-22 00:00:08 -06001744static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1745{
Alex Elder37206ee2013-02-20 17:32:08 -06001746 dout("%s: obj %p cb %p\n", __func__, obj_request,
1747 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001748 if (obj_request->callback)
1749 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001750 else
1751 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001752}
1753
Alex Elderc47f9372013-02-26 14:23:07 -06001754static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
Alex Elder39bf2c52013-02-26 14:23:07 -06001755{
1756 dout("%s: obj %p\n", __func__, obj_request);
1757 obj_request_done_set(obj_request);
1758}
1759
Alex Elderc47f9372013-02-26 14:23:07 -06001760static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001761{
Alex Elder57acbaa2013-02-11 12:33:24 -06001762 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001763 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001764 bool layered = false;
1765
1766 if (obj_request_img_data_test(obj_request)) {
1767 img_request = obj_request->img_request;
1768 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001769 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001770 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001771
1772 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1773 obj_request, img_request, obj_request->result,
1774 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001775 if (layered && obj_request->result == -ENOENT &&
1776 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001777 rbd_img_parent_read(obj_request);
1778 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001779 rbd_img_obj_request_read_callback(obj_request);
1780 else
1781 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001782}
1783
Alex Elderc47f9372013-02-26 14:23:07 -06001784static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001785{
Sage Weil1b83bef2013-02-25 16:11:12 -08001786 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1787 obj_request->result, obj_request->length);
1788 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001789 * There is no such thing as a successful short write. Set
1790 * it to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001791 */
1792 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001793 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001794}
1795
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001796static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1797{
1798 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1799 obj_request->result, obj_request->length);
1800 /*
1801 * There is no such thing as a successful short discard. Set
1802 * it to our originally-requested length.
1803 */
1804 obj_request->xferred = obj_request->length;
Josh Durgind0265de2014-04-07 16:54:10 -07001805 /* discarding a non-existent object is not a problem */
1806 if (obj_request->result == -ENOENT)
1807 obj_request->result = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001808 obj_request_done_set(obj_request);
1809}
1810
Alex Elderfbfab532013-02-08 09:55:48 -06001811/*
1812 * For a simple stat call there's nothing to do. We'll do more if
1813 * this is part of a write sequence for a layered image.
1814 */
Alex Elderc47f9372013-02-26 14:23:07 -06001815static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001816{
Alex Elder37206ee2013-02-20 17:32:08 -06001817 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001818 obj_request_done_set(obj_request);
1819}
1820
Ilya Dryomov27617132015-07-16 17:36:11 +03001821static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1822{
1823 dout("%s: obj %p\n", __func__, obj_request);
1824
1825 if (obj_request_img_data_test(obj_request))
1826 rbd_osd_copyup_callback(obj_request);
1827 else
1828 obj_request_done_set(obj_request);
1829}
1830
Alex Elderbf0d5f502012-11-22 00:00:08 -06001831static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1832 struct ceph_msg *msg)
1833{
1834 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001835 u16 opcode;
1836
Alex Elder37206ee2013-02-20 17:32:08 -06001837 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001838 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001839 if (obj_request_img_data_test(obj_request)) {
1840 rbd_assert(obj_request->img_request);
1841 rbd_assert(obj_request->which != BAD_WHICH);
1842 } else {
1843 rbd_assert(obj_request->which == BAD_WHICH);
1844 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001845
Sage Weil1b83bef2013-02-25 16:11:12 -08001846 if (osd_req->r_result < 0)
1847 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001848
Alex Elderc47f9372013-02-26 14:23:07 -06001849 /*
1850 * We support a 64-bit length, but ultimately it has to be
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01001851 * passed to the block layer, which just supports a 32-bit
1852 * length field.
Alex Elderc47f9372013-02-26 14:23:07 -06001853 */
Yan, Zheng7665d852016-01-07 16:48:57 +08001854 obj_request->xferred = osd_req->r_ops[0].outdata_len;
Alex Elder8b3e1a52013-01-24 16:13:36 -06001855 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001856
Alex Elder79528732013-04-03 21:32:51 -05001857 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001858 switch (opcode) {
1859 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001860 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001861 break;
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001862 case CEPH_OSD_OP_SETALLOCHINT:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001863 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1864 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001865 /* fall through */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001866 case CEPH_OSD_OP_WRITE:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001867 case CEPH_OSD_OP_WRITEFULL:
Alex Elderc47f9372013-02-26 14:23:07 -06001868 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001869 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001870 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001871 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001872 break;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001873 case CEPH_OSD_OP_DELETE:
1874 case CEPH_OSD_OP_TRUNCATE:
1875 case CEPH_OSD_OP_ZERO:
1876 rbd_osd_discard_callback(obj_request);
1877 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001878 case CEPH_OSD_OP_CALL:
Ilya Dryomov27617132015-07-16 17:36:11 +03001879 rbd_osd_call_callback(obj_request);
1880 break;
Alex Elderb8d70032012-11-30 17:53:04 -06001881 case CEPH_OSD_OP_NOTIFY_ACK:
Alex Elder9969ebc2013-01-18 12:31:10 -06001882 case CEPH_OSD_OP_WATCH:
Alex Elderc47f9372013-02-26 14:23:07 -06001883 rbd_osd_trivial_callback(obj_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06001884 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001885 default:
Ilya Dryomov9584d502014-07-11 12:11:20 +04001886 rbd_warn(NULL, "%s: unsupported op %hu",
Alex Elderbf0d5f502012-11-22 00:00:08 -06001887 obj_request->object_name, (unsigned short) opcode);
1888 break;
1889 }
1890
Alex Elder07741302013-02-05 23:41:50 -06001891 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001892 rbd_obj_request_complete(obj_request);
1893}
1894
Alex Elder9d4df012013-04-19 15:34:50 -05001895static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001896{
1897 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001898 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001899 u64 snap_id;
Alex Elder430c28c2013-04-03 21:32:51 -05001900
Alex Elder8c042b02013-04-03 01:28:58 -05001901 rbd_assert(osd_req != NULL);
Alex Elder430c28c2013-04-03 21:32:51 -05001902
Alex Elder9d4df012013-04-19 15:34:50 -05001903 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
Alex Elder8c042b02013-04-03 01:28:58 -05001904 ceph_osdc_build_request(osd_req, obj_request->offset,
Alex Elder9d4df012013-04-19 15:34:50 -05001905 NULL, snap_id, NULL);
1906}
1907
1908static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1909{
1910 struct rbd_img_request *img_request = obj_request->img_request;
1911 struct ceph_osd_request *osd_req = obj_request->osd_req;
1912 struct ceph_snap_context *snapc;
1913 struct timespec mtime = CURRENT_TIME;
1914
1915 rbd_assert(osd_req != NULL);
1916
1917 snapc = img_request ? img_request->snapc : NULL;
1918 ceph_osdc_build_request(osd_req, obj_request->offset,
1919 snapc, CEPH_NOSNAP, &mtime);
Alex Elder430c28c2013-04-03 21:32:51 -05001920}
1921
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001922/*
1923 * Create an osd request. A read request has one osd op (read).
1924 * A write request has either one (watch) or two (hint+write) osd ops.
1925 * (All rbd data writes are prefixed with an allocation hint op, but
1926 * technically osd watch is a write request, hence this distinction.)
1927 */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001928static struct ceph_osd_request *rbd_osd_req_create(
1929 struct rbd_device *rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001930 enum obj_operation_type op_type,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001931 unsigned int num_ops,
Alex Elder430c28c2013-04-03 21:32:51 -05001932 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001933{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001934 struct ceph_snap_context *snapc = NULL;
1935 struct ceph_osd_client *osdc;
1936 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001937
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001938 if (obj_request_img_data_test(obj_request) &&
1939 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
Alex Elder6365d332013-02-11 12:33:24 -06001940 struct rbd_img_request *img_request = obj_request->img_request;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001941 if (op_type == OBJ_OP_WRITE) {
1942 rbd_assert(img_request_write_test(img_request));
1943 } else {
1944 rbd_assert(img_request_discard_test(img_request));
1945 }
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001946 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001947 }
1948
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001949 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001950
1951 /* Allocate and initialize the request, for the num_ops ops */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001952
1953 osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001954 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
David Disseldorp2224d872016-04-05 11:13:39 +02001955 GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001956 if (!osd_req)
1957 return NULL; /* ENOMEM */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001958
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001959 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001960 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001961 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001962 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001963
1964 osd_req->r_callback = rbd_osd_req_callback;
1965 osd_req->r_priv = obj_request;
1966
Ilya Dryomov3c972c92014-01-27 17:40:20 +02001967 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1968 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001969
Alex Elderbf0d5f502012-11-22 00:00:08 -06001970 return osd_req;
1971}
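/*
 * Illustrative examples of the resulting op vectors: an image data
 * read carries { READ }; a data write carries { SETALLOCHINT, WRITE }
 * (or WRITEFULL for a full-object write); a discard carries a single
 * DELETE, TRUNCATE or ZERO op.  The ops themselves are filled in by
 * rbd_img_obj_request_fill() and dispatched in rbd_osd_req_callback().
 */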
1972
Alex Elder0eefd472013-04-19 15:34:50 -05001973/*
Josh Durgind3246fb2014-04-07 16:49:21 -07001974 * Create a copyup osd request based on the information in the object
1975 * request supplied. A copyup request has two or three osd ops, a
1976 * copyup method call, potentially a hint op, and a write or truncate
1977 * or zero op.
Alex Elder0eefd472013-04-19 15:34:50 -05001978 */
1979static struct ceph_osd_request *
1980rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1981{
1982 struct rbd_img_request *img_request;
1983 struct ceph_snap_context *snapc;
1984 struct rbd_device *rbd_dev;
1985 struct ceph_osd_client *osdc;
1986 struct ceph_osd_request *osd_req;
Josh Durgind3246fb2014-04-07 16:49:21 -07001987 int num_osd_ops = 3;
Alex Elder0eefd472013-04-19 15:34:50 -05001988
1989 rbd_assert(obj_request_img_data_test(obj_request));
1990 img_request = obj_request->img_request;
1991 rbd_assert(img_request);
Josh Durgind3246fb2014-04-07 16:49:21 -07001992 rbd_assert(img_request_write_test(img_request) ||
1993 img_request_discard_test(img_request));
Alex Elder0eefd472013-04-19 15:34:50 -05001994
Josh Durgind3246fb2014-04-07 16:49:21 -07001995 if (img_request_discard_test(img_request))
1996 num_osd_ops = 2;
1997
1998 /* Allocate and initialize the request, for all the ops */
Alex Elder0eefd472013-04-19 15:34:50 -05001999
2000 snapc = img_request->snapc;
2001 rbd_dev = img_request->rbd_dev;
2002 osdc = &rbd_dev->rbd_client->client->osdc;
Josh Durgind3246fb2014-04-07 16:49:21 -07002003 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
David Disseldorp2224d872016-04-05 11:13:39 +02002004 false, GFP_NOIO);
Alex Elder0eefd472013-04-19 15:34:50 -05002005 if (!osd_req)
2006 return NULL; /* ENOMEM */
2007
2008 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
2009 osd_req->r_callback = rbd_osd_req_callback;
2010 osd_req->r_priv = obj_request;
2011
Ilya Dryomov3c972c92014-01-27 17:40:20 +02002012 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
2013 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elder0eefd472013-04-19 15:34:50 -05002014
Alex Elder0eefd472013-04-19 15:34:50 -05002015 return osd_req;
2016}
2017
2018
Alex Elderbf0d5f502012-11-22 00:00:08 -06002019static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2020{
2021 ceph_osdc_put_request(osd_req);
2022}
2023
2024/* object_name is assumed to be a non-null pointer and NUL-terminated */
2025
2026static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2027 u64 offset, u64 length,
2028 enum obj_request_type type)
2029{
2030 struct rbd_obj_request *obj_request;
2031 size_t size;
2032 char *name;
2033
2034 rbd_assert(obj_request_type_valid(type));
2035
2036 size = strlen(object_name) + 1;
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002037 name = kmalloc(size, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002038 if (!name)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002039 return NULL;
2040
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002041 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002042 if (!obj_request) {
2043 kfree(name);
2044 return NULL;
2045 }
2046
Alex Elderbf0d5f502012-11-22 00:00:08 -06002047 obj_request->object_name = memcpy(name, object_name, size);
2048 obj_request->offset = offset;
2049 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06002050 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002051 obj_request->which = BAD_WHICH;
2052 obj_request->type = type;
2053 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06002054 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002055 kref_init(&obj_request->kref);
2056
Alex Elder37206ee2013-02-20 17:32:08 -06002057 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2058 offset, length, (int)type, obj_request);
2059
Alex Elderbf0d5f502012-11-22 00:00:08 -06002060 return obj_request;
2061}
2062
2063static void rbd_obj_request_destroy(struct kref *kref)
2064{
2065 struct rbd_obj_request *obj_request;
2066
2067 obj_request = container_of(kref, struct rbd_obj_request, kref);
2068
Alex Elder37206ee2013-02-20 17:32:08 -06002069 dout("%s: obj %p\n", __func__, obj_request);
2070
Alex Elderbf0d5f502012-11-22 00:00:08 -06002071 rbd_assert(obj_request->img_request == NULL);
2072 rbd_assert(obj_request->which == BAD_WHICH);
2073
2074 if (obj_request->osd_req)
2075 rbd_osd_req_destroy(obj_request->osd_req);
2076
2077 rbd_assert(obj_request_type_valid(obj_request->type));
2078 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06002079 case OBJ_REQUEST_NODATA:
2080 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06002081 case OBJ_REQUEST_BIO:
2082 if (obj_request->bio_list)
2083 bio_chain_put(obj_request->bio_list);
2084 break;
Alex Elder788e2df2013-01-17 12:25:27 -06002085 case OBJ_REQUEST_PAGES:
2086 if (obj_request->pages)
2087 ceph_release_page_vector(obj_request->pages,
2088 obj_request->page_count);
2089 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002090 }
2091
Alex Elderf907ad52013-05-01 12:43:03 -05002092 kfree(obj_request->object_name);
Alex Elder868311b2013-05-01 12:43:03 -05002093 obj_request->object_name = NULL;
2094 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002095}
2096
Alex Elderfb65d2282013-05-08 22:50:04 -05002097/* It's OK to call this for a device with no parent */
2098
2099static void rbd_spec_put(struct rbd_spec *spec);
2100static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2101{
2102 rbd_dev_remove_parent(rbd_dev);
2103 rbd_spec_put(rbd_dev->parent_spec);
2104 rbd_dev->parent_spec = NULL;
2105 rbd_dev->parent_overlap = 0;
2106}
2107
Alex Elderbf0d5f502012-11-22 00:00:08 -06002108/*
Alex Eldera2acd002013-05-08 22:50:04 -05002109 * Parent image reference counting is used to determine when an
2110 * image's parent fields can be safely torn down--after there are no
2111 * more in-flight requests to the parent image. When the last
2112 * reference is dropped, cleaning them up is safe.
2113 */
2114static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2115{
2116 int counter;
2117
2118 if (!rbd_dev->parent_spec)
2119 return;
2120
2121 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2122 if (counter > 0)
2123 return;
2124
2125 /* Last reference; clean up parent data structures */
2126
2127 if (!counter)
2128 rbd_dev_unparent(rbd_dev);
2129 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04002130 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002131}
2132
2133/*
2134 * If an image has a non-zero parent overlap, get a reference to its
2135 * parent.
2136 *
2137 * Returns true if the rbd device has a parent with a non-zero
2138 * overlap and a reference for it was successfully taken, or
2139 * false otherwise.
2140 */
2141static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2142{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002143 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002144
2145 if (!rbd_dev->parent_spec)
2146 return false;
2147
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002148 down_read(&rbd_dev->header_rwsem);
2149 if (rbd_dev->parent_overlap)
2150 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2151 up_read(&rbd_dev->header_rwsem);
Alex Eldera2acd002013-05-08 22:50:04 -05002152
2153 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04002154 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002155
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002156 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002157}
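/*
 * Note on pairing (added for clarity): a successful rbd_dev_parent_get()
 * in rbd_img_request_create() is what marks an image request layered,
 * and the matching rbd_dev_parent_put() runs in rbd_img_request_destroy()
 * when that flag is cleared, so the parent spec and overlap stay valid
 * for as long as any layered request is in flight.
 */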
2158
Alex Elderbf0d5f502012-11-22 00:00:08 -06002159/*
2160 * Caller is responsible for filling in the list of object requests
2161 * that comprises the image request, and the Linux request pointer
2162 * (if there is one).
2163 */
Alex Eldercc344fa2013-02-19 12:25:56 -06002164static struct rbd_img_request *rbd_img_request_create(
2165 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06002166 u64 offset, u64 length,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002167 enum obj_operation_type op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07002168 struct ceph_snap_context *snapc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002169{
2170 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002171
Ilya Dryomov7a716aa2014-08-05 11:25:54 +04002172 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002173 if (!img_request)
2174 return NULL;
2175
Alex Elderbf0d5f502012-11-22 00:00:08 -06002176 img_request->rq = NULL;
2177 img_request->rbd_dev = rbd_dev;
2178 img_request->offset = offset;
2179 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06002180 img_request->flags = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002181 if (op_type == OBJ_OP_DISCARD) {
2182 img_request_discard_set(img_request);
2183 img_request->snapc = snapc;
2184 } else if (op_type == OBJ_OP_WRITE) {
Alex Elder0c425242013-02-08 09:55:49 -06002185 img_request_write_set(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07002186 img_request->snapc = snapc;
Alex Elder0c425242013-02-08 09:55:49 -06002187 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002188 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06002189 }
Alex Eldera2acd002013-05-08 22:50:04 -05002190 if (rbd_dev_parent_get(rbd_dev))
Alex Elderd0b2e942013-01-24 16:13:36 -06002191 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002192 spin_lock_init(&img_request->completion_lock);
2193 img_request->next_completion = 0;
2194 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06002195 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002196 img_request->obj_request_count = 0;
2197 INIT_LIST_HEAD(&img_request->obj_requests);
2198 kref_init(&img_request->kref);
2199
Alex Elder37206ee2013-02-20 17:32:08 -06002200 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002201 obj_op_name(op_type), offset, length, img_request);
Alex Elder37206ee2013-02-20 17:32:08 -06002202
Alex Elderbf0d5f502012-11-22 00:00:08 -06002203 return img_request;
2204}
2205
2206static void rbd_img_request_destroy(struct kref *kref)
2207{
2208 struct rbd_img_request *img_request;
2209 struct rbd_obj_request *obj_request;
2210 struct rbd_obj_request *next_obj_request;
2211
2212 img_request = container_of(kref, struct rbd_img_request, kref);
2213
Alex Elder37206ee2013-02-20 17:32:08 -06002214 dout("%s: img %p\n", __func__, img_request);
2215
Alex Elderbf0d5f502012-11-22 00:00:08 -06002216 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2217 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06002218 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002219
Alex Eldera2acd002013-05-08 22:50:04 -05002220 if (img_request_layered_test(img_request)) {
2221 img_request_layered_clear(img_request);
2222 rbd_dev_parent_put(img_request->rbd_dev);
2223 }
2224
Josh Durginbef95452014-04-04 17:47:52 -07002225 if (img_request_write_test(img_request) ||
2226 img_request_discard_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05002227 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002228
Alex Elder1c2a9df2013-05-01 12:43:03 -05002229 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002230}
2231
Alex Eldere93f3152013-05-08 22:50:04 -05002232static struct rbd_img_request *rbd_parent_request_create(
2233 struct rbd_obj_request *obj_request,
2234 u64 img_offset, u64 length)
2235{
2236 struct rbd_img_request *parent_request;
2237 struct rbd_device *rbd_dev;
2238
2239 rbd_assert(obj_request->img_request);
2240 rbd_dev = obj_request->img_request->rbd_dev;
2241
Josh Durgin4e752f02014-04-08 11:12:11 -07002242 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002243 length, OBJ_OP_READ, NULL);
Alex Eldere93f3152013-05-08 22:50:04 -05002244 if (!parent_request)
2245 return NULL;
2246
2247 img_request_child_set(parent_request);
2248 rbd_obj_request_get(obj_request);
2249 parent_request->obj_request = obj_request;
2250
2251 return parent_request;
2252}
2253
2254static void rbd_parent_request_destroy(struct kref *kref)
2255{
2256 struct rbd_img_request *parent_request;
2257 struct rbd_obj_request *orig_request;
2258
2259 parent_request = container_of(kref, struct rbd_img_request, kref);
2260 orig_request = parent_request->obj_request;
2261
2262 parent_request->obj_request = NULL;
2263 rbd_obj_request_put(orig_request);
2264 img_request_child_clear(parent_request);
2265
2266 rbd_img_request_destroy(kref);
2267}
2268
Alex Elder12178572013-02-08 09:55:49 -06002269static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2270{
Alex Elder6365d332013-02-11 12:33:24 -06002271 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06002272 unsigned int xferred;
2273 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002274 bool more;
Alex Elder12178572013-02-08 09:55:49 -06002275
Alex Elder6365d332013-02-11 12:33:24 -06002276 rbd_assert(obj_request_img_data_test(obj_request));
2277 img_request = obj_request->img_request;
2278
Alex Elder12178572013-02-08 09:55:49 -06002279 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2280 xferred = (unsigned int)obj_request->xferred;
2281 result = obj_request->result;
2282 if (result) {
2283 struct rbd_device *rbd_dev = img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002284 enum obj_operation_type op_type;
2285
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002286 if (img_request_discard_test(img_request))
2287 op_type = OBJ_OP_DISCARD;
2288 else if (img_request_write_test(img_request))
2289 op_type = OBJ_OP_WRITE;
2290 else
2291 op_type = OBJ_OP_READ;
Alex Elder12178572013-02-08 09:55:49 -06002292
Ilya Dryomov9584d502014-07-11 12:11:20 +04002293 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002294 obj_op_name(op_type), obj_request->length,
2295 obj_request->img_offset, obj_request->offset);
Ilya Dryomov9584d502014-07-11 12:11:20 +04002296 rbd_warn(rbd_dev, " result %d xferred %x",
Alex Elder12178572013-02-08 09:55:49 -06002297 result, xferred);
2298 if (!img_request->result)
2299 img_request->result = result;
Ilya Dryomov082a75d2015-04-25 15:56:15 +03002300 /*
2301 * Need to end I/O on the entire obj_request worth of
2302 * bytes in case of error.
2303 */
2304 xferred = obj_request->length;
Alex Elder12178572013-02-08 09:55:49 -06002305 }
2306
Alex Elderf1a47392013-04-19 15:34:50 -05002307 /* Image object requests don't own their page array */
2308
2309 if (obj_request->type == OBJ_REQUEST_PAGES) {
2310 obj_request->pages = NULL;
2311 obj_request->page_count = 0;
2312 }
2313
Alex Elder8b3e1a52013-01-24 16:13:36 -06002314 if (img_request_child_test(img_request)) {
2315 rbd_assert(img_request->obj_request != NULL);
2316 more = obj_request->which < img_request->obj_request_count - 1;
2317 } else {
2318 rbd_assert(img_request->rq != NULL);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01002319
2320 more = blk_update_request(img_request->rq, result, xferred);
2321 if (!more)
2322 __blk_mq_end_request(img_request->rq, result);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002323 }
2324
2325 return more;
Alex Elder12178572013-02-08 09:55:49 -06002326}
2327
Alex Elder21692382013-04-05 01:27:12 -05002328static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2329{
2330 struct rbd_img_request *img_request;
2331 u32 which = obj_request->which;
2332 bool more = true;
2333
Alex Elder6365d332013-02-11 12:33:24 -06002334 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05002335 img_request = obj_request->img_request;
2336
2337 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2338 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05002339 rbd_assert(img_request->obj_request_count > 0);
2340 rbd_assert(which != BAD_WHICH);
2341 rbd_assert(which < img_request->obj_request_count);
Alex Elder21692382013-04-05 01:27:12 -05002342
2343 spin_lock_irq(&img_request->completion_lock);
2344 if (which != img_request->next_completion)
2345 goto out;
2346
2347 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05002348 rbd_assert(more);
2349 rbd_assert(which < img_request->obj_request_count);
2350
2351 if (!obj_request_done_test(obj_request))
2352 break;
Alex Elder12178572013-02-08 09:55:49 -06002353 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002354 which++;
2355 }
2356
2357 rbd_assert(more ^ (which == img_request->obj_request_count));
2358 img_request->next_completion = which;
2359out:
2360 spin_unlock_irq(&img_request->completion_lock);
Alex Elder0f2d5be2014-04-26 14:21:44 +04002361 rbd_img_request_put(img_request);
Alex Elder21692382013-04-05 01:27:12 -05002362
2363 if (!more)
2364 rbd_img_request_complete(img_request);
2365}
2366
Alex Elderf1a47392013-04-19 15:34:50 -05002367/*
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002368 * Add individual osd ops to the given ceph_osd_request and prepare
2369 * them for submission. num_ops is the current number of
2370 * osd operations already added to the object request.
2371 */
2372static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2373 struct ceph_osd_request *osd_request,
2374 enum obj_operation_type op_type,
2375 unsigned int num_ops)
2376{
2377 struct rbd_img_request *img_request = obj_request->img_request;
2378 struct rbd_device *rbd_dev = img_request->rbd_dev;
2379 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2380 u64 offset = obj_request->offset;
2381 u64 length = obj_request->length;
2382 u64 img_end;
2383 u16 opcode;
2384
2385 if (op_type == OBJ_OP_DISCARD) {
Josh Durgind3246fb2014-04-07 16:49:21 -07002386 if (!offset && length == object_size &&
2387 (!img_request_layered_test(img_request) ||
2388 !obj_request_overlaps_parent(obj_request))) {
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002389 opcode = CEPH_OSD_OP_DELETE;
2390 } else if ((offset + length == object_size)) {
2391 opcode = CEPH_OSD_OP_TRUNCATE;
2392 } else {
2393 down_read(&rbd_dev->header_rwsem);
2394 img_end = rbd_dev->header.image_size;
2395 up_read(&rbd_dev->header_rwsem);
2396
2397 if (obj_request->img_offset + length == img_end)
2398 opcode = CEPH_OSD_OP_TRUNCATE;
2399 else
2400 opcode = CEPH_OSD_OP_ZERO;
2401 }
2402 } else if (op_type == OBJ_OP_WRITE) {
Ilya Dryomove30b7572015-10-07 17:27:17 +02002403 if (!offset && length == object_size)
2404 opcode = CEPH_OSD_OP_WRITEFULL;
2405 else
2406 opcode = CEPH_OSD_OP_WRITE;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002407 osd_req_op_alloc_hint_init(osd_request, num_ops,
2408 object_size, object_size);
2409 num_ops++;
2410 } else {
2411 opcode = CEPH_OSD_OP_READ;
2412 }
2413
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002414 if (opcode == CEPH_OSD_OP_DELETE)
Yan, Zheng144cba12015-04-27 11:09:54 +08002415 osd_req_op_init(osd_request, num_ops, opcode, 0);
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002416 else
2417 osd_req_op_extent_init(osd_request, num_ops, opcode,
2418 offset, length, 0, 0);
2419
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002420 if (obj_request->type == OBJ_REQUEST_BIO)
2421 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2422 obj_request->bio_list, length);
2423 else if (obj_request->type == OBJ_REQUEST_PAGES)
2424 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2425 obj_request->pages, length,
2426 offset & ~PAGE_MASK, false, false);
2427
2428 /* Discards are also writes */
2429 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2430 rbd_osd_req_format_write(obj_request);
2431 else
2432 rbd_osd_req_format_read(obj_request);
2433}
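/*
 * Worked example of the discard opcode selection above (illustrative,
 * 4 MiB objects assumed): discarding a whole object with no parent
 * data behind it becomes DELETE; a discard whose tail reaches the end
 * of the object (or the current end of the image) becomes TRUNCATE;
 * any other range becomes ZERO, which just punches a hole in the
 * middle of the object.
 */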
2434
2435/*
Alex Elderf1a47392013-04-19 15:34:50 -05002436 * Split up an image request into one or more object requests, each
2437 * to a different object. The "type" parameter indicates whether
2438 * "data_desc" is the pointer to the head of a list of bio
2439 * structures, or the base of a page array. In either case this
2440 * function assumes data_desc describes memory sufficient to hold
2441 * all data described by the image request.
2442 */
2443static int rbd_img_request_fill(struct rbd_img_request *img_request,
2444 enum obj_request_type type,
2445 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002446{
2447 struct rbd_device *rbd_dev = img_request->rbd_dev;
2448 struct rbd_obj_request *obj_request = NULL;
2449 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002450 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002451 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002452 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002453 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002454 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002455 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002456
Alex Elderf1a47392013-04-19 15:34:50 -05002457 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2458 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002459
Alex Elder7da22d22013-01-24 16:13:36 -06002460 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002461 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002462 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002463 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002464
2465 if (type == OBJ_REQUEST_BIO) {
2466 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002467 rbd_assert(img_offset ==
2468 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002469 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002470 pages = data_desc;
2471 }
2472
Alex Elderbf0d5f502012-11-22 00:00:08 -06002473 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002474 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002475 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002476 u64 offset;
2477 u64 length;
2478
Alex Elder7da22d22013-01-24 16:13:36 -06002479 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002480 if (!object_name)
2481 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06002482 offset = rbd_segment_offset(rbd_dev, img_offset);
2483 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002484 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002485 offset, length, type);
Alex Elder78c2a442013-05-01 12:43:04 -05002486 /* object request has its own copy of the object name */
2487 rbd_segment_name_free(object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002488 if (!obj_request)
2489 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002490
Josh Durgin03507db2013-08-27 14:45:46 -07002491 /*
2492 * set obj_request->img_request before creating the
2493 * osd_request so that it gets the right snapc
2494 */
2495 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002496
Alex Elderf1a47392013-04-19 15:34:50 -05002497 if (type == OBJ_REQUEST_BIO) {
2498 unsigned int clone_size;
2499
2500 rbd_assert(length <= (u64)UINT_MAX);
2501 clone_size = (unsigned int)length;
2502 obj_request->bio_list =
2503 bio_chain_clone_range(&bio_list,
2504 &bio_offset,
2505 clone_size,
David Disseldorp2224d872016-04-05 11:13:39 +02002506 GFP_NOIO);
Alex Elderf1a47392013-04-19 15:34:50 -05002507 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002508 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002509 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002510 unsigned int page_count;
2511
2512 obj_request->pages = pages;
2513 page_count = (u32)calc_pages_for(offset, length);
2514 obj_request->page_count = page_count;
2515 if ((offset + length) & ~PAGE_MASK)
2516 page_count--; /* more on last page */
2517 pages += page_count;
2518 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002519
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002520 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2521 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2522 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002523 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002524 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002525
Alex Elder2fa12322013-04-05 01:27:12 -05002526 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002527 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002528 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002529
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002530 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2531
2532 rbd_img_request_get(img_request);
2533
Alex Elder7da22d22013-01-24 16:13:36 -06002534 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002535 resid -= length;
2536 }
2537
2538 return 0;
2539
Alex Elderbf0d5f502012-11-22 00:00:08 -06002540out_unwind:
2541 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002542 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002543
2544 return -ENOMEM;
2545}
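/*
 * Editorial sketch (not part of the driver): the loop above walks the image
 * extent and carves it into per-object segments.  Assuming the default
 * layout, each backing object holds (1 << obj_order) bytes, so the split is
 * pure arithmetic.  The helper below is a hypothetical, illustrative
 * rendering of that arithmetic; it is not called anywhere in this file.
 */
static inline void example_split_extent(u64 img_offset, u64 resid, u8 obj_order)
{
	u64 obj_size = (u64)1 << obj_order;

	while (resid) {
		u64 obj_no = img_offset >> obj_order;		/* which object */
		u64 offset = img_offset & (obj_size - 1);	/* offset within it */
		u64 length = min(resid, obj_size - offset);

		pr_debug("object %llu: offset %llu length %llu\n",
			 (unsigned long long)obj_no,
			 (unsigned long long)offset,
			 (unsigned long long)length);

		img_offset += length;
		resid -= length;
	}
}

/*
 * For example, with 4 MiB objects (obj_order == 22), a 10 MiB request at
 * image offset 6 MiB maps to object 1 (2 MiB), object 2 (4 MiB) and
 * object 3 (4 MiB).
 */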
2546
Alex Elder3d7efd12013-04-19 15:34:50 -05002547static void
Ilya Dryomov27617132015-07-16 17:36:11 +03002548rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
Alex Elder0eefd472013-04-19 15:34:50 -05002549{
2550 struct rbd_img_request *img_request;
2551 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002552 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002553 u32 page_count;
2554
Ilya Dryomov27617132015-07-16 17:36:11 +03002555 dout("%s: obj %p\n", __func__, obj_request);
2556
Josh Durgind3246fb2014-04-07 16:49:21 -07002557 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2558 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002559 rbd_assert(obj_request_img_data_test(obj_request));
2560 img_request = obj_request->img_request;
2561 rbd_assert(img_request);
2562
2563 rbd_dev = img_request->rbd_dev;
2564 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002565
Alex Elderebda6402013-05-10 16:29:22 -05002566 pages = obj_request->copyup_pages;
2567 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002568 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002569 page_count = obj_request->copyup_page_count;
2570 rbd_assert(page_count);
2571 obj_request->copyup_page_count = 0;
2572 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002573
2574 /*
2575 * We want the transfer count to reflect the size of the
2576 * original write request. There is no such thing as a
2577 * successful short write, so if the request was successful
2578 * we can just set it to the originally-requested length.
2579 */
2580 if (!obj_request->result)
2581 obj_request->xferred = obj_request->length;
2582
Ilya Dryomov27617132015-07-16 17:36:11 +03002583 obj_request_done_set(obj_request);
Alex Elder0eefd472013-04-19 15:34:50 -05002584}
2585
2586static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002587rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2588{
2589 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002590 struct ceph_osd_request *osd_req;
2591 struct ceph_osd_client *osdc;
2592 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002593 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002594 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002595 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002596 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002597 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002598
2599 rbd_assert(img_request_child_test(img_request));
2600
2601 /* First get what we need from the image request */
2602
2603 pages = img_request->copyup_pages;
2604 rbd_assert(pages != NULL);
2605 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002606 page_count = img_request->copyup_page_count;
2607 rbd_assert(page_count);
2608 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002609
2610 orig_request = img_request->obj_request;
2611 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002612 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002613 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002614 parent_length = img_request->length;
2615 rbd_assert(parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002616 rbd_img_request_put(img_request);
2617
Alex Elder91c6feb2013-05-06 17:40:32 -05002618 rbd_assert(orig_request->img_request);
2619 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002620 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002621
Alex Elderbbea1c12013-05-06 17:40:33 -05002622 /*
2623 * If the overlap has become 0 (most likely because the
2624 * image has been flattened) we need to free the pages
2625 * and re-submit the original write request.
2626 */
2627 if (!rbd_dev->parent_overlap) {
2628 struct ceph_osd_client *osdc;
2629
2630 ceph_release_page_vector(pages, page_count);
2631 osdc = &rbd_dev->rbd_client->client->osdc;
2632 img_result = rbd_obj_request_submit(osdc, orig_request);
2633 if (!img_result)
2634 return;
2635 }
2636
2637 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002638 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002639
Alex Elder8785b1d2013-05-09 10:08:49 -05002640 /*
 2641 * The original osd request is of no use to us anymore.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002642 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002643 * request. Allocate the new copyup osd request for the
2644 * original request, and release the old one.
2645 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002646 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002647 osd_req = rbd_osd_req_create_copyup(orig_request);
2648 if (!osd_req)
2649 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002650 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002651 orig_request->osd_req = osd_req;
2652 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002653 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002654
Alex Elder0eefd472013-04-19 15:34:50 -05002655 /* Initialize the copyup op */
2656
2657 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002658 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002659 false, false);
2660
Josh Durgind3246fb2014-04-07 16:49:21 -07002661 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002662
Josh Durgind3246fb2014-04-07 16:49:21 -07002663 op_type = rbd_img_request_op_type(orig_request->img_request);
2664 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
Alex Elder0eefd472013-04-19 15:34:50 -05002665
2666 /* All set, send it off. */
2667
Alex Elder0eefd472013-04-19 15:34:50 -05002668 osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderbbea1c12013-05-06 17:40:33 -05002669 img_result = rbd_obj_request_submit(osdc, orig_request);
2670 if (!img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002671 return;
2672out_err:
2673 /* Record the error code and complete the request */
2674
Alex Elderbbea1c12013-05-06 17:40:33 -05002675 orig_request->result = img_result;
Alex Elder0eefd472013-04-19 15:34:50 -05002676 orig_request->xferred = 0;
2677 obj_request_done_set(orig_request);
2678 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002679}
2680
2681/*
2682 * Read from the parent image the range of data that covers the
2683 * entire target of the given object request. This is used for
2684 * satisfying a layered image write request when the target of an
2685 * object request from the image request does not exist.
2686 *
2687 * A page array big enough to hold the returned data is allocated
2688 * and supplied to rbd_img_request_fill() as the "data descriptor."
2689 * When the read completes, this page array will be transferred to
2690 * the original object request for the copyup operation.
2691 *
2692 * If an error occurs, record it as the result of the original
2693 * object request and mark it done so it gets completed.
2694 */
2695static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2696{
2697 struct rbd_img_request *img_request = NULL;
2698 struct rbd_img_request *parent_request = NULL;
2699 struct rbd_device *rbd_dev;
2700 u64 img_offset;
2701 u64 length;
2702 struct page **pages = NULL;
2703 u32 page_count;
2704 int result;
2705
2706 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderb91f09f2013-05-10 16:29:22 -05002707 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder3d7efd12013-04-19 15:34:50 -05002708
2709 img_request = obj_request->img_request;
2710 rbd_assert(img_request != NULL);
2711 rbd_dev = img_request->rbd_dev;
2712 rbd_assert(rbd_dev->parent != NULL);
2713
2714 /*
2715 * Determine the byte range covered by the object in the
2716 * child image to which the original request was to be sent.
2717 */
2718 img_offset = obj_request->img_offset - obj_request->offset;
2719 length = (u64)1 << rbd_dev->header.obj_order;
2720
2721 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002722 * There is no defined parent data beyond the parent
2723 * overlap, so limit what we read at that boundary if
2724 * necessary.
2725 */
2726 if (img_offset + length > rbd_dev->parent_overlap) {
2727 rbd_assert(img_offset < rbd_dev->parent_overlap);
2728 length = rbd_dev->parent_overlap - img_offset;
2729 }
2730
2731 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002732 * Allocate a page array big enough to receive the data read
2733 * from the parent.
2734 */
2735 page_count = (u32)calc_pages_for(0, length);
2736 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2737 if (IS_ERR(pages)) {
2738 result = PTR_ERR(pages);
2739 pages = NULL;
2740 goto out_err;
2741 }
2742
2743 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002744 parent_request = rbd_parent_request_create(obj_request,
2745 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002746 if (!parent_request)
2747 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002748
2749 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2750 if (result)
2751 goto out_err;
2752 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002753 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002754
2755 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2756 result = rbd_img_request_submit(parent_request);
2757 if (!result)
2758 return 0;
2759
2760 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002761 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002762 parent_request->obj_request = NULL;
2763 rbd_obj_request_put(obj_request);
2764out_err:
2765 if (pages)
2766 ceph_release_page_vector(pages, page_count);
2767 if (parent_request)
2768 rbd_img_request_put(parent_request);
2769 obj_request->result = result;
2770 obj_request->xferred = 0;
2771 obj_request_done_set(obj_request);
2772
2773 return result;
2774}
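/*
 * Editorial sketch (not part of the driver): rbd_img_obj_parent_read_full()
 * reads the whole backing object from the parent image, but the parent only
 * defines data up to rbd_dev->parent_overlap.  The hypothetical helper below
 * restates the clamping done above; the caller guarantees that img_offset is
 * below the overlap boundary.
 */
static inline u64 example_clamp_to_overlap(u64 img_offset, u64 length,
					   u64 parent_overlap)
{
	/* No parent data is defined beyond the overlap boundary. */
	if (img_offset + length > parent_overlap)
		length = parent_overlap - img_offset;

	return length;
}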
2775
Alex Elderc5b5ef62013-02-11 12:33:24 -06002776static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2777{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002778 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002779 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002780 int result;
2781
2782 rbd_assert(!obj_request_img_data_test(obj_request));
2783
2784 /*
2785 * All we need from the object request is the original
2786 * request and the result of the STAT op. Grab those, then
2787 * we're done with the request.
2788 */
2789 orig_request = obj_request->obj_request;
2790 obj_request->obj_request = NULL;
Alex Elder912c3172013-05-13 20:35:38 -05002791 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002792 rbd_assert(orig_request);
2793 rbd_assert(orig_request->img_request);
2794
2795 result = obj_request->result;
2796 obj_request->result = 0;
2797
2798 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2799 obj_request, orig_request, result,
2800 obj_request->xferred, obj_request->length);
2801 rbd_obj_request_put(obj_request);
2802
Alex Elder638f5ab2013-05-06 17:40:33 -05002803 /*
2804 * If the overlap has become 0 (most likely because the
2805 * image has been flattened) we need to free the pages
2806 * and re-submit the original write request.
2807 */
2808 rbd_dev = orig_request->img_request->rbd_dev;
2809 if (!rbd_dev->parent_overlap) {
2810 struct ceph_osd_client *osdc;
2811
Alex Elder638f5ab2013-05-06 17:40:33 -05002812 osdc = &rbd_dev->rbd_client->client->osdc;
2813 result = rbd_obj_request_submit(osdc, orig_request);
2814 if (!result)
2815 return;
2816 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002817
2818 /*
2819 * Our only purpose here is to determine whether the object
2820 * exists, and we don't want to treat the non-existence as
2821 * an error. If something else comes back, transfer the
2822 * error to the original request and complete it now.
2823 */
2824 if (!result) {
2825 obj_request_existence_set(orig_request, true);
2826 } else if (result == -ENOENT) {
2827 obj_request_existence_set(orig_request, false);
2828 } else if (result) {
2829 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002830 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002831 }
2832
2833 /*
2834 * Resubmit the original request now that we have recorded
2835 * whether the target object exists.
2836 */
Alex Elderb454e362013-04-19 15:34:50 -05002837 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002838out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002839 if (orig_request->result)
2840 rbd_obj_request_complete(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002841}
2842
2843static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2844{
2845 struct rbd_obj_request *stat_request;
2846 struct rbd_device *rbd_dev;
2847 struct ceph_osd_client *osdc;
2848 struct page **pages = NULL;
2849 u32 page_count;
2850 size_t size;
2851 int ret;
2852
2853 /*
2854 * The response data for a STAT call consists of:
2855 * le64 length;
2856 * struct {
2857 * le32 tv_sec;
2858 * le32 tv_nsec;
2859 * } mtime;
2860 */
2861 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2862 page_count = (u32)calc_pages_for(0, size);
2863 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2864 if (IS_ERR(pages))
2865 return PTR_ERR(pages);
2866
2867 ret = -ENOMEM;
2868 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2869 OBJ_REQUEST_PAGES);
2870 if (!stat_request)
2871 goto out;
2872
2873 rbd_obj_request_get(obj_request);
2874 stat_request->obj_request = obj_request;
2875 stat_request->pages = pages;
2876 stat_request->page_count = page_count;
2877
2878 rbd_assert(obj_request->img_request);
2879 rbd_dev = obj_request->img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002880 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02002881 stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002882 if (!stat_request->osd_req)
2883 goto out;
2884 stat_request->callback = rbd_img_obj_exists_callback;
2885
Yan, Zheng144cba12015-04-27 11:09:54 +08002886 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002887 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2888 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002889 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002890
2891 osdc = &rbd_dev->rbd_client->client->osdc;
2892 ret = rbd_obj_request_submit(osdc, stat_request);
2893out:
2894 if (ret)
2895 rbd_obj_request_put(obj_request);
2896
2897 return ret;
2898}
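/*
 * Editorial sketch (not part of the driver): on-wire layout of the STAT
 * reply that rbd_img_obj_exists_submit() sizes its page vector for.  The
 * struct name is hypothetical; the field sizes match the sizeof()
 * arithmetic above (8 + 4 + 4 = 16 bytes).  A result of 0 from the STAT op
 * means the object exists; -ENOENT means it does not.
 */
struct example_stat_reply {
	__le64 length;			/* object size in bytes */
	struct {
		__le32 tv_sec;
		__le32 tv_nsec;
	} mtime;			/* last modification time */
} __packed;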
2899
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002900static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002901{
2902 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002903 struct rbd_device *rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002904
2905 rbd_assert(obj_request_img_data_test(obj_request));
2906
2907 img_request = obj_request->img_request;
2908 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002909 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002910
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002911 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002912 if (!img_request_write_test(img_request) &&
2913 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002914 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002915
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002916 /* Non-layered writes */
2917 if (!img_request_layered_test(img_request))
2918 return true;
2919
2920 /*
2921 * Layered writes outside of the parent overlap range don't
2922 * share any data with the parent.
2923 */
2924 if (!obj_request_overlaps_parent(obj_request))
2925 return true;
2926
2927 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002928 * Entire-object layered writes - we will overwrite whatever
2929 * parent data there is anyway.
2930 */
2931 if (!obj_request->offset &&
2932 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2933 return true;
2934
2935 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002936 * If the object is known to already exist, its parent data has
2937 * already been copied.
2938 */
2939 if (obj_request_known_test(obj_request) &&
2940 obj_request_exists_test(obj_request))
2941 return true;
2942
2943 return false;
2944}
2945
2946static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2947{
2948 if (img_obj_request_simple(obj_request)) {
Alex Elderb454e362013-04-19 15:34:50 -05002949 struct rbd_device *rbd_dev;
2950 struct ceph_osd_client *osdc;
2951
2952 rbd_dev = obj_request->img_request->rbd_dev;
2953 osdc = &rbd_dev->rbd_client->client->osdc;
2954
2955 return rbd_obj_request_submit(osdc, obj_request);
2956 }
2957
2958 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002959 * It's a layered write. The target object might exist but
2960 * we may not know that yet. If we know it doesn't exist,
2961 * start by reading the data for the full target object from
2962 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002963 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002964 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002965 return rbd_img_obj_parent_read_full(obj_request);
2966
2967 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002968
2969 return rbd_img_obj_exists_submit(obj_request);
2970}
2971
Alex Elderbf0d5f502012-11-22 00:00:08 -06002972static int rbd_img_request_submit(struct rbd_img_request *img_request)
2973{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002974 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002975 struct rbd_obj_request *next_obj_request;
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002976 int ret = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002977
Alex Elder37206ee2013-02-20 17:32:08 -06002978 dout("%s: img %p\n", __func__, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002979
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002980 rbd_img_request_get(img_request);
2981 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderb454e362013-04-19 15:34:50 -05002982 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002983 if (ret)
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002984 goto out_put_ireq;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002985 }
2986
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002987out_put_ireq:
2988 rbd_img_request_put(img_request);
2989 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002990}
2991
Alex Elder8b3e1a52013-01-24 16:13:36 -06002992static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2993{
2994 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002995 struct rbd_device *rbd_dev;
2996 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05002997 u64 img_xferred;
2998 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002999
3000 rbd_assert(img_request_child_test(img_request));
3001
Alex Elder02c74fb2013-05-06 17:40:33 -05003002 /* First get what we need from the image request and release it */
3003
Alex Elder8b3e1a52013-01-24 16:13:36 -06003004 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05003005 img_xferred = img_request->xferred;
3006 img_result = img_request->result;
3007 rbd_img_request_put(img_request);
3008
3009 /*
3010 * If the overlap has become 0 (most likely because the
3011 * image has been flattened) we need to re-submit the
3012 * original request.
3013 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003014 rbd_assert(obj_request);
3015 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05003016 rbd_dev = obj_request->img_request->rbd_dev;
3017 if (!rbd_dev->parent_overlap) {
3018 struct ceph_osd_client *osdc;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003019
Alex Elder02c74fb2013-05-06 17:40:33 -05003020 osdc = &rbd_dev->rbd_client->client->osdc;
3021 img_result = rbd_obj_request_submit(osdc, obj_request);
3022 if (!img_result)
3023 return;
3024 }
3025
3026 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003027 if (obj_request->result)
3028 goto out;
3029
3030 /*
3031 * We need to zero anything beyond the parent overlap
3032 * boundary. Since rbd_img_obj_request_read_callback()
3033 * will zero anything beyond the end of a short read, an
3034 * easy way to do this is to pretend the data from the
3035 * parent came up short--ending at the overlap boundary.
3036 */
3037 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3038 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003039 if (obj_end > rbd_dev->parent_overlap) {
3040 u64 xferred = 0;
3041
3042 if (obj_request->img_offset < rbd_dev->parent_overlap)
3043 xferred = rbd_dev->parent_overlap -
3044 obj_request->img_offset;
3045
Alex Elder02c74fb2013-05-06 17:40:33 -05003046 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003047 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05003048 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003049 }
3050out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06003051 rbd_img_obj_request_read_callback(obj_request);
3052 rbd_obj_request_complete(obj_request);
3053}
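/*
 * Editorial sketch (not part of the driver): how rbd_img_parent_read_callback()
 * truncates the parent read at the overlap boundary so the ordinary
 * short-read zeroing fills in the rest of the buffer.  The helper name is
 * hypothetical.
 */
static inline u64 example_parent_xferred(u64 img_offset, u64 length,
					 u64 img_xferred, u64 parent_overlap)
{
	if (img_offset + length <= parent_overlap)
		return img_xferred;	/* fully covered by the parent */

	/* Pretend the parent data ended at the overlap boundary. */
	if (img_offset < parent_overlap)
		return min(img_xferred, parent_overlap - img_offset);

	return 0;			/* nothing defined past the overlap */
}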
3054
3055static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3056{
Alex Elder8b3e1a52013-01-24 16:13:36 -06003057 struct rbd_img_request *img_request;
3058 int result;
3059
3060 rbd_assert(obj_request_img_data_test(obj_request));
3061 rbd_assert(obj_request->img_request != NULL);
3062 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003063 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003064
Alex Elder8b3e1a52013-01-24 16:13:36 -06003065 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003066 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003067 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003068 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003069 result = -ENOMEM;
3070 if (!img_request)
3071 goto out_err;
3072
Alex Elder5b2ab722013-05-06 17:40:33 -05003073 if (obj_request->type == OBJ_REQUEST_BIO)
3074 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3075 obj_request->bio_list);
3076 else
3077 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3078 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003079 if (result)
3080 goto out_err;
3081
3082 img_request->callback = rbd_img_parent_read_callback;
3083 result = rbd_img_request_submit(img_request);
3084 if (result)
3085 goto out_err;
3086
3087 return;
3088out_err:
3089 if (img_request)
3090 rbd_img_request_put(img_request);
3091 obj_request->result = result;
3092 obj_request->xferred = 0;
3093 obj_request_done_set(obj_request);
3094}
3095
Josh Durgin20e0af62013-08-29 17:36:03 -07003096static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
Alex Elderb8d70032012-11-30 17:53:04 -06003097{
3098 struct rbd_obj_request *obj_request;
Alex Elder21692382013-04-05 01:27:12 -05003099 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06003100 int ret;
3101
3102 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3103 OBJ_REQUEST_NODATA);
3104 if (!obj_request)
3105 return -ENOMEM;
3106
3107 ret = -ENOMEM;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003108 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003109 obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003110 if (!obj_request->osd_req)
3111 goto out;
3112
Alex Elderc99d2d42013-04-05 01:27:11 -05003113 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003114 notify_id, 0, 0);
Alex Elder9d4df012013-04-19 15:34:50 -05003115 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003116
Alex Elderb8d70032012-11-30 17:53:04 -06003117 ret = rbd_obj_request_submit(osdc, obj_request);
Alex Eldercf81b602013-01-17 12:18:46 -06003118 if (ret)
Josh Durgin20e0af62013-08-29 17:36:03 -07003119 goto out;
3120 ret = rbd_obj_request_wait(obj_request);
3121out:
3122 rbd_obj_request_put(obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003123
3124 return ret;
3125}
3126
3127static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3128{
3129 struct rbd_device *rbd_dev = (struct rbd_device *)data;
Alex Eldere627db02013-05-06 07:40:30 -05003130 int ret;
Alex Elderb8d70032012-11-30 17:53:04 -06003131
Alex Elder37206ee2013-02-20 17:32:08 -06003132 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003133 rbd_dev->header_name, (unsigned long long)notify_id,
3134 (unsigned int)opcode);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003135
3136 /*
3137 * Until adequate refresh error handling is in place, there is
3138 * not much we can do here, except warn.
3139 *
3140 * See http://tracker.ceph.com/issues/5040
3141 */
Alex Eldere627db02013-05-06 07:40:30 -05003142 ret = rbd_dev_refresh(rbd_dev);
3143 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003144 rbd_warn(rbd_dev, "refresh failed: %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003145
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003146 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3147 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003148 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003149}
3150
Alex Elder9969ebc2013-01-18 12:31:10 -06003151/*
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003152 * Send a (un)watch request and wait for the ack. On success, return the
 3153 * request with a ref held; on error, release the ref and return an ERR_PTR.
3154 */
3155static struct rbd_obj_request *rbd_obj_watch_request_helper(
3156 struct rbd_device *rbd_dev,
3157 bool watch)
3158{
3159 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003160 struct ceph_options *opts = osdc->client->options;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003161 struct rbd_obj_request *obj_request;
3162 int ret;
3163
3164 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3165 OBJ_REQUEST_NODATA);
3166 if (!obj_request)
3167 return ERR_PTR(-ENOMEM);
3168
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003169 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003170 obj_request);
3171 if (!obj_request->osd_req) {
3172 ret = -ENOMEM;
3173 goto out;
3174 }
3175
3176 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3177 rbd_dev->watch_event->cookie, 0, watch);
3178 rbd_osd_req_format_write(obj_request);
3179
3180 if (watch)
3181 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3182
3183 ret = rbd_obj_request_submit(osdc, obj_request);
3184 if (ret)
3185 goto out;
3186
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003187 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003188 if (ret)
3189 goto out;
3190
3191 ret = obj_request->result;
3192 if (ret) {
3193 if (watch)
3194 rbd_obj_request_end(obj_request);
3195 goto out;
3196 }
3197
3198 return obj_request;
3199
3200out:
3201 rbd_obj_request_put(obj_request);
3202 return ERR_PTR(ret);
3203}
3204
3205/*
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003206 * Initiate a watch request, synchronously.
Alex Elder9969ebc2013-01-18 12:31:10 -06003207 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003208static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003209{
3210 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3211 struct rbd_obj_request *obj_request;
Alex Elder9969ebc2013-01-18 12:31:10 -06003212 int ret;
3213
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003214 rbd_assert(!rbd_dev->watch_event);
3215 rbd_assert(!rbd_dev->watch_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06003216
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003217 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3218 &rbd_dev->watch_event);
3219 if (ret < 0)
3220 return ret;
Alex Elder9969ebc2013-01-18 12:31:10 -06003221
Ilya Dryomov76756a52014-06-20 18:29:20 +04003222 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3223 if (IS_ERR(obj_request)) {
3224 ceph_osdc_cancel_event(rbd_dev->watch_event);
3225 rbd_dev->watch_event = NULL;
3226 return PTR_ERR(obj_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003227 }
Alex Elder9969ebc2013-01-18 12:31:10 -06003228
Alex Elder8eb87562013-01-25 17:08:55 -06003229 /*
3230 * A watch request is set to linger, so the underlying osd
3231 * request won't go away until we unregister it. We retain
3232 * a pointer to the object request during that time (in
Ilya Dryomov76756a52014-06-20 18:29:20 +04003233 * rbd_dev->watch_request), so we'll keep a reference to it.
3234 * We'll drop that reference after we've unregistered it in
3235 * rbd_dev_header_unwatch_sync().
Alex Elder8eb87562013-01-25 17:08:55 -06003236 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003237 rbd_dev->watch_request = obj_request;
Alex Elder8eb87562013-01-25 17:08:55 -06003238
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003239 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003240}
3241
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003242/*
3243 * Tear down a watch request, synchronously.
3244 */
Ilya Dryomov76756a52014-06-20 18:29:20 +04003245static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003246{
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003247 struct rbd_obj_request *obj_request;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003248
3249 rbd_assert(rbd_dev->watch_event);
3250 rbd_assert(rbd_dev->watch_request);
3251
Ilya Dryomov76756a52014-06-20 18:29:20 +04003252 rbd_obj_request_end(rbd_dev->watch_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003253 rbd_obj_request_put(rbd_dev->watch_request);
3254 rbd_dev->watch_request = NULL;
3255
Ilya Dryomov76756a52014-06-20 18:29:20 +04003256 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3257 if (!IS_ERR(obj_request))
3258 rbd_obj_request_put(obj_request);
3259 else
3260 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3261 PTR_ERR(obj_request));
3262
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003263 ceph_osdc_cancel_event(rbd_dev->watch_event);
3264 rbd_dev->watch_event = NULL;
Ilya Dryomov811c6682016-04-15 16:22:16 +02003265
3266 dout("%s flushing notifies\n", __func__);
3267 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02003268}
3269
Alex Elder36be9a72013-01-19 00:30:28 -06003270/*
Alex Elderf40eb342013-04-25 15:09:42 -05003271 * Synchronous osd object method call. Returns the number of bytes
 3272 * returned in the inbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003273 */
3274static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3275 const char *object_name,
3276 const char *class_name,
3277 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003278 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003279 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003280 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003281 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003282{
Alex Elder21692382013-04-05 01:27:12 -05003283 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06003284 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06003285 struct page **pages;
3286 u32 page_count;
3287 int ret;
3288
3289 /*
Alex Elder6010a452013-04-05 01:27:11 -05003290 * Method calls are ultimately read operations. The result
3291 * should placed into the inbound buffer provided. They
3292 * also supply outbound data--parameters for the object
3293 * method. Currently if this is present it will be a
3294 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003295 */
Alex Elder57385b52013-04-21 12:14:45 -05003296 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06003297 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3298 if (IS_ERR(pages))
3299 return PTR_ERR(pages);
3300
3301 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05003302 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06003303 OBJ_REQUEST_PAGES);
3304 if (!obj_request)
3305 goto out;
3306
3307 obj_request->pages = pages;
3308 obj_request->page_count = page_count;
3309
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003310 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003311 obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06003312 if (!obj_request->osd_req)
3313 goto out;
3314
Alex Elderc99d2d42013-04-05 01:27:11 -05003315 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05003316 class_name, method_name);
3317 if (outbound_size) {
3318 struct ceph_pagelist *pagelist;
3319
3320 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3321 if (!pagelist)
3322 goto out;
3323
3324 ceph_pagelist_init(pagelist);
3325 ceph_pagelist_append(pagelist, outbound, outbound_size);
3326 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3327 pagelist);
3328 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05003329 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3330 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05003331 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003332 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003333
Alex Elder36be9a72013-01-19 00:30:28 -06003334 ret = rbd_obj_request_submit(osdc, obj_request);
3335 if (ret)
3336 goto out;
3337 ret = rbd_obj_request_wait(obj_request);
3338 if (ret)
3339 goto out;
3340
3341 ret = obj_request->result;
3342 if (ret < 0)
3343 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05003344
3345 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3346 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003347 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06003348out:
3349 if (obj_request)
3350 rbd_obj_request_put(obj_request);
3351 else
3352 ceph_release_page_vector(pages, page_count);
3353
3354 return ret;
3355}
3356
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003357static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003358{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003359 struct request *rq = blk_mq_rq_from_pdu(work);
3360 struct rbd_device *rbd_dev = rq->q->queuedata;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003361 struct rbd_img_request *img_request;
Josh Durgin4e752f02014-04-08 11:12:11 -07003362 struct ceph_snap_context *snapc = NULL;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003363 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3364 u64 length = blk_rq_bytes(rq);
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003365 enum obj_operation_type op_type;
Josh Durgin4e752f02014-04-08 11:12:11 -07003366 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003367 int result;
3368
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003369 if (rq->cmd_type != REQ_TYPE_FS) {
3370 dout("%s: non-fs request type %d\n", __func__,
3371 (int) rq->cmd_type);
3372 result = -EIO;
3373 goto err;
3374 }
3375
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003376 if (rq->cmd_flags & REQ_DISCARD)
3377 op_type = OBJ_OP_DISCARD;
3378 else if (rq->cmd_flags & REQ_WRITE)
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003379 op_type = OBJ_OP_WRITE;
3380 else
3381 op_type = OBJ_OP_READ;
3382
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003383 /* Ignore/skip any zero-length requests */
3384
3385 if (!length) {
3386 dout("%s: zero-length request\n", __func__);
3387 result = 0;
3388 goto err_rq;
3389 }
3390
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003391 /* Only reads are allowed to a read-only device */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003392
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003393 if (op_type != OBJ_OP_READ) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003394 if (rbd_dev->mapping.read_only) {
3395 result = -EROFS;
3396 goto err_rq;
3397 }
3398 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3399 }
3400
3401 /*
3402 * Quit early if the mapped snapshot no longer exists. It's
3403 * still possible the snapshot will have disappeared by the
3404 * time our request arrives at the osd, but there's no sense in
3405 * sending it if we already know.
3406 */
3407 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3408 dout("request for non-existent snapshot");
3409 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3410 result = -ENXIO;
3411 goto err_rq;
3412 }
3413
3414 if (offset && length > U64_MAX - offset + 1) {
3415 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3416 length);
3417 result = -EINVAL;
3418 goto err_rq; /* Shouldn't happen */
3419 }
3420
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003421 blk_mq_start_request(rq);
3422
Josh Durgin4e752f02014-04-08 11:12:11 -07003423 down_read(&rbd_dev->header_rwsem);
3424 mapping_size = rbd_dev->mapping.size;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003425 if (op_type != OBJ_OP_READ) {
Josh Durgin4e752f02014-04-08 11:12:11 -07003426 snapc = rbd_dev->header.snapc;
3427 ceph_get_snap_context(snapc);
3428 }
3429 up_read(&rbd_dev->header_rwsem);
3430
3431 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003432 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07003433 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003434 result = -EIO;
3435 goto err_rq;
3436 }
3437
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003438 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07003439 snapc);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003440 if (!img_request) {
3441 result = -ENOMEM;
3442 goto err_rq;
3443 }
3444 img_request->rq = rq;
Ilya Dryomov70b16db2015-11-27 19:23:24 +01003445 snapc = NULL; /* img_request consumes a ref */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003446
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003447 if (op_type == OBJ_OP_DISCARD)
3448 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3449 NULL);
3450 else
3451 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3452 rq->bio);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003453 if (result)
3454 goto err_img_request;
3455
3456 result = rbd_img_request_submit(img_request);
3457 if (result)
3458 goto err_img_request;
3459
3460 return;
3461
3462err_img_request:
3463 rbd_img_request_put(img_request);
3464err_rq:
3465 if (result)
3466 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003467 obj_op_name(op_type), length, offset, result);
SF Markus Elfringe96a6502014-11-02 15:20:59 +01003468 ceph_put_snap_context(snapc);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003469err:
3470 blk_mq_end_request(rq, result);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003471}
3472
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003473static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3474 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003475{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003476 struct request *rq = bd->rq;
3477 struct work_struct *work = blk_mq_rq_to_pdu(rq);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003478
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003479 queue_work(rbd_wq, work);
3480 return BLK_MQ_RQ_QUEUE_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06003481}
3482
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003483static void rbd_free_disk(struct rbd_device *rbd_dev)
3484{
3485 struct gendisk *disk = rbd_dev->disk;
3486
3487 if (!disk)
3488 return;
3489
Alex Eldera0cab922013-04-25 23:15:08 -05003490 rbd_dev->disk = NULL;
3491 if (disk->flags & GENHD_FL_UP) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003492 del_gendisk(disk);
Alex Eldera0cab922013-04-25 23:15:08 -05003493 if (disk->queue)
3494 blk_cleanup_queue(disk->queue);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003495 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05003496 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003497 put_disk(disk);
3498}
3499
Alex Elder788e2df2013-01-17 12:25:27 -06003500static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3501 const char *object_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003502 u64 offset, u64 length, void *buf)
Alex Elder788e2df2013-01-17 12:25:27 -06003503
3504{
Alex Elder21692382013-04-05 01:27:12 -05003505 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder788e2df2013-01-17 12:25:27 -06003506 struct rbd_obj_request *obj_request;
Alex Elder788e2df2013-01-17 12:25:27 -06003507 struct page **pages = NULL;
3508 u32 page_count;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003509 size_t size;
Alex Elder788e2df2013-01-17 12:25:27 -06003510 int ret;
3511
3512 page_count = (u32) calc_pages_for(offset, length);
3513 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3514 if (IS_ERR(pages))
Jan Karaa8d42052014-10-22 09:17:24 +02003515 return PTR_ERR(pages);
Alex Elder788e2df2013-01-17 12:25:27 -06003516
3517 ret = -ENOMEM;
3518 obj_request = rbd_obj_request_create(object_name, offset, length,
Alex Elder36be9a72013-01-19 00:30:28 -06003519 OBJ_REQUEST_PAGES);
Alex Elder788e2df2013-01-17 12:25:27 -06003520 if (!obj_request)
3521 goto out;
3522
3523 obj_request->pages = pages;
3524 obj_request->page_count = page_count;
3525
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003526 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003527 obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06003528 if (!obj_request->osd_req)
3529 goto out;
3530
Alex Elderc99d2d42013-04-05 01:27:11 -05003531 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3532 offset, length, 0, 0);
Alex Elder406e2c92013-04-15 14:50:36 -05003533 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
Alex Eldera4ce40a2013-04-05 01:27:12 -05003534 obj_request->pages,
Alex Elder44cd1882013-04-05 01:27:12 -05003535 obj_request->length,
3536 obj_request->offset & ~PAGE_MASK,
3537 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003538 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003539
Alex Elder788e2df2013-01-17 12:25:27 -06003540 ret = rbd_obj_request_submit(osdc, obj_request);
3541 if (ret)
3542 goto out;
3543 ret = rbd_obj_request_wait(obj_request);
3544 if (ret)
3545 goto out;
3546
3547 ret = obj_request->result;
3548 if (ret < 0)
3549 goto out;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003550
3551 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3552 size = (size_t) obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003553 ceph_copy_from_page_vector(pages, buf, 0, size);
Alex Elder7097f8d2013-04-30 00:44:33 -05003554 rbd_assert(size <= (size_t)INT_MAX);
3555 ret = (int)size;
Alex Elder788e2df2013-01-17 12:25:27 -06003556out:
3557 if (obj_request)
3558 rbd_obj_request_put(obj_request);
3559 else
3560 ceph_release_page_vector(pages, page_count);
3561
3562 return ret;
3563}
3564
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003565/*
Alex Elder662518b2013-05-06 09:51:29 -05003566 * Read the complete header for the given rbd device. On successful
3567 * return, the rbd_dev->header field will contain up-to-date
3568 * information about the image.
Alex Elder4156d992012-08-02 11:29:46 -05003569 */
Alex Elder99a41eb2013-05-06 09:51:30 -05003570static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d992012-08-02 11:29:46 -05003571{
3572 struct rbd_image_header_ondisk *ondisk = NULL;
3573 u32 snap_count = 0;
3574 u64 names_size = 0;
3575 u32 want_count;
3576 int ret;
3577
3578 /*
3579 * The complete header will include an array of its 64-bit
3580 * snapshot ids, followed by the names of those snapshots as
3581 * a contiguous block of NUL-terminated strings. Note that
3582 * the number of snapshots could change by the time we read
3583 * it in, in which case we re-read it.
3584 */
3585 do {
3586 size_t size;
3587
3588 kfree(ondisk);
3589
3590 size = sizeof (*ondisk);
3591 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3592 size += names_size;
3593 ondisk = kmalloc(size, GFP_KERNEL);
3594 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05003595 return -ENOMEM;
Alex Elder4156d992012-08-02 11:29:46 -05003596
Alex Elder788e2df2013-01-17 12:25:27 -06003597 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003598 0, size, ondisk);
Alex Elder4156d992012-08-02 11:29:46 -05003599 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05003600 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05003601 if ((size_t)ret < size) {
Alex Elder4156d992012-08-02 11:29:46 -05003602 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003603 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3604 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05003605 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003606 }
3607 if (!rbd_dev_ondisk_valid(ondisk)) {
3608 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003609 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05003610 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003611 }
3612
3613 names_size = le64_to_cpu(ondisk->snap_names_len);
3614 want_count = snap_count;
3615 snap_count = le32_to_cpu(ondisk->snap_count);
3616 } while (snap_count != want_count);
3617
Alex Elder662518b2013-05-06 09:51:29 -05003618 ret = rbd_header_from_disk(rbd_dev, ondisk);
3619out:
Alex Elder4156d992012-08-02 11:29:46 -05003620 kfree(ondisk);
3621
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003622 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003623}
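/*
 * Editorial sketch (not part of the driver): size of the buffer needed for a
 * complete format 1 on-disk header, matching the read loop above.  The fixed
 * header is followed by snap_count snapshot records and then names_size
 * bytes of NUL-terminated snapshot names.  The helper name is hypothetical.
 */
static inline u64 example_v1_header_size(u32 snap_count, u64 names_size)
{
	return sizeof(struct rbd_image_header_ondisk) +
	       snap_count * sizeof(struct rbd_image_snap_ondisk) +
	       names_size;
}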
3624
Alex Elder15228ed2013-05-01 12:43:03 -05003625/*
3626 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3627 * has disappeared from the (just updated) snapshot context.
3628 */
3629static void rbd_exists_validate(struct rbd_device *rbd_dev)
3630{
3631 u64 snap_id;
3632
3633 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3634 return;
3635
3636 snap_id = rbd_dev->spec->snap_id;
3637 if (snap_id == CEPH_NOSNAP)
3638 return;
3639
3640 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3641 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3642}
3643
Josh Durgin98752012013-08-29 17:26:31 -07003644static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3645{
3646 sector_t size;
Josh Durgin98752012013-08-29 17:26:31 -07003647
3648 /*
Ilya Dryomov811c6682016-04-15 16:22:16 +02003649 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3650 * try to update its size. If REMOVING is set, updating size
3651 * is just useless work since the device can't be opened.
Josh Durgin98752012013-08-29 17:26:31 -07003652 */
Ilya Dryomov811c6682016-04-15 16:22:16 +02003653 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3654 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
Josh Durgin98752012013-08-29 17:26:31 -07003655 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3656 dout("setting size to %llu sectors", (unsigned long long)size);
3657 set_capacity(rbd_dev->disk, size);
3658 revalidate_disk(rbd_dev->disk);
3659 }
3660}
3661
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003662static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05003663{
Alex Eldere627db02013-05-06 07:40:30 -05003664 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05003665 int ret;
3666
Alex Eldercfbf6372013-05-31 17:40:45 -05003667 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05003668 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04003669
3670 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003671 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003672 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05003673
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003674 /*
3675 * If there is a parent, see if it has disappeared due to the
3676 * mapped image getting flattened.
3677 */
3678 if (rbd_dev->parent) {
3679 ret = rbd_dev_v2_parent_info(rbd_dev);
3680 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003681 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003682 }
3683
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003684 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003685 rbd_dev->mapping.size = rbd_dev->header.image_size;
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003686 } else {
3687 /* validate mapped snapshot's EXISTS flag */
3688 rbd_exists_validate(rbd_dev);
3689 }
Alex Elder15228ed2013-05-01 12:43:03 -05003690
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003691out:
Alex Eldercfbf6372013-05-31 17:40:45 -05003692 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003693 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07003694 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05003695
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003696 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05003697}
3698
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003699static int rbd_init_request(void *data, struct request *rq,
3700 unsigned int hctx_idx, unsigned int request_idx,
3701 unsigned int numa_node)
3702{
3703 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3704
3705 INIT_WORK(work, rbd_queue_workfn);
3706 return 0;
3707}
3708
3709static struct blk_mq_ops rbd_mq_ops = {
3710 .queue_rq = rbd_queue_rq,
3711 .map_queue = blk_mq_map_queue,
3712 .init_request = rbd_init_request,
3713};
3714
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003715static int rbd_init_disk(struct rbd_device *rbd_dev)
3716{
3717 struct gendisk *disk;
3718 struct request_queue *q;
Alex Elder593a9e72012-02-07 12:03:37 -06003719 u64 segment_size;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003720 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003721
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003722 /* create gendisk info */
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003723 disk = alloc_disk(single_major ?
3724 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3725 RBD_MINORS_PER_MAJOR);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003726 if (!disk)
Alex Elder1fcdb8a2012-08-29 17:11:06 -05003727 return -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003728
Alex Elderf0f8cef2012-01-29 13:57:44 -06003729 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
Alex Elderde71a292012-07-03 16:01:19 -05003730 rbd_dev->dev_id);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003731 disk->major = rbd_dev->major;
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003732 disk->first_minor = rbd_dev->minor;
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003733 if (single_major)
3734 disk->flags |= GENHD_FL_EXT_DEVT;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003735 disk->fops = &rbd_bd_ops;
3736 disk->private_data = rbd_dev;
3737
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003738 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3739 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003740 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003741 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003742 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003743 rbd_dev->tag_set.nr_hw_queues = 1;
3744 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3745
3746 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3747 if (err)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003748 goto out_disk;
Josh Durgin029bcbd2011-07-22 11:35:23 -07003749
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003750 q = blk_mq_init_queue(&rbd_dev->tag_set);
3751 if (IS_ERR(q)) {
3752 err = PTR_ERR(q);
3753 goto out_tag_set;
3754 }
3755
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03003756 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3757 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06003758
Josh Durgin029bcbd2011-07-22 11:35:23 -07003759 /* set io sizes to object size */
Alex Elder593a9e72012-02-07 12:03:37 -06003760 segment_size = rbd_obj_bytes(&rbd_dev->header);
3761 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
Ilya Dryomov0d9fde42015-10-07 16:09:35 +02003762 q->limits.max_sectors = queue_max_hw_sectors(q);
Ilya Dryomovd3834fe2015-06-12 19:19:02 +03003763 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
Alex Elder593a9e72012-02-07 12:03:37 -06003764 blk_queue_max_segment_size(q, segment_size);
3765 blk_queue_io_min(q, segment_size);
3766 blk_queue_io_opt(q, segment_size);
Josh Durgin029bcbd2011-07-22 11:35:23 -07003767
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003768 /* enable the discard support */
3769 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3770 q->limits.discard_granularity = segment_size;
3771 q->limits.discard_alignment = segment_size;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06003772 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
Josh Durginb76f8232014-04-07 16:52:03 -07003773 q->limits.discard_zeroes_data = 1;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003774
Ronny Hegewaldbae818e2015-10-15 18:50:46 +00003775 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3776 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3777
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003778 disk->queue = q;
3779
3780 q->queuedata = rbd_dev;
3781
3782 rbd_dev->disk = disk;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003783
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003784 return 0;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003785out_tag_set:
3786 blk_mq_free_tag_set(&rbd_dev->tag_set);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003787out_disk:
3788 put_disk(disk);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003789 return err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003790}
3791
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003792/*
3793 sysfs
3794*/
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003795
Alex Elder593a9e72012-02-07 12:03:37 -06003796static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3797{
3798 return container_of(dev, struct rbd_device, dev);
3799}
3800
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003801static ssize_t rbd_size_show(struct device *dev,
3802 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003803{
Alex Elder593a9e72012-02-07 12:03:37 -06003804 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003805
Alex Elderfc71d832013-04-26 15:44:36 -05003806 return sprintf(buf, "%llu\n",
3807 (unsigned long long)rbd_dev->mapping.size);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003808}
3809
Alex Elder34b13182012-07-13 20:35:12 -05003810/*
3811 * Note this shows the features for whatever's mapped, which is not
3812 * necessarily the base image.
3813 */
3814static ssize_t rbd_features_show(struct device *dev,
3815 struct device_attribute *attr, char *buf)
3816{
3817 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3818
3819 return sprintf(buf, "0x%016llx\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003820 (unsigned long long)rbd_dev->mapping.features);
Alex Elder34b13182012-07-13 20:35:12 -05003821}
3822
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003823static ssize_t rbd_major_show(struct device *dev,
3824 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003825{
Alex Elder593a9e72012-02-07 12:03:37 -06003826 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003827
Alex Elderfc71d832013-04-26 15:44:36 -05003828 if (rbd_dev->major)
3829 return sprintf(buf, "%d\n", rbd_dev->major);
3830
3831 return sprintf(buf, "(none)\n");
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003832}
Alex Elderfc71d832013-04-26 15:44:36 -05003833
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003834static ssize_t rbd_minor_show(struct device *dev,
3835 struct device_attribute *attr, char *buf)
3836{
3837 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3838
3839 return sprintf(buf, "%d\n", rbd_dev->minor);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003840}
3841
3842static ssize_t rbd_client_id_show(struct device *dev,
3843 struct device_attribute *attr, char *buf)
3844{
Alex Elder593a9e72012-02-07 12:03:37 -06003845 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003846
Alex Elder1dbb4392012-01-24 10:08:37 -06003847 return sprintf(buf, "client%lld\n",
3848 ceph_client_id(rbd_dev->rbd_client->client));
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003849}
3850
3851static ssize_t rbd_pool_show(struct device *dev,
3852 struct device_attribute *attr, char *buf)
3853{
Alex Elder593a9e72012-02-07 12:03:37 -06003854 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003855
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003856 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003857}
3858
Alex Elder9bb2f332012-07-12 10:46:35 -05003859static ssize_t rbd_pool_id_show(struct device *dev,
3860 struct device_attribute *attr, char *buf)
3861{
3862 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3863
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003864 return sprintf(buf, "%llu\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003865 (unsigned long long) rbd_dev->spec->pool_id);
Alex Elder9bb2f332012-07-12 10:46:35 -05003866}
3867
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003868static ssize_t rbd_name_show(struct device *dev,
3869 struct device_attribute *attr, char *buf)
3870{
Alex Elder593a9e72012-02-07 12:03:37 -06003871 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003872
Alex Eldera92ffdf2012-10-30 19:40:33 -05003873 if (rbd_dev->spec->image_name)
3874 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3875
3876 return sprintf(buf, "(unknown)\n");
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003877}
3878
Alex Elder589d30e2012-07-10 20:30:11 -05003879static ssize_t rbd_image_id_show(struct device *dev,
3880 struct device_attribute *attr, char *buf)
3881{
3882 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3883
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003884 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05003885}
3886
Alex Elder34b13182012-07-13 20:35:12 -05003887/*
3888 * Shows the name of the currently-mapped snapshot (or
3889 * RBD_SNAP_HEAD_NAME for the base image).
3890 */
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003891static ssize_t rbd_snap_show(struct device *dev,
3892 struct device_attribute *attr,
3893 char *buf)
3894{
Alex Elder593a9e72012-02-07 12:03:37 -06003895 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003896
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003897 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003898}
3899
Alex Elder86b00e02012-10-25 23:34:42 -05003900/*
Ilya Dryomovff961282014-07-22 21:53:07 +04003901 * For a v2 image, shows the chain of parent images, separated by empty
3902 * lines. For v1 images or if there is no parent, shows "(no parent
3903 * image)".
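 *
 * Each entry in the chain uses the format produced by the code below;
 * for instance (all values illustrative only):
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1028b5763845
 *   image_name parent-image
 *   snap_id 4
 *   snap_name base-snap
 *   overlap 1073741824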
Alex Elder86b00e02012-10-25 23:34:42 -05003904 */
3905static ssize_t rbd_parent_show(struct device *dev,
Ilya Dryomovff961282014-07-22 21:53:07 +04003906 struct device_attribute *attr,
3907 char *buf)
Alex Elder86b00e02012-10-25 23:34:42 -05003908{
3909 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Ilya Dryomovff961282014-07-22 21:53:07 +04003910 ssize_t count = 0;
Alex Elder86b00e02012-10-25 23:34:42 -05003911
Ilya Dryomovff961282014-07-22 21:53:07 +04003912 if (!rbd_dev->parent)
Alex Elder86b00e02012-10-25 23:34:42 -05003913 return sprintf(buf, "(no parent image)\n");
3914
Ilya Dryomovff961282014-07-22 21:53:07 +04003915 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3916 struct rbd_spec *spec = rbd_dev->parent_spec;
Alex Elder86b00e02012-10-25 23:34:42 -05003917
Ilya Dryomovff961282014-07-22 21:53:07 +04003918 count += sprintf(&buf[count], "%s"
3919 "pool_id %llu\npool_name %s\n"
3920 "image_id %s\nimage_name %s\n"
3921 "snap_id %llu\nsnap_name %s\n"
3922 "overlap %llu\n",
3923 !count ? "" : "\n", /* first? */
3924 spec->pool_id, spec->pool_name,
3925 spec->image_id, spec->image_name ?: "(unknown)",
3926 spec->snap_id, spec->snap_name,
3927 rbd_dev->parent_overlap);
3928 }
Alex Elder86b00e02012-10-25 23:34:42 -05003929
Ilya Dryomovff961282014-07-22 21:53:07 +04003930 return count;
Alex Elder86b00e02012-10-25 23:34:42 -05003931}
3932
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003933static ssize_t rbd_image_refresh(struct device *dev,
3934 struct device_attribute *attr,
3935 const char *buf,
3936 size_t size)
3937{
Alex Elder593a9e72012-02-07 12:03:37 -06003938 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05003939 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003940
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003941 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05003942 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003943 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05003944
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003945 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003946}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003947
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003948static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
Alex Elder34b13182012-07-13 20:35:12 -05003949static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003950static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003951static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003952static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3953static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
Alex Elder9bb2f332012-07-12 10:46:35 -05003954static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003955static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
Alex Elder589d30e2012-07-10 20:30:11 -05003956static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003957static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3958static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
Alex Elder86b00e02012-10-25 23:34:42 -05003959static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003960
3961static struct attribute *rbd_attrs[] = {
3962 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05003963 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003964 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003965 &dev_attr_minor.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003966 &dev_attr_client_id.attr,
3967 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05003968 &dev_attr_pool_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003969 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05003970 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003971 &dev_attr_current_snap.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05003972 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003973 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003974 NULL
3975};
3976
3977static struct attribute_group rbd_attr_group = {
3978 .attrs = rbd_attrs,
3979};
3980
3981static const struct attribute_group *rbd_attr_groups[] = {
3982 &rbd_attr_group,
3983 NULL
3984};
3985
Ilya Dryomov6cac4692015-10-16 20:11:25 +02003986static void rbd_dev_release(struct device *dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003987
3988static struct device_type rbd_device_type = {
3989 .name = "rbd",
3990 .groups = rbd_attr_groups,
Ilya Dryomov6cac4692015-10-16 20:11:25 +02003991 .release = rbd_dev_release,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003992};
3993
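/*
 * Reference counting helpers for rbd image specs: rbd_spec_get() takes
 * an additional reference, rbd_spec_put() drops one and frees the spec
 * via rbd_spec_free() once the last reference is gone.
 */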
Alex Elder8b8fb992012-10-26 17:25:24 -05003994static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3995{
3996 kref_get(&spec->kref);
3997
3998 return spec;
3999}
4000
4001static void rbd_spec_free(struct kref *kref);
4002static void rbd_spec_put(struct rbd_spec *spec)
4003{
4004 if (spec)
4005 kref_put(&spec->kref, rbd_spec_free);
4006}
4007
4008static struct rbd_spec *rbd_spec_alloc(void)
4009{
4010 struct rbd_spec *spec;
4011
4012 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4013 if (!spec)
4014 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04004015
4016 spec->pool_id = CEPH_NOPOOL;
4017 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05004018 kref_init(&spec->kref);
4019
Alex Elder8b8fb992012-10-26 17:25:24 -05004020 return spec;
4021}
4022
4023static void rbd_spec_free(struct kref *kref)
4024{
4025 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4026
4027 kfree(spec->pool_name);
4028 kfree(spec->image_id);
4029 kfree(spec->image_name);
4030 kfree(spec->snap_name);
4031 kfree(spec);
4032}
4033
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004034static void rbd_dev_release(struct device *dev)
4035{
4036 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4037 bool need_put = !!rbd_dev->opts;
4038
4039 rbd_put_client(rbd_dev->rbd_client);
4040 rbd_spec_put(rbd_dev->spec);
4041 kfree(rbd_dev->opts);
4042 kfree(rbd_dev);
4043
4044 /*
4045 * This is racy, but way better than putting module outside of
4046 * the release callback. The race window is pretty small, so
4047 * doing something similar to dm (dm-builtin.c) is overkill.
4048 */
4049 if (need_put)
4050 module_put(THIS_MODULE);
4051}
4052
Alex Eldercc344fa2013-02-19 12:25:56 -06004053static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
Ilya Dryomovd1475432015-06-22 13:24:48 +03004054 struct rbd_spec *spec,
4055 struct rbd_options *opts)
Alex Elderc53d5892012-10-25 23:34:42 -05004056{
4057 struct rbd_device *rbd_dev;
4058
4059 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4060 if (!rbd_dev)
4061 return NULL;
4062
4063 spin_lock_init(&rbd_dev->lock);
Alex Elder6d292902013-01-14 12:43:31 -06004064 rbd_dev->flags = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05004065 atomic_set(&rbd_dev->parent_ref, 0);
Alex Elderc53d5892012-10-25 23:34:42 -05004066 INIT_LIST_HEAD(&rbd_dev->node);
Alex Elderc53d5892012-10-25 23:34:42 -05004067 init_rwsem(&rbd_dev->header_rwsem);
4068
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004069 rbd_dev->dev.bus = &rbd_bus_type;
4070 rbd_dev->dev.type = &rbd_device_type;
4071 rbd_dev->dev.parent = &rbd_root_dev;
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004072 device_initialize(&rbd_dev->dev);
4073
Alex Elderc53d5892012-10-25 23:34:42 -05004074 rbd_dev->rbd_client = rbdc;
Ilya Dryomovd1475432015-06-22 13:24:48 +03004075 rbd_dev->spec = spec;
4076 rbd_dev->opts = opts;
Alex Elderc53d5892012-10-25 23:34:42 -05004077
Alex Elder0903e872012-11-14 12:25:19 -06004078 /* Initialize the layout used for all rbd requests */
4079
4080 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4081 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4082 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4083 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4084
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004085 /*
4086 * If this is a mapping rbd_dev (as opposed to a parent one),
4087 * pin our module. We have a ref from do_rbd_add(), so use
4088 * __module_get().
4089 */
4090 if (rbd_dev->opts)
4091 __module_get(THIS_MODULE);
4092
Alex Elderc53d5892012-10-25 23:34:42 -05004093 return rbd_dev;
4094}
4095
4096static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4097{
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004098 if (rbd_dev)
4099 put_device(&rbd_dev->dev);
Alex Elderc53d5892012-10-25 23:34:42 -05004100}
4101
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004102/*
Alex Elder9d475de2012-07-03 16:01:19 -05004103 * Get the size and object order for an image snapshot, or if
4104	 * snap_id is CEPH_NOSNAP, get this information for the base
4105 * image.
4106 */
4107static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4108 u8 *order, u64 *snap_size)
4109{
4110 __le64 snapid = cpu_to_le64(snap_id);
4111 int ret;
4112 struct {
4113 u8 order;
4114 __le64 size;
4115 } __attribute__ ((packed)) size_buf = { 0 };
4116
Alex Elder36be9a72013-01-19 00:30:28 -06004117 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder9d475de2012-07-03 16:01:19 -05004118 "rbd", "get_size",
Alex Elder41579762013-04-21 12:14:45 -05004119 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004120 &size_buf, sizeof (size_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004121 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder9d475de2012-07-03 16:01:19 -05004122 if (ret < 0)
4123 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004124 if (ret < sizeof (size_buf))
4125 return -ERANGE;
Alex Elder9d475de2012-07-03 16:01:19 -05004126
Josh Durginc3545572013-08-28 17:08:10 -07004127 if (order) {
Alex Elderc86f86e2013-04-25 15:09:41 -05004128 *order = size_buf.order;
Josh Durginc3545572013-08-28 17:08:10 -07004129 dout(" order %u", (unsigned int)*order);
4130 }
Alex Elder9d475de2012-07-03 16:01:19 -05004131 *snap_size = le64_to_cpu(size_buf.size);
4132
Josh Durginc3545572013-08-28 17:08:10 -07004133 dout(" snap_id 0x%016llx snap_size = %llu\n",
4134 (unsigned long long)snap_id,
Alex Elder57385b52013-04-21 12:14:45 -05004135 (unsigned long long)*snap_size);
Alex Elder9d475de2012-07-03 16:01:19 -05004136
4137 return 0;
4138}
4139
4140static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4141{
4142 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4143 &rbd_dev->header.obj_order,
4144 &rbd_dev->header.image_size);
4145}
4146
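/*
 * Fetch the object name prefix used for the image's data objects via
 * the "get_object_prefix" class method and stash a copy of it in the
 * in-core image header.
 */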
Alex Elder1e130192012-07-03 16:01:19 -05004147static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4148{
4149 void *reply_buf;
4150 int ret;
4151 void *p;
4152
4153 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4154 if (!reply_buf)
4155 return -ENOMEM;
4156
Alex Elder36be9a72013-01-19 00:30:28 -06004157 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder41579762013-04-21 12:14:45 -05004158 "rbd", "get_object_prefix", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004159 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004160 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder1e130192012-07-03 16:01:19 -05004161 if (ret < 0)
4162 goto out;
4163
4164 p = reply_buf;
4165 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
Alex Elder57385b52013-04-21 12:14:45 -05004166 p + ret, NULL, GFP_NOIO);
4167 ret = 0;
Alex Elder1e130192012-07-03 16:01:19 -05004168
4169 if (IS_ERR(rbd_dev->header.object_prefix)) {
4170 ret = PTR_ERR(rbd_dev->header.object_prefix);
4171 rbd_dev->header.object_prefix = NULL;
4172 } else {
4173 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4174 }
Alex Elder1e130192012-07-03 16:01:19 -05004175out:
4176 kfree(reply_buf);
4177
4178 return ret;
4179}
4180
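/*
 * Fetch the feature bits for the given snapshot (or for the base image
 * if snap_id is CEPH_NOSNAP).  Fails with -ENXIO if the image uses
 * incompatible features that this driver does not support.
 */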
Alex Elderb1b54022012-07-03 16:01:19 -05004181static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4182 u64 *snap_features)
4183{
4184 __le64 snapid = cpu_to_le64(snap_id);
4185 struct {
4186 __le64 features;
4187 __le64 incompat;
Alex Elder41579762013-04-21 12:14:45 -05004188 } __attribute__ ((packed)) features_buf = { 0 };
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004189 u64 unsup;
Alex Elderb1b54022012-07-03 16:01:19 -05004190 int ret;
4191
Alex Elder36be9a72013-01-19 00:30:28 -06004192 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elderb1b54022012-07-03 16:01:19 -05004193 "rbd", "get_features",
Alex Elder41579762013-04-21 12:14:45 -05004194 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004195 &features_buf, sizeof (features_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004196 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderb1b54022012-07-03 16:01:19 -05004197 if (ret < 0)
4198 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004199 if (ret < sizeof (features_buf))
4200 return -ERANGE;
Alex Elderd8891402012-10-09 13:50:17 -07004201
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004202 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4203 if (unsup) {
4204 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4205 unsup);
Alex Elderb8f5c6e2012-11-01 08:39:26 -05004206 return -ENXIO;
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004207 }
Alex Elderd8891402012-10-09 13:50:17 -07004208
Alex Elderb1b54022012-07-03 16:01:19 -05004209 *snap_features = le64_to_cpu(features_buf.features);
4210
4211 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
Alex Elder57385b52013-04-21 12:14:45 -05004212 (unsigned long long)snap_id,
4213 (unsigned long long)*snap_features,
4214 (unsigned long long)le64_to_cpu(features_buf.incompat));
Alex Elderb1b54022012-07-03 16:01:19 -05004215
4216 return 0;
4217}
4218
4219static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4220{
4221 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4222 &rbd_dev->header.features);
4223}
4224
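/*
 * Query a format 2 image's parent information (pool id, image id,
 * snapshot id and overlap) and record it in the rbd_dev, handling the
 * case where a clone has been flattened since the last refresh.
 */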
Alex Elder86b00e02012-10-25 23:34:42 -05004225static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4226{
4227 struct rbd_spec *parent_spec;
4228 size_t size;
4229 void *reply_buf = NULL;
4230 __le64 snapid;
4231 void *p;
4232 void *end;
Alex Elder642a2532013-05-06 17:40:33 -05004233 u64 pool_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004234 char *image_id;
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004235 u64 snap_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004236 u64 overlap;
Alex Elder86b00e02012-10-25 23:34:42 -05004237 int ret;
4238
4239 parent_spec = rbd_spec_alloc();
4240 if (!parent_spec)
4241 return -ENOMEM;
4242
4243 size = sizeof (__le64) + /* pool_id */
4244 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4245 sizeof (__le64) + /* snap_id */
4246 sizeof (__le64); /* overlap */
4247 reply_buf = kmalloc(size, GFP_KERNEL);
4248 if (!reply_buf) {
4249 ret = -ENOMEM;
4250 goto out_err;
4251 }
4252
Ilya Dryomov4d9b67c2014-07-24 10:42:13 +04004253 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
Alex Elder36be9a72013-01-19 00:30:28 -06004254 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder86b00e02012-10-25 23:34:42 -05004255 "rbd", "get_parent",
Alex Elder41579762013-04-21 12:14:45 -05004256 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004257 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004258 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder86b00e02012-10-25 23:34:42 -05004259 if (ret < 0)
4260 goto out_err;
4261
Alex Elder86b00e02012-10-25 23:34:42 -05004262 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004263 end = reply_buf + ret;
4264 ret = -ERANGE;
Alex Elder642a2532013-05-06 17:40:33 -05004265 ceph_decode_64_safe(&p, end, pool_id, out_err);
Alex Elder392a9da2013-05-06 17:40:33 -05004266 if (pool_id == CEPH_NOPOOL) {
4267 /*
4268 * Either the parent never existed, or we have
4269	 * a record of it but the image got flattened so it no
4270 * longer has a parent. When the parent of a
4271 * layered image disappears we immediately set the
4272 * overlap to 0. The effect of this is that all new
4273 * requests will be treated as if the image had no
4274 * parent.
4275 */
4276 if (rbd_dev->parent_overlap) {
4277 rbd_dev->parent_overlap = 0;
Alex Elder392a9da2013-05-06 17:40:33 -05004278 rbd_dev_parent_put(rbd_dev);
4279 pr_info("%s: clone image has been flattened\n",
4280 rbd_dev->disk->disk_name);
4281 }
4282
Alex Elder86b00e02012-10-25 23:34:42 -05004283 goto out; /* No parent? No problem. */
Alex Elder392a9da2013-05-06 17:40:33 -05004284 }
Alex Elder86b00e02012-10-25 23:34:42 -05004285
Alex Elder0903e872012-11-14 12:25:19 -06004286 /* The ceph file layout needs to fit pool id in 32 bits */
4287
4288 ret = -EIO;
Alex Elder642a2532013-05-06 17:40:33 -05004289 if (pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04004290 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
Alex Elder642a2532013-05-06 17:40:33 -05004291 (unsigned long long)pool_id, U32_MAX);
Alex Elder57385b52013-04-21 12:14:45 -05004292 goto out_err;
Alex Elderc0cd10db2013-04-26 09:43:47 -05004293 }
Alex Elder0903e872012-11-14 12:25:19 -06004294
Alex Elder979ed482012-11-01 08:39:26 -05004295 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elder86b00e02012-10-25 23:34:42 -05004296 if (IS_ERR(image_id)) {
4297 ret = PTR_ERR(image_id);
4298 goto out_err;
4299 }
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004300 ceph_decode_64_safe(&p, end, snap_id, out_err);
Alex Elder86b00e02012-10-25 23:34:42 -05004301 ceph_decode_64_safe(&p, end, overlap, out_err);
4302
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004303 /*
4304 * The parent won't change (except when the clone is
4305 * flattened, already handled that). So we only need to
4306	 * record the parent spec if we have not already done so.
4307 */
4308 if (!rbd_dev->parent_spec) {
4309 parent_spec->pool_id = pool_id;
4310 parent_spec->image_id = image_id;
4311 parent_spec->snap_id = snap_id;
Alex Elder70cf49c2013-05-06 17:40:33 -05004312 rbd_dev->parent_spec = parent_spec;
4313 parent_spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovfbba11b2014-06-27 21:46:33 +04004314 } else {
4315 kfree(image_id);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004316 }
4317
4318 /*
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004319 * We always update the parent overlap. If it's zero we issue
4320 * a warning, as we will proceed as if there was no parent.
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004321 */
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004322 if (!overlap) {
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004323 if (parent_spec) {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004324 /* refresh, careful to warn just once */
4325 if (rbd_dev->parent_overlap)
4326 rbd_warn(rbd_dev,
4327 "clone now standalone (overlap became 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004328 } else {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004329 /* initial probe */
4330 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004331 }
Alex Elder70cf49c2013-05-06 17:40:33 -05004332 }
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004333 rbd_dev->parent_overlap = overlap;
4334
Alex Elder86b00e02012-10-25 23:34:42 -05004335out:
4336 ret = 0;
4337out_err:
4338 kfree(reply_buf);
4339 rbd_spec_put(parent_spec);
4340
4341 return ret;
4342}
4343
Alex Eldercc070d52013-04-21 12:14:45 -05004344static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4345{
4346 struct {
4347 __le64 stripe_unit;
4348 __le64 stripe_count;
4349 } __attribute__ ((packed)) striping_info_buf = { 0 };
4350 size_t size = sizeof (striping_info_buf);
4351 void *p;
4352 u64 obj_size;
4353 u64 stripe_unit;
4354 u64 stripe_count;
4355 int ret;
4356
4357 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4358 "rbd", "get_stripe_unit_count", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004359 (char *)&striping_info_buf, size);
Alex Eldercc070d52013-04-21 12:14:45 -05004360 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4361 if (ret < 0)
4362 return ret;
4363 if (ret < size)
4364 return -ERANGE;
4365
4366 /*
4367 * We don't actually support the "fancy striping" feature
4368 * (STRIPINGV2) yet, but if the striping sizes are the
4369 * defaults the behavior is the same as before. So find
4370 * out, and only fail if the image has non-default values.
4371 */
4372 ret = -EINVAL;
4373 obj_size = (u64)1 << rbd_dev->header.obj_order;
4374 p = &striping_info_buf;
4375 stripe_unit = ceph_decode_64(&p);
4376 if (stripe_unit != obj_size) {
4377 rbd_warn(rbd_dev, "unsupported stripe unit "
4378 "(got %llu want %llu)",
4379 stripe_unit, obj_size);
4380 return -EINVAL;
4381 }
4382 stripe_count = ceph_decode_64(&p);
4383 if (stripe_count != 1) {
4384 rbd_warn(rbd_dev, "unsupported stripe count "
4385 "(got %llu want 1)", stripe_count);
4386 return -EINVAL;
4387 }
Alex Elder500d0c02013-04-26 09:43:47 -05004388 rbd_dev->header.stripe_unit = stripe_unit;
4389 rbd_dev->header.stripe_count = stripe_count;
Alex Eldercc070d52013-04-21 12:14:45 -05004390
4391 return 0;
4392}
4393
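/*
 * Look up an image name given its id by asking the pool's RBD_DIRECTORY
 * object ("dir_get_name").  Returns a dynamically allocated name, or
 * NULL if it could not be determined.
 */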
Alex Elder9e15b772012-10-30 19:40:33 -05004394static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4395{
4396 size_t image_id_size;
4397 char *image_id;
4398 void *p;
4399 void *end;
4400 size_t size;
4401 void *reply_buf = NULL;
4402 size_t len = 0;
4403 char *image_name = NULL;
4404 int ret;
4405
4406 rbd_assert(!rbd_dev->spec->image_name);
4407
Alex Elder69e7a022012-11-01 08:39:26 -05004408 len = strlen(rbd_dev->spec->image_id);
4409 image_id_size = sizeof (__le32) + len;
Alex Elder9e15b772012-10-30 19:40:33 -05004410 image_id = kmalloc(image_id_size, GFP_KERNEL);
4411 if (!image_id)
4412 return NULL;
4413
4414 p = image_id;
Alex Elder41579762013-04-21 12:14:45 -05004415 end = image_id + image_id_size;
Alex Elder57385b52013-04-21 12:14:45 -05004416 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
Alex Elder9e15b772012-10-30 19:40:33 -05004417
4418 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4419 reply_buf = kmalloc(size, GFP_KERNEL);
4420 if (!reply_buf)
4421 goto out;
4422
Alex Elder36be9a72013-01-19 00:30:28 -06004423 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
Alex Elder9e15b772012-10-30 19:40:33 -05004424 "rbd", "dir_get_name",
4425 image_id, image_id_size,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004426 reply_buf, size);
Alex Elder9e15b772012-10-30 19:40:33 -05004427 if (ret < 0)
4428 goto out;
4429 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004430 end = reply_buf + ret;
4431
Alex Elder9e15b772012-10-30 19:40:33 -05004432 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4433 if (IS_ERR(image_name))
4434 image_name = NULL;
4435 else
4436 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4437out:
4438 kfree(reply_buf);
4439 kfree(image_id);
4440
4441 return image_name;
4442}
4443
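/*
 * Map a snapshot name to its id for a format 1 image by walking the
 * NUL-separated snapshot name array that parallels the snapshot
 * context.
 */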
Alex Elder2ad3d712013-04-30 00:44:33 -05004444static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4445{
4446 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4447 const char *snap_name;
4448 u32 which = 0;
4449
4450 /* Skip over names until we find the one we are looking for */
4451
4452 snap_name = rbd_dev->header.snap_names;
4453 while (which < snapc->num_snaps) {
4454 if (!strcmp(name, snap_name))
4455 return snapc->snaps[which];
4456 snap_name += strlen(snap_name) + 1;
4457 which++;
4458 }
4459 return CEPH_NOSNAP;
4460}
4461
4462static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4463{
4464 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4465 u32 which;
4466 bool found = false;
4467 u64 snap_id;
4468
4469 for (which = 0; !found && which < snapc->num_snaps; which++) {
4470 const char *snap_name;
4471
4472 snap_id = snapc->snaps[which];
4473 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
Josh Durginefadc982013-08-29 19:16:42 -07004474 if (IS_ERR(snap_name)) {
4475 /* ignore no-longer existing snapshots */
4476 if (PTR_ERR(snap_name) == -ENOENT)
4477 continue;
4478 else
4479 break;
4480 }
Alex Elder2ad3d712013-04-30 00:44:33 -05004481 found = !strcmp(name, snap_name);
4482 kfree(snap_name);
4483 }
4484 return found ? snap_id : CEPH_NOSNAP;
4485}
4486
4487/*
4488 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4489 * no snapshot by that name is found, or if an error occurs.
4490 */
4491static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4492{
4493 if (rbd_dev->image_format == 1)
4494 return rbd_v1_snap_id_by_name(rbd_dev, name);
4495
4496 return rbd_v2_snap_id_by_name(rbd_dev, name);
4497}
4498
Alex Elder9e15b772012-10-30 19:40:33 -05004499/*
Ilya Dryomov04077592014-07-23 17:11:20 +04004500 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05004501 */
Ilya Dryomov04077592014-07-23 17:11:20 +04004502static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4503{
4504 struct rbd_spec *spec = rbd_dev->spec;
4505
4506 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4507 rbd_assert(spec->image_id && spec->image_name);
4508 rbd_assert(spec->snap_name);
4509
4510 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4511 u64 snap_id;
4512
4513 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4514 if (snap_id == CEPH_NOSNAP)
4515 return -ENOENT;
4516
4517 spec->snap_id = snap_id;
4518 } else {
4519 spec->snap_id = CEPH_NOSNAP;
4520 }
4521
4522 return 0;
4523}
4524
4525/*
4526 * A parent image will have all ids but none of the names.
4527 *
4528 * All names in an rbd spec are dynamically allocated. It's OK if we
4529 * can't figure out the name for an image id.
4530 */
4531static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05004532{
Alex Elder2e9f7f12013-04-26 09:43:48 -05004533 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4534 struct rbd_spec *spec = rbd_dev->spec;
4535 const char *pool_name;
4536 const char *image_name;
4537 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004538 int ret;
4539
Ilya Dryomov04077592014-07-23 17:11:20 +04004540 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4541 rbd_assert(spec->image_id);
4542 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05004543
Alex Elder2e9f7f12013-04-26 09:43:48 -05004544 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05004545
Alex Elder2e9f7f12013-04-26 09:43:48 -05004546 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4547 if (!pool_name) {
4548 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05004549 return -EIO;
4550 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05004551 pool_name = kstrdup(pool_name, GFP_KERNEL);
4552 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05004553 return -ENOMEM;
4554
4555 /* Fetch the image name; tolerate failure here */
4556
Alex Elder2e9f7f12013-04-26 09:43:48 -05004557 image_name = rbd_dev_image_name(rbd_dev);
4558 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05004559 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05004560
Ilya Dryomov04077592014-07-23 17:11:20 +04004561 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05004562
Alex Elder2e9f7f12013-04-26 09:43:48 -05004563 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07004564 if (IS_ERR(snap_name)) {
4565 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004566 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05004567 }
4568
4569 spec->pool_name = pool_name;
4570 spec->image_name = image_name;
4571 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004572
4573 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04004574
Alex Elder9e15b772012-10-30 19:40:33 -05004575out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05004576 kfree(image_name);
4577 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004578 return ret;
4579}
4580
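/*
 * Fetch the image's snapshot context (the highest snapshot id issued
 * so far plus the list of snapshot ids) and install it in the header,
 * dropping any previously cached copy.
 */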
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004581static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05004582{
4583 size_t size;
4584 int ret;
4585 void *reply_buf;
4586 void *p;
4587 void *end;
4588 u64 seq;
4589 u32 snap_count;
4590 struct ceph_snap_context *snapc;
4591 u32 i;
4592
4593 /*
4594 * We'll need room for the seq value (maximum snapshot id),
4595 * snapshot count, and array of that many snapshot ids.
4596 * For now we have a fixed upper limit on the number we're
4597 * prepared to receive.
4598 */
4599 size = sizeof (__le64) + sizeof (__le32) +
4600 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4601 reply_buf = kzalloc(size, GFP_KERNEL);
4602 if (!reply_buf)
4603 return -ENOMEM;
4604
Alex Elder36be9a72013-01-19 00:30:28 -06004605 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder41579762013-04-21 12:14:45 -05004606 "rbd", "get_snapcontext", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004607 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004608 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05004609 if (ret < 0)
4610 goto out;
4611
Alex Elder35d489f2012-07-03 16:01:19 -05004612 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004613 end = reply_buf + ret;
4614 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05004615 ceph_decode_64_safe(&p, end, seq, out);
4616 ceph_decode_32_safe(&p, end, snap_count, out);
4617
4618 /*
4619 * Make sure the reported number of snapshot ids wouldn't go
4620 * beyond the end of our buffer. But before checking that,
4621 * make sure the computed size of the snapshot context we
4622 * allocate is representable in a size_t.
4623 */
4624 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4625 / sizeof (u64)) {
4626 ret = -EINVAL;
4627 goto out;
4628 }
4629 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4630 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05004631 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05004632
Alex Elder812164f82013-04-30 00:44:32 -05004633 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05004634 if (!snapc) {
4635 ret = -ENOMEM;
4636 goto out;
4637 }
Alex Elder35d489f2012-07-03 16:01:19 -05004638 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05004639 for (i = 0; i < snap_count; i++)
4640 snapc->snaps[i] = ceph_decode_64(&p);
4641
Alex Elder49ece552013-05-06 08:37:00 -05004642 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05004643 rbd_dev->header.snapc = snapc;
4644
4645 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05004646 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05004647out:
4648 kfree(reply_buf);
4649
Alex Elder57385b52013-04-21 12:14:45 -05004650 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05004651}
4652
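/*
 * Fetch the name of the snapshot with the given id using the
 * "get_snapshot_name" class method.  Returns a dynamically allocated
 * string, or an ERR_PTR() on failure.
 */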
Alex Elder54cac612013-04-30 00:44:33 -05004653static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4654 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004655{
4656 size_t size;
4657 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05004658 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004659 int ret;
4660 void *p;
4661 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004662 char *snap_name;
4663
4664 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4665 reply_buf = kmalloc(size, GFP_KERNEL);
4666 if (!reply_buf)
4667 return ERR_PTR(-ENOMEM);
4668
Alex Elder54cac612013-04-30 00:44:33 -05004669 snapid = cpu_to_le64(snap_id);
Alex Elder36be9a72013-01-19 00:30:28 -06004670 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004671 "rbd", "get_snapshot_name",
Alex Elder54cac612013-04-30 00:44:33 -05004672 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004673 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004674 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05004675 if (ret < 0) {
4676 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004677 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05004678 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004679
4680 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004681 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05004682 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05004683 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004684 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004685
Alex Elderf40eb342013-04-25 15:09:42 -05004686 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05004687 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004688out:
4689 kfree(reply_buf);
4690
Alex Elderf40eb342013-04-25 15:09:42 -05004691 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004692}
4693
Alex Elder2df3fac2013-05-06 09:51:30 -05004694static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05004695{
Alex Elder2df3fac2013-05-06 09:51:30 -05004696 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05004697 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05004698
Josh Durgin1617e402013-06-12 14:43:10 -07004699 ret = rbd_dev_v2_image_size(rbd_dev);
4700 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004701 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07004702
Alex Elder2df3fac2013-05-06 09:51:30 -05004703 if (first_time) {
4704 ret = rbd_dev_v2_header_onetime(rbd_dev);
4705 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004706 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05004707 }
4708
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004709 ret = rbd_dev_v2_snap_context(rbd_dev);
Ilya Dryomovd194cd12015-08-31 18:22:10 +03004710 if (ret && first_time) {
4711 kfree(rbd_dev->header.object_prefix);
4712 rbd_dev->header.object_prefix = NULL;
4713 }
Alex Elder117973f2012-08-31 17:29:55 -05004714
4715 return ret;
4716}
4717
Ilya Dryomova720ae02014-07-23 17:11:19 +04004718static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4719{
4720 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4721
4722 if (rbd_dev->image_format == 1)
4723 return rbd_dev_v1_header_info(rbd_dev);
4724
4725 return rbd_dev_v2_header_info(rbd_dev);
4726}
4727
Alex Elder1ddbe942012-01-29 13:57:44 -06004728/*
Alex Elder499afd52012-02-02 08:13:29 -06004729 * Get a unique rbd identifier for the given new rbd_dev, and add
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004730 * the rbd_dev to the global list.
Alex Elder1ddbe942012-01-29 13:57:44 -06004731 */
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004732static int rbd_dev_id_get(struct rbd_device *rbd_dev)
Alex Elderb7f23c32012-01-29 13:57:43 -06004733{
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004734 int new_dev_id;
4735
Ilya Dryomov9b60e702013-12-13 15:28:57 +02004736 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4737 0, minor_to_rbd_dev_id(1 << MINORBITS),
4738 GFP_KERNEL);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004739 if (new_dev_id < 0)
4740 return new_dev_id;
4741
4742 rbd_dev->dev_id = new_dev_id;
Alex Elder499afd52012-02-02 08:13:29 -06004743
4744 spin_lock(&rbd_dev_list_lock);
4745 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4746 spin_unlock(&rbd_dev_list_lock);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004747
Ilya Dryomov70eebd22013-12-13 15:28:56 +02004748 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004749
4750 return 0;
Alex Elder1ddbe942012-01-29 13:57:44 -06004751}
Alex Elderb7f23c32012-01-29 13:57:43 -06004752
Alex Elder1ddbe942012-01-29 13:57:44 -06004753/*
Alex Elder499afd52012-02-02 08:13:29 -06004754 * Remove an rbd_dev from the global list, and record that its
4755 * identifier is no longer in use.
Alex Elder1ddbe942012-01-29 13:57:44 -06004756 */
Alex Eldere2839302012-08-29 17:11:06 -05004757static void rbd_dev_id_put(struct rbd_device *rbd_dev)
Alex Elder1ddbe942012-01-29 13:57:44 -06004758{
Alex Elder499afd52012-02-02 08:13:29 -06004759 spin_lock(&rbd_dev_list_lock);
4760 list_del_init(&rbd_dev->node);
4761 spin_unlock(&rbd_dev_list_lock);
Alex Elderb7f23c32012-01-29 13:57:43 -06004762
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004763 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4764
4765 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
Alex Elderb7f23c32012-01-29 13:57:43 -06004766}
4767
Alex Eldera725f65e2012-02-02 08:13:30 -06004768/*
Alex Eldere28fff262012-02-02 08:13:30 -06004769 * Skips over white space at *buf, and updates *buf to point to the
4770 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06004771 * the token (string of non-white space characters) found. Note
4772 * that *buf must be terminated with '\0'.
Alex Eldere28fff262012-02-02 08:13:30 -06004773 */
4774static inline size_t next_token(const char **buf)
4775{
4776 /*
4777 * These are the characters that produce nonzero for
4778 * isspace() in the "C" and "POSIX" locales.
4779 */
4780 const char *spaces = " \f\n\r\t\v";
4781
4782 *buf += strspn(*buf, spaces); /* Find start of token */
4783
4784 return strcspn(*buf, spaces); /* Return token length */
4785}
4786
4787/*
Alex Elderea3352f2012-07-09 21:04:23 -05004788 * Finds the next token in *buf, dynamically allocates a buffer big
4789 * enough to hold a copy of it, and copies the token into the new
4790 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4791 * that a duplicate buffer is created even for a zero-length token.
4792 *
4793 * Returns a pointer to the newly-allocated duplicate, or a null
4794 * pointer if memory for the duplicate was not available. If
4795 * the lenp argument is a non-null pointer, the length of the token
4796 * (not including the '\0') is returned in *lenp.
4797 *
4798 * If successful, the *buf pointer will be updated to point beyond
4799 * the end of the found token.
4800 *
4801 * Note: uses GFP_KERNEL for allocation.
4802 */
4803static inline char *dup_token(const char **buf, size_t *lenp)
4804{
4805 char *dup;
4806 size_t len;
4807
4808 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05004809 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05004810 if (!dup)
4811 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05004812 *(dup + len) = '\0';
4813 *buf += len;
4814
4815 if (lenp)
4816 *lenp = len;
4817
4818 return dup;
4819}
4820
4821/*
Alex Elder859c31d2012-10-25 23:34:42 -05004822 * Parse the options provided for an "rbd add" (i.e., rbd image
4823 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4824 * and the data written is passed here via a NUL-terminated buffer.
4825 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05004826 *
Alex Elder859c31d2012-10-25 23:34:42 -05004827 * The information extracted from these options is recorded in
4828 * the other parameters which return dynamically-allocated
4829 * structures:
4830 * ceph_opts
4831 * The address of a pointer that will refer to a ceph options
4832 * structure. Caller must release the returned pointer using
4833 * ceph_destroy_options() when it is no longer needed.
4834 * rbd_opts
4835 * Address of an rbd options pointer. Fully initialized by
4836 * this function; caller must release with kfree().
4837 * spec
4838 * Address of an rbd image specification pointer. Fully
4839 * initialized by this function based on parsed options.
4840 * Caller must release with rbd_spec_put().
4841 *
4842 * The options passed take this form:
4843	 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4844 * where:
4845 * <mon_addrs>
4846 * A comma-separated list of one or more monitor addresses.
4847 * A monitor address is an ip address, optionally followed
4848 * by a port number (separated by a colon).
4849 * I.e.: ip1[:port1][,ip2[:port2]...]
4850 * <options>
4851 * A comma-separated list of ceph and/or rbd options.
4852 * <pool_name>
4853 * The name of the rados pool containing the rbd image.
4854 * <image_name>
4855 * The name of the image in that pool to map.
4856	 * <snap_name>
4857	 * An optional snapshot name. If provided, the mapping will
4858	 * present data from the image at the time that snapshot was
4859	 * created. The image head is used if no snapshot name is
4860	 * provided. Snapshot mappings are always read-only.
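 *
 * For example, a write to /sys/bus/rbd/add along the lines of
 * (all values illustrative only):
 *
 *   1.2.3.4:6789 name=admin,secret=AQDvsyeC... rbd myimage mysnap
 *
 * would map snapshot "mysnap" of image "myimage" in pool "rbd".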
Alex Eldera725f65e2012-02-02 08:13:30 -06004861 */
Alex Elder859c31d2012-10-25 23:34:42 -05004862static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05004863 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05004864 struct rbd_options **opts,
4865 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06004866{
Alex Elderd22f76e2012-07-12 10:46:35 -05004867 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05004868 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05004869 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05004870 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05004871 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05004872 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004873 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05004874 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05004875 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06004876
4877 /* The first four tokens are required */
4878
Alex Elder7ef32142012-02-02 08:13:30 -06004879 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05004880 if (!len) {
4881 rbd_warn(NULL, "no monitor address(es) provided");
4882 return -EINVAL;
4883 }
Alex Elder0ddebc02012-10-25 23:34:41 -05004884 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05004885 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06004886 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06004887
Alex Elderdc79b112012-10-25 23:34:41 -05004888 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05004889 options = dup_token(&buf, NULL);
4890 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05004891 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004892 if (!*options) {
4893 rbd_warn(NULL, "no options provided");
4894 goto out_err;
4895 }
Alex Eldera725f65e2012-02-02 08:13:30 -06004896
Alex Elder859c31d2012-10-25 23:34:42 -05004897 spec = rbd_spec_alloc();
4898 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05004899 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05004900
4901 spec->pool_name = dup_token(&buf, NULL);
4902 if (!spec->pool_name)
4903 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004904 if (!*spec->pool_name) {
4905 rbd_warn(NULL, "no pool name provided");
4906 goto out_err;
4907 }
Alex Eldere28fff262012-02-02 08:13:30 -06004908
Alex Elder69e7a022012-11-01 08:39:26 -05004909 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05004910 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004911 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004912 if (!*spec->image_name) {
4913 rbd_warn(NULL, "no image name provided");
4914 goto out_err;
4915 }
Alex Eldere28fff262012-02-02 08:13:30 -06004916
Alex Elderf28e5652012-10-25 23:34:41 -05004917 /*
4918 * Snapshot name is optional; default is to use "-"
4919 * (indicating the head/no snapshot).
4920 */
Alex Elder3feeb8942012-08-31 17:29:52 -05004921 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05004922 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05004923 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4924 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05004925 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05004926 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05004927 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05004928 }
Alex Elderecb4dc22013-04-26 09:43:47 -05004929 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4930 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004931 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05004932 *(snap_name + len) = '\0';
4933 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05004934
Alex Elder0ddebc02012-10-25 23:34:41 -05004935 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06004936
Alex Elder4e9afeb2012-10-25 23:34:41 -05004937 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4938 if (!rbd_opts)
4939 goto out_mem;
4940
4941 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004942 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05004943
Alex Elder859c31d2012-10-25 23:34:42 -05004944 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05004945 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05004946 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05004947 if (IS_ERR(copts)) {
4948 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05004949 goto out_err;
4950 }
Alex Elder859c31d2012-10-25 23:34:42 -05004951 kfree(options);
4952
4953 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004954 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05004955 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05004956
Alex Elderdc79b112012-10-25 23:34:41 -05004957 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05004958out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05004959 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05004960out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05004961 kfree(rbd_opts);
4962 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05004963 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05004964
Alex Elderdc79b112012-10-25 23:34:41 -05004965 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06004966}
4967
Alex Elder589d30e2012-07-10 20:30:11 -05004968/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004969 * Return pool id (>= 0) or a negative error code.
4970 */
4971static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4972{
Ilya Dryomova319bf52015-05-15 12:02:17 +03004973 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004974 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004975 int tries = 0;
4976 int ret;
4977
4978again:
4979 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4980 if (ret == -ENOENT && tries++ < 1) {
4981 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4982 &newest_epoch);
4983 if (ret < 0)
4984 return ret;
4985
4986 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
4987 ceph_monc_request_next_osdmap(&rbdc->client->monc);
4988 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03004989 newest_epoch,
4990 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004991 goto again;
4992 } else {
4993 /* the osdmap we have is new enough */
4994 return -ENOENT;
4995 }
4996 }
4997
4998 return ret;
4999}
5000
5001/*
Alex Elder589d30e2012-07-10 20:30:11 -05005002 * An rbd format 2 image has a unique identifier, distinct from the
5003 * name given to it by the user. Internally, that identifier is
5004 * what's used to specify the names of objects related to the image.
5005 *
5006 * A special "rbd id" object is used to map an rbd image name to its
5007 * id. If that object doesn't exist, then there is no v2 rbd image
5008 * with the supplied name.
5009 *
5010 * This function will record the given rbd_dev's image_id field if
5011 * it can be determined, and in that case will return 0. If any
5012 * errors occur a negative errno will be returned and the rbd_dev's
5013 * image_id field will be unchanged (and should be NULL).
5014 */
5015static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5016{
5017 int ret;
5018 size_t size;
5019 char *object_name;
5020 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05005021 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05005022
Alex Elder589d30e2012-07-10 20:30:11 -05005023 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05005024 * When probing a parent image, the image id is already
5025 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05005026 * need to fetch the image id again in this case. We
5027 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05005028 */
Alex Elderc0fba362013-04-25 23:15:08 -05005029 if (rbd_dev->spec->image_id) {
5030 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5031
Alex Elder2c0d0a12012-10-30 19:40:33 -05005032 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05005033 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05005034
5035 /*
Alex Elder589d30e2012-07-10 20:30:11 -05005036 * First, see if the format 2 image id file exists, and if
5037 * so, get the image's persistent id from it.
5038 */
Alex Elder69e7a022012-11-01 08:39:26 -05005039 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005040 object_name = kmalloc(size, GFP_NOIO);
5041 if (!object_name)
5042 return -ENOMEM;
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005043 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005044 dout("rbd id object name is %s\n", object_name);
5045
5046 /* Response will be an encoded string, which includes a length */
5047
5048 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5049 response = kzalloc(size, GFP_NOIO);
5050 if (!response) {
5051 ret = -ENOMEM;
5052 goto out;
5053 }
5054
Alex Elderc0fba362013-04-25 23:15:08 -05005055 /* If it doesn't exist we'll assume it's a format 1 image */
5056
Alex Elder36be9a72013-01-19 00:30:28 -06005057 ret = rbd_obj_method_sync(rbd_dev, object_name,
Alex Elder41579762013-04-21 12:14:45 -05005058 "rbd", "get_id", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05005059 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06005060 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05005061 if (ret == -ENOENT) {
5062 image_id = kstrdup("", GFP_KERNEL);
5063 ret = image_id ? 0 : -ENOMEM;
5064 if (!ret)
5065 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04005066 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05005067 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05005068
Alex Elderc0fba362013-04-25 23:15:08 -05005069 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05005070 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08005071 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05005072 if (!ret)
5073 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05005074 }
5075
5076 if (!ret) {
5077 rbd_dev->spec->image_id = image_id;
5078 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05005079 }
5080out:
5081 kfree(response);
5082 kfree(object_name);
5083
5084 return ret;
5085}
5086
Alex Elder3abef3b2013-05-13 20:35:37 -05005087/*
 5088 * Undo whatever state changes are made by a v1 or v2 header info
5089 * call.
5090 */
Alex Elder6fd48b32013-04-28 23:32:34 -05005091static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5092{
5093 struct rbd_image_header *header;
5094
Ilya Dryomove69b8d42015-01-19 12:06:14 +03005095 rbd_dev_parent_put(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005096
5097 /* Free dynamic fields from the header, then zero it out */
5098
5099 header = &rbd_dev->header;
Alex Elder812164f82013-04-30 00:44:32 -05005100 ceph_put_snap_context(header->snapc);
Alex Elder6fd48b32013-04-28 23:32:34 -05005101 kfree(header->snap_sizes);
5102 kfree(header->snap_names);
5103 kfree(header->object_prefix);
5104 memset(header, 0, sizeof (*header));
5105}
5106
Alex Elder2df3fac2013-05-06 09:51:30 -05005107static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
Alex Eldera30b71b2012-07-10 20:30:11 -05005108{
5109 int ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005110
Alex Elder1e130192012-07-03 16:01:19 -05005111 ret = rbd_dev_v2_object_prefix(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005112 if (ret)
Alex Elder1e130192012-07-03 16:01:19 -05005113 goto out_err;
Alex Elderb1b54022012-07-03 16:01:19 -05005114
Alex Elder2df3fac2013-05-06 09:51:30 -05005115 /*
 5116 * Get and check the features for the image. Currently the
5117 * features are assumed to never change.
5118 */
Alex Elderb1b54022012-07-03 16:01:19 -05005119 ret = rbd_dev_v2_features(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005120 if (ret)
Alex Elderb1b54022012-07-03 16:01:19 -05005121 goto out_err;
Alex Elder35d489f2012-07-03 16:01:19 -05005122
Alex Eldercc070d52013-04-21 12:14:45 -05005123 /* If the image supports fancy striping, get its parameters */
5124
5125 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5126 ret = rbd_dev_v2_striping_info(rbd_dev);
5127 if (ret < 0)
5128 goto out_err;
5129 }
Alex Elder2df3fac2013-05-06 09:51:30 -05005130 /* No support for crypto and compression type format 2 images */
Alex Eldera30b71b2012-07-10 20:30:11 -05005131
Alex Elder35152972012-08-31 17:29:55 -05005132 return 0;
Alex Elder9d475de2012-07-03 16:01:19 -05005133out_err:
Alex Elder642a2532013-05-06 17:40:33 -05005134 rbd_dev->header.features = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005135 kfree(rbd_dev->header.object_prefix);
5136 rbd_dev->header.object_prefix = NULL;
Alex Elder9d475de2012-07-03 16:01:19 -05005137
5138 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005139}
5140
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005141/*
5142 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5143 * rbd_dev_image_probe() recursion depth, which means it's also the
5144 * length of the already discovered part of the parent chain.
5145 */
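/*
 * For illustration: a clone of a clone produces the chain
 * mapped image -> parent -> grandparent.  The probe of the mapped
 * image starts at depth 0, each recursive parent probe adds one, and
 * the whole probe fails with -EINVAL once the chain would exceed
 * RBD_MAX_PARENT_CHAIN_LEN.
 */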
5146static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
Alex Elder83a06262012-10-30 15:47:17 -05005147{
Alex Elder2f82ee52012-10-30 19:40:33 -05005148 struct rbd_device *parent = NULL;
Alex Elder124afba2013-04-26 15:44:36 -05005149 int ret;
5150
5151 if (!rbd_dev->parent_spec)
5152 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005153
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005154 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5155 pr_info("parent chain is too long (%d)\n", depth);
5156 ret = -EINVAL;
5157 goto out_err;
5158 }
5159
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005160 parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
5161 NULL);
5162 if (!parent) {
5163 ret = -ENOMEM;
Alex Elder124afba2013-04-26 15:44:36 -05005164 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005165 }
5166
5167 /*
5168 * Images related by parent/child relationships always share
5169 * rbd_client and spec/parent_spec, so bump their refcounts.
5170 */
5171 __rbd_get_client(rbd_dev->rbd_client);
5172 rbd_spec_get(rbd_dev->parent_spec);
Alex Elder124afba2013-04-26 15:44:36 -05005173
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005174 ret = rbd_dev_image_probe(parent, depth);
Alex Elder124afba2013-04-26 15:44:36 -05005175 if (ret < 0)
5176 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005177
Alex Elder124afba2013-04-26 15:44:36 -05005178 rbd_dev->parent = parent;
Alex Eldera2acd002013-05-08 22:50:04 -05005179 atomic_set(&rbd_dev->parent_ref, 1);
Alex Elder124afba2013-04-26 15:44:36 -05005180 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005181
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005182out_err:
5183 rbd_dev_unparent(rbd_dev);
Markus Elfring1761b222015-11-23 20:16:45 +01005184 rbd_dev_destroy(parent);
Alex Elder124afba2013-04-26 15:44:36 -05005185 return ret;
5186}
5187
Ilya Dryomov811c6682016-04-15 16:22:16 +02005188/*
5189 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5190 * upon return.
5191 */
Alex Elder200a6a82013-04-28 23:32:34 -05005192static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
Alex Elder124afba2013-04-26 15:44:36 -05005193{
Alex Elder83a06262012-10-30 15:47:17 -05005194 int ret;
Alex Elder83a06262012-10-30 15:47:17 -05005195
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005196 /* Get an id and fill in device name. */
Alex Elder83a06262012-10-30 15:47:17 -05005197
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005198 ret = rbd_dev_id_get(rbd_dev);
5199 if (ret)
Ilya Dryomov811c6682016-04-15 16:22:16 +02005200 goto err_out_unlock;
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005201
Alex Elder83a06262012-10-30 15:47:17 -05005202 BUILD_BUG_ON(DEV_NAME_LEN
5203 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5204 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5205
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005206 /* Record our major and minor device numbers. */
Alex Elder83a06262012-10-30 15:47:17 -05005207
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005208 if (!single_major) {
5209 ret = register_blkdev(0, rbd_dev->name);
5210 if (ret < 0)
5211 goto err_out_id;
5212
5213 rbd_dev->major = ret;
5214 rbd_dev->minor = 0;
5215 } else {
5216 rbd_dev->major = rbd_major;
5217 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5218 }
Alex Elder83a06262012-10-30 15:47:17 -05005219
5220 /* Set up the blkdev mapping. */
5221
5222 ret = rbd_init_disk(rbd_dev);
5223 if (ret)
5224 goto err_out_blkdev;
5225
Alex Elderf35a4de2013-05-06 09:51:29 -05005226 ret = rbd_dev_mapping_set(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005227 if (ret)
5228 goto err_out_disk;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04005229
Alex Elderf35a4de2013-05-06 09:51:29 -05005230 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
Josh Durgin22001f62013-09-30 20:10:04 -07005231 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
Alex Elderf35a4de2013-05-06 09:51:29 -05005232
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005233 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5234 ret = device_add(&rbd_dev->dev);
Alex Elderf35a4de2013-05-06 09:51:29 -05005235 if (ret)
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005236 goto err_out_mapping;
Alex Elder83a06262012-10-30 15:47:17 -05005237
Alex Elder83a06262012-10-30 15:47:17 -05005238 /* Everything's ready. Announce the disk to the world. */
5239
Alex Elder129b79d2013-04-26 15:44:36 -05005240 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005241 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005242
Ilya Dryomov811c6682016-04-15 16:22:16 +02005243 add_disk(rbd_dev->disk);
Alex Elder83a06262012-10-30 15:47:17 -05005244 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5245 (unsigned long long) rbd_dev->mapping.size);
5246
5247 return ret;
Alex Elder2f82ee52012-10-30 19:40:33 -05005248
Alex Elderf35a4de2013-05-06 09:51:29 -05005249err_out_mapping:
5250 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005251err_out_disk:
5252 rbd_free_disk(rbd_dev);
5253err_out_blkdev:
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005254 if (!single_major)
5255 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Elder83a06262012-10-30 15:47:17 -05005256err_out_id:
5257 rbd_dev_id_put(rbd_dev);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005258err_out_unlock:
5259 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005260 return ret;
5261}
5262
Alex Elder332bb122013-04-27 09:59:30 -05005263static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5264{
5265 struct rbd_spec *spec = rbd_dev->spec;
5266 size_t size;
5267
5268 /* Record the header object name for this rbd image. */
5269
5270 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5271
5272 if (rbd_dev->image_format == 1)
5273 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
5274 else
5275 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
5276
5277 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
5278 if (!rbd_dev->header_name)
5279 return -ENOMEM;
5280
5281 if (rbd_dev->image_format == 1)
5282 sprintf(rbd_dev->header_name, "%s%s",
5283 spec->image_name, RBD_SUFFIX);
5284 else
5285 sprintf(rbd_dev->header_name, "%s%s",
5286 RBD_HEADER_PREFIX, spec->image_id);
5287 return 0;
5288}
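
/*
 * For illustration (hypothetical names; the prefix and suffix values
 * come from rbd_types.h): a format 1 image named "foo" gets a header
 * object name of "foo" plus RBD_SUFFIX (e.g. "foo.rbd"), while a
 * format 2 image with id "86804c56b6ba" gets RBD_HEADER_PREFIX plus
 * the id (e.g. "rbd_header.86804c56b6ba").
 */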
5289
Alex Elder200a6a82013-04-28 23:32:34 -05005290static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5291{
Alex Elder6fd48b32013-04-28 23:32:34 -05005292 rbd_dev_unprobe(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005293 kfree(rbd_dev->header_name);
Alex Elder6fd48b32013-04-28 23:32:34 -05005294 rbd_dev->header_name = NULL;
5295 rbd_dev->image_format = 0;
5296 kfree(rbd_dev->spec->image_id);
5297 rbd_dev->spec->image_id = NULL;
5298
Alex Elder200a6a82013-04-28 23:32:34 -05005299 rbd_dev_destroy(rbd_dev);
5300}
5301
Alex Eldera30b71b2012-07-10 20:30:11 -05005302/*
5303 * Probe for the existence of the header object for the given rbd
Alex Elder1f3ef782013-05-06 17:40:33 -05005304 * device. If this image is the one being mapped (i.e., not a
5305 * parent), initiate a watch on its header object before using that
5306 * object to get detailed information about the rbd image.
Alex Eldera30b71b2012-07-10 20:30:11 -05005307 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005308static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
Alex Eldera30b71b2012-07-10 20:30:11 -05005309{
5310 int ret;
5311
5312 /*
Alex Elder3abef3b2013-05-13 20:35:37 -05005313 * Get the id from the image id object. Unless there's an
5314 * error, rbd_dev->spec->image_id will be filled in with
5315 * a dynamically-allocated string, and rbd_dev->image_format
5316 * will be set to either 1 or 2.
Alex Eldera30b71b2012-07-10 20:30:11 -05005317 */
5318 ret = rbd_dev_image_id(rbd_dev);
5319 if (ret)
Alex Elderc0fba362013-04-25 23:15:08 -05005320 return ret;
Alex Elderc0fba362013-04-25 23:15:08 -05005321
Alex Elder332bb122013-04-27 09:59:30 -05005322 ret = rbd_dev_header_name(rbd_dev);
5323 if (ret)
5324 goto err_out_format;
5325
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005326 if (!depth) {
Ilya Dryomovfca27062013-12-16 18:02:40 +02005327 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005328 if (ret) {
5329 if (ret == -ENOENT)
5330 pr_info("image %s/%s does not exist\n",
5331 rbd_dev->spec->pool_name,
5332 rbd_dev->spec->image_name);
Alex Elder1f3ef782013-05-06 17:40:33 -05005333 goto out_header_name;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005334 }
Alex Elder1f3ef782013-05-06 17:40:33 -05005335 }
Alex Elderb644de22013-04-27 09:59:31 -05005336
Ilya Dryomova720ae02014-07-23 17:11:19 +04005337 ret = rbd_dev_header_info(rbd_dev);
Alex Elder5655c4d2013-04-25 23:15:08 -05005338 if (ret)
Alex Elderb644de22013-04-27 09:59:31 -05005339 goto err_out_watch;
Alex Elder83a06262012-10-30 15:47:17 -05005340
Ilya Dryomov04077592014-07-23 17:11:20 +04005341 /*
5342 * If this image is the one being mapped, we have pool name and
5343 * id, image name and id, and snap name - need to fill snap id.
5344 * Otherwise this is a parent image, identified by pool, image
5345 * and snap ids - need to fill in names for those ids.
5346 */
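	/*
	 * For example, a user mapping of "rbd/foo@snap1" reaches this
	 * point with pool and image ids already resolved but only the
	 * snapshot's name, so rbd_spec_fill_snap_id() looks up the snap
	 * id; a parent spec carries pool, image and snap ids only, so
	 * rbd_spec_fill_names() resolves the corresponding names.
	 */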
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005347 if (!depth)
Ilya Dryomov04077592014-07-23 17:11:20 +04005348 ret = rbd_spec_fill_snap_id(rbd_dev);
5349 else
5350 ret = rbd_spec_fill_names(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005351 if (ret) {
5352 if (ret == -ENOENT)
5353 pr_info("snap %s/%s@%s does not exist\n",
5354 rbd_dev->spec->pool_name,
5355 rbd_dev->spec->image_name,
5356 rbd_dev->spec->snap_name);
Alex Elder33dca392013-04-30 00:44:33 -05005357 goto err_out_probe;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005358 }
Alex Elder9bb81c92013-04-27 09:59:30 -05005359
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005360 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5361 ret = rbd_dev_v2_parent_info(rbd_dev);
5362 if (ret)
5363 goto err_out_probe;
5364
5365 /*
5366 * Need to warn users if this image is the one being
5367 * mapped and has a parent.
5368 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005369 if (!depth && rbd_dev->parent_spec)
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005370 rbd_warn(rbd_dev,
5371 "WARNING: kernel layering is EXPERIMENTAL!");
5372 }
5373
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005374 ret = rbd_dev_probe_parent(rbd_dev, depth);
Alex Elder30d60ba2013-05-06 09:51:30 -05005375 if (ret)
5376 goto err_out_probe;
Alex Elder83a06262012-10-30 15:47:17 -05005377
Alex Elder30d60ba2013-05-06 09:51:30 -05005378 dout("discovered format %u image, header name is %s\n",
5379 rbd_dev->image_format, rbd_dev->header_name);
Alex Elder30d60ba2013-05-06 09:51:30 -05005380 return 0;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005381
Alex Elder6fd48b32013-04-28 23:32:34 -05005382err_out_probe:
5383 rbd_dev_unprobe(rbd_dev);
Alex Elderb644de22013-04-27 09:59:31 -05005384err_out_watch:
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005385 if (!depth)
Ilya Dryomovfca27062013-12-16 18:02:40 +02005386 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder332bb122013-04-27 09:59:30 -05005387out_header_name:
5388 kfree(rbd_dev->header_name);
5389 rbd_dev->header_name = NULL;
5390err_out_format:
5391 rbd_dev->image_format = 0;
Alex Elder5655c4d2013-04-25 23:15:08 -05005392 kfree(rbd_dev->spec->image_id);
5393 rbd_dev->spec->image_id = NULL;
Alex Elder5655c4d2013-04-25 23:15:08 -05005394 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005395}
5396
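/*
 * Sketch of the expected write format, assuming the layout parsed by
 * rbd_add_parse_args() (monitor addresses, options, pool name, image
 * name and an optional snapshot name); the addresses and names below
 * are hypothetical:
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" \
 *       > /sys/bus/rbd/add
 */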
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005397static ssize_t do_rbd_add(struct bus_type *bus,
5398 const char *buf,
5399 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005400{
Alex Eldercb8627c2012-07-09 21:04:23 -05005401 struct rbd_device *rbd_dev = NULL;
Alex Elderdc79b112012-10-25 23:34:41 -05005402 struct ceph_options *ceph_opts = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005403 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05005404 struct rbd_spec *spec = NULL;
Alex Elder9d3997f2012-10-25 23:34:42 -05005405 struct rbd_client *rbdc;
Alex Elder51344a32013-05-06 07:40:30 -05005406 bool read_only;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005407 int rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005408
5409 if (!try_module_get(THIS_MODULE))
5410 return -ENODEV;
5411
Alex Eldera725f65e2012-02-02 08:13:30 -06005412 /* parse add command */
Alex Elder859c31d2012-10-25 23:34:42 -05005413 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
Alex Elderdc79b112012-10-25 23:34:41 -05005414 if (rc < 0)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005415 goto out;
Alex Eldera725f65e2012-02-02 08:13:30 -06005416
Alex Elder9d3997f2012-10-25 23:34:42 -05005417 rbdc = rbd_get_client(ceph_opts);
5418 if (IS_ERR(rbdc)) {
5419 rc = PTR_ERR(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005420 goto err_out_args;
Alex Elder9d3997f2012-10-25 23:34:42 -05005421 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005422
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005423 /* pick the pool */
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005424 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005425 if (rc < 0) {
5426 if (rc == -ENOENT)
5427 pr_info("pool %s does not exist\n", spec->pool_name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005428 goto err_out_client;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005429 }
Alex Elderc0cd10db2013-04-26 09:43:47 -05005430 spec->pool_id = (u64)rc;
Alex Elder859c31d2012-10-25 23:34:42 -05005431
Alex Elder0903e872012-11-14 12:25:19 -06005432 /* The ceph file layout needs to fit pool id in 32 bits */
5433
Alex Elderc0cd10db2013-04-26 09:43:47 -05005434 if (spec->pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04005435 rbd_warn(NULL, "pool id too large (%llu > %u)",
Alex Elderc0cd10db2013-04-26 09:43:47 -05005436 (unsigned long long)spec->pool_id, U32_MAX);
Alex Elder0903e872012-11-14 12:25:19 -06005437 rc = -EIO;
5438 goto err_out_client;
5439 }
5440
Ilya Dryomovd1475432015-06-22 13:24:48 +03005441 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005442 if (!rbd_dev) {
5443 rc = -ENOMEM;
Alex Elderbd4ba652012-10-25 23:34:42 -05005444 goto err_out_client;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005445 }
Alex Elderc53d5892012-10-25 23:34:42 -05005446 rbdc = NULL; /* rbd_dev now owns this */
5447 spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovd1475432015-06-22 13:24:48 +03005448 rbd_opts = NULL; /* rbd_dev now owns this */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005449
Ilya Dryomov811c6682016-04-15 16:22:16 +02005450 down_write(&rbd_dev->header_rwsem);
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005451 rc = rbd_dev_image_probe(rbd_dev, 0);
Alex Eldera30b71b2012-07-10 20:30:11 -05005452 if (rc < 0)
Alex Elderc53d5892012-10-25 23:34:42 -05005453 goto err_out_rbd_dev;
Alex Elder05fd6f62012-08-29 17:11:07 -05005454
Alex Elder7ce4eef2013-05-06 17:40:33 -05005455 /* If we are mapping a snapshot it must be marked read-only */
5456
Ilya Dryomovd1475432015-06-22 13:24:48 +03005457 read_only = rbd_dev->opts->read_only;
Alex Elder7ce4eef2013-05-06 17:40:33 -05005458 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5459 read_only = true;
5460 rbd_dev->mapping.read_only = read_only;
5461
Alex Elderb536f692013-04-28 23:32:34 -05005462 rc = rbd_dev_device_setup(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005463 if (rc) {
Ilya Dryomove37180c2013-12-16 18:02:41 +02005464 /*
5465 * rbd_dev_header_unwatch_sync() can't be moved into
5466 * rbd_dev_image_release() without refactoring, see
5467 * commit 1f3ef78861ac.
5468 */
5469 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005470 rbd_dev_image_release(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005471 goto out;
Alex Elder3abef3b2013-05-13 20:35:37 -05005472 }
Alex Elderb536f692013-04-28 23:32:34 -05005473
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005474 rc = count;
5475out:
5476 module_put(THIS_MODULE);
5477 return rc;
Alex Elder3abef3b2013-05-13 20:35:37 -05005478
Alex Elderc53d5892012-10-25 23:34:42 -05005479err_out_rbd_dev:
Ilya Dryomov811c6682016-04-15 16:22:16 +02005480 up_write(&rbd_dev->header_rwsem);
Alex Elderc53d5892012-10-25 23:34:42 -05005481 rbd_dev_destroy(rbd_dev);
Alex Elderbd4ba652012-10-25 23:34:42 -05005482err_out_client:
Alex Elder9d3997f2012-10-25 23:34:42 -05005483 rbd_put_client(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005484err_out_args:
Alex Elder859c31d2012-10-25 23:34:42 -05005485 rbd_spec_put(spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03005486 kfree(rbd_opts);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005487 goto out;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005488}
5489
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005490static ssize_t rbd_add(struct bus_type *bus,
5491 const char *buf,
5492 size_t count)
5493{
5494 if (single_major)
5495 return -EINVAL;
5496
5497 return do_rbd_add(bus, buf, count);
5498}
5499
5500static ssize_t rbd_add_single_major(struct bus_type *bus,
5501 const char *buf,
5502 size_t count)
5503{
5504 return do_rbd_add(bus, buf, count);
5505}
5506
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005507static void rbd_dev_device_release(struct rbd_device *rbd_dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005508{
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005509 rbd_free_disk(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005510 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005511 device_del(&rbd_dev->dev);
Alex Elder6d80b132013-05-06 07:40:30 -05005512 rbd_dev_mapping_clear(rbd_dev);
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005513 if (!single_major)
5514 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Eldere2839302012-08-29 17:11:06 -05005515 rbd_dev_id_put(rbd_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005516}
5517
Alex Elder05a46af2013-04-26 15:44:36 -05005518static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5519{
Alex Elderad945fc2013-04-26 15:44:36 -05005520 while (rbd_dev->parent) {
Alex Elder05a46af2013-04-26 15:44:36 -05005521 struct rbd_device *first = rbd_dev;
5522 struct rbd_device *second = first->parent;
5523 struct rbd_device *third;
5524
5525 /*
 5526 * Walk down to the parent that has no grandparent and
5527 * remove it.
5528 */
5529 while (second && (third = second->parent)) {
5530 first = second;
5531 second = third;
5532 }
Alex Elderad945fc2013-04-26 15:44:36 -05005533 rbd_assert(second);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005534 rbd_dev_image_release(second);
Alex Elderad945fc2013-04-26 15:44:36 -05005535 first->parent = NULL;
5536 first->parent_overlap = 0;
5537
5538 rbd_assert(first->parent_spec);
Alex Elder05a46af2013-04-26 15:44:36 -05005539 rbd_spec_put(first->parent_spec);
5540 first->parent_spec = NULL;
Alex Elder05a46af2013-04-26 15:44:36 -05005541 }
5542}
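/*
 * For illustration: given a chain mapped -> A -> B set up by
 * rbd_dev_probe_parent(), each pass of the loop above releases the
 * deepest remaining ancestor first (B, then A), clearing the child's
 * parent pointer, overlap and parent_spec as it goes.
 */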
5543
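/*
 * Sketch of the expected usage: the buffer holds the decimal device id
 * of the mapping to tear down (the N in /dev/rbdN and in
 * /sys/bus/rbd/devices/N), for example (id hypothetical):
 *
 *   echo 1 > /sys/bus/rbd/remove
 *
 * Removal fails with -EBUSY while the device is still open.
 */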
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005544static ssize_t do_rbd_remove(struct bus_type *bus,
5545 const char *buf,
5546 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005547{
5548 struct rbd_device *rbd_dev = NULL;
Alex Elder751cc0e2013-05-31 15:17:01 -05005549 struct list_head *tmp;
5550 int dev_id;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005551 unsigned long ul;
Alex Elder82a442d2013-05-31 17:40:44 -05005552 bool already = false;
Alex Elder0d8189e2013-04-27 09:59:30 -05005553 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005554
Jingoo Hanbb8e0e82013-09-11 14:20:07 -07005555 ret = kstrtoul(buf, 10, &ul);
Alex Elder0d8189e2013-04-27 09:59:30 -05005556 if (ret)
5557 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005558
5559 /* convert to int; abort if we lost anything in the conversion */
Alex Elder751cc0e2013-05-31 15:17:01 -05005560 dev_id = (int)ul;
5561 if (dev_id != ul)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005562 return -EINVAL;
5563
Alex Elder751cc0e2013-05-31 15:17:01 -05005564 ret = -ENOENT;
5565 spin_lock(&rbd_dev_list_lock);
5566 list_for_each(tmp, &rbd_dev_list) {
5567 rbd_dev = list_entry(tmp, struct rbd_device, node);
5568 if (rbd_dev->dev_id == dev_id) {
5569 ret = 0;
5570 break;
5571 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005572 }
Alex Elder751cc0e2013-05-31 15:17:01 -05005573 if (!ret) {
5574 spin_lock_irq(&rbd_dev->lock);
5575 if (rbd_dev->open_count)
5576 ret = -EBUSY;
5577 else
Alex Elder82a442d2013-05-31 17:40:44 -05005578 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5579 &rbd_dev->flags);
Alex Elder751cc0e2013-05-31 15:17:01 -05005580 spin_unlock_irq(&rbd_dev->lock);
5581 }
5582 spin_unlock(&rbd_dev_list_lock);
Alex Elder82a442d2013-05-31 17:40:44 -05005583 if (ret < 0 || already)
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005584 return ret;
Alex Elder751cc0e2013-05-31 15:17:01 -05005585
Ilya Dryomovfca27062013-12-16 18:02:40 +02005586 rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomovfca27062013-12-16 18:02:40 +02005587
Josh Durgin98752012013-08-29 17:26:31 -07005588 /*
5589 * Don't free anything from rbd_dev->disk until after all
5590 * notifies are completely processed. Otherwise
5591 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5592 * in a potential use after free of rbd_dev->disk or rbd_dev.
5593 */
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005594 rbd_dev_device_release(rbd_dev);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005595 rbd_dev_image_release(rbd_dev);
Alex Elderaafb2302012-09-06 16:00:54 -05005596
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005597 return count;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005598}
5599
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005600static ssize_t rbd_remove(struct bus_type *bus,
5601 const char *buf,
5602 size_t count)
5603{
5604 if (single_major)
5605 return -EINVAL;
5606
5607 return do_rbd_remove(bus, buf, count);
5608}
5609
5610static ssize_t rbd_remove_single_major(struct bus_type *bus,
5611 const char *buf,
5612 size_t count)
5613{
5614 return do_rbd_remove(bus, buf, count);
5615}
5616
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005617/*
5618 * create control files in sysfs
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005619 * /sys/bus/rbd/...
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005620 */
5621static int rbd_sysfs_init(void)
5622{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005623 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005624
Alex Elderfed4c142012-02-07 12:03:36 -06005625 ret = device_register(&rbd_root_dev);
Alex Elder21079782012-01-24 10:08:36 -06005626 if (ret < 0)
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005627 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005628
Alex Elderfed4c142012-02-07 12:03:36 -06005629 ret = bus_register(&rbd_bus_type);
5630 if (ret < 0)
5631 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005632
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005633 return ret;
5634}
5635
5636static void rbd_sysfs_cleanup(void)
5637{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005638 bus_unregister(&rbd_bus_type);
Alex Elderfed4c142012-02-07 12:03:36 -06005639 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005640}
5641
Alex Elder1c2a9df2013-05-01 12:43:03 -05005642static int rbd_slab_init(void)
5643{
5644 rbd_assert(!rbd_img_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005645 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
Alex Elder868311b2013-05-01 12:43:03 -05005646 if (!rbd_img_request_cache)
5647 return -ENOMEM;
5648
5649 rbd_assert(!rbd_obj_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005650 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
Alex Elder78c2a442013-05-01 12:43:04 -05005651 if (!rbd_obj_request_cache)
5652 goto out_err;
5653
5654 rbd_assert(!rbd_segment_name_cache);
5655 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02005656 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
Alex Elder78c2a442013-05-01 12:43:04 -05005657 if (rbd_segment_name_cache)
Alex Elder1c2a9df2013-05-01 12:43:03 -05005658 return 0;
Alex Elder78c2a442013-05-01 12:43:04 -05005659out_err:
Julia Lawall13bf2832015-09-13 14:15:26 +02005660 kmem_cache_destroy(rbd_obj_request_cache);
5661 rbd_obj_request_cache = NULL;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005662
Alex Elder868311b2013-05-01 12:43:03 -05005663 kmem_cache_destroy(rbd_img_request_cache);
5664 rbd_img_request_cache = NULL;
5665
Alex Elder1c2a9df2013-05-01 12:43:03 -05005666 return -ENOMEM;
5667}
5668
5669static void rbd_slab_exit(void)
5670{
Alex Elder78c2a442013-05-01 12:43:04 -05005671 rbd_assert(rbd_segment_name_cache);
5672 kmem_cache_destroy(rbd_segment_name_cache);
5673 rbd_segment_name_cache = NULL;
5674
Alex Elder868311b2013-05-01 12:43:03 -05005675 rbd_assert(rbd_obj_request_cache);
5676 kmem_cache_destroy(rbd_obj_request_cache);
5677 rbd_obj_request_cache = NULL;
5678
Alex Elder1c2a9df2013-05-01 12:43:03 -05005679 rbd_assert(rbd_img_request_cache);
5680 kmem_cache_destroy(rbd_img_request_cache);
5681 rbd_img_request_cache = NULL;
5682}
5683
Alex Eldercc344fa2013-02-19 12:25:56 -06005684static int __init rbd_init(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005685{
5686 int rc;
5687
Alex Elder1e32d342013-01-30 11:13:33 -06005688 if (!libceph_compatible(NULL)) {
5689 rbd_warn(NULL, "libceph incompatibility (quitting)");
Alex Elder1e32d342013-01-30 11:13:33 -06005690 return -EINVAL;
5691 }
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005692
Alex Elder1c2a9df2013-05-01 12:43:03 -05005693 rc = rbd_slab_init();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005694 if (rc)
5695 return rc;
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005696
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005697 /*
5698 * The number of active work items is limited by the number of
Ilya Dryomovf77303b2015-04-22 18:28:13 +03005699 * rbd devices * queue depth, so leave @max_active at default.
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005700 */
5701 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5702 if (!rbd_wq) {
5703 rc = -ENOMEM;
5704 goto err_out_slab;
5705 }
5706
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005707 if (single_major) {
5708 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5709 if (rbd_major < 0) {
5710 rc = rbd_major;
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005711 goto err_out_wq;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005712 }
5713 }
5714
Alex Elder1c2a9df2013-05-01 12:43:03 -05005715 rc = rbd_sysfs_init();
5716 if (rc)
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005717 goto err_out_blkdev;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005718
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005719 if (single_major)
5720 pr_info("loaded (major %d)\n", rbd_major);
5721 else
5722 pr_info("loaded\n");
5723
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005724 return 0;
5725
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005726err_out_blkdev:
5727 if (single_major)
5728 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005729err_out_wq:
5730 destroy_workqueue(rbd_wq);
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005731err_out_slab:
5732 rbd_slab_exit();
Alex Elder1c2a9df2013-05-01 12:43:03 -05005733 return rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005734}
5735
Alex Eldercc344fa2013-02-19 12:25:56 -06005736static void __exit rbd_exit(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005737{
Ilya Dryomovffe312c2014-05-20 15:46:04 +04005738 ida_destroy(&rbd_dev_id_ida);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005739 rbd_sysfs_cleanup();
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005740 if (single_major)
5741 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005742 destroy_workqueue(rbd_wq);
Alex Elder1c2a9df2013-05-01 12:43:03 -05005743 rbd_slab_exit();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005744}
5745
5746module_init(rbd_init);
5747module_exit(rbd_exit);
5748
Alex Elderd552c612013-05-31 20:13:09 -05005749MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005750MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5751MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005752/* following authorship retained from original osdblk.c */
5753MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5754
Ilya Dryomov90da2582013-12-13 15:28:56 +02005755MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005756MODULE_LICENSE("GPL");