
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
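
/*
 * Illustrative note (editorial, not from the original source): these
 * "safe" helpers are used for reference-count style fields such as
 * rbd_dev->parent_ref, where 0 means no further references may be
 * taken and the count must never be resurrected once it has dropped
 * to 0.  Callers typically check for a positive return value before
 * touching the guarded resource, e.g.
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
 *		... safe to use rbd_dev->parent ...
 */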

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
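
/*
 * Editorial worked example: for a 32-bit int, MAX_INT_FORMAT_WIDTH is
 * (5 * 4) / 2 + 1 = 11 characters, enough for the sign and ten digits
 * of INT_MIN ("-2147483648"), so DEV_NAME_LEN (32) easily covers
 * "rbd" plus any possible device id and a terminating NUL.
 */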

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
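
/*
 * Editorial example (values are hypothetical): mapping image "foo" in
 * pool "rbd" at its head revision might yield a spec along the lines of
 *
 *	pool_id  = 2,              pool_name  = "rbd"
 *	image_id = "10074b0dc51d", image_name = "foo"
 *	snap_id  = CEPH_NOSNAP,    snap_name  = RBD_SNAP_HEAD_NAME ("-")
 *
 * with each id looked up from the corresponding user-supplied name.
 */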

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
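
/*
 * Usage sketch (editorial note): a submission or completion path walks
 * an image request's object requests with these wrappers, e.g.
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		... submit or account for obj_request ...
 *
 * The _safe variant iterates in reverse and tolerates removal of the
 * current entry, which is what teardown paths need.
 */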

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
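
/*
 * Editorial note: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, each device
 * owns a block of 16 minors when single_major is enabled, so dev_id 0
 * starts at minor 0, dev_id 1 at minor 16, and so on; the low 4 bits
 * of a minor select a partition.  minor_to_rbd_dev_id() is the inverse.
 */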

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* refuse if anyone else has this device open */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
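
/*
 * Illustrative note (editorial): these per-mapping options arrive as
 * part of the string written to the sysfs "add" interface and are
 * split on commas by the libceph option parser; each token libceph
 * does not recognize itself is handed to parse_rbd_opts_token()
 * below.  For example, a mapping created with the (hypothetical)
 * option string "queue_depth=128,read_only" would end up with
 * rbd_opts->queue_depth == 128 and rbd_opts->read_only == true.
 */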

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
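
/*
 * Editorial example: comparing ids 6 and 14, snapid_compare_reverse()
 * returns 1 for (6, 14) and -1 for (14, 6), so sorting with it yields
 * { 14, 6 } -- highest (most recent) snapshot id first, matching the
 * order in which the OSDs maintain the snapshot context.
 */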

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
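
/*
 * Editorial example (prefixes are hypothetical): with obj_order 22
 * (4 MiB objects), image offset 0x1234567 falls in segment 4.  A
 * format 1 image with object_prefix "rb.0.1009.6b8b4567" would name
 * that object "rb.0.1009.6b8b4567.000000000004", while a format 2
 * image with prefix "rbd_data.1009" would use
 * "rbd_data.1009.0000000000000004" (16 hex digits instead of 12).
 */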
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001201
Alex Elder65ccfe22012-08-09 10:33:26 -07001202static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1203{
1204 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001205
Alex Elder65ccfe22012-08-09 10:33:26 -07001206 return offset & (segment_size - 1);
1207}
1208
1209static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1210 u64 offset, u64 length)
1211{
1212 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1213
1214 offset &= segment_size - 1;
1215
Alex Elderaafb2302012-09-06 16:00:54 -05001216 rbd_assert(length <= U64_MAX - offset);
Alex Elder65ccfe22012-08-09 10:33:26 -07001217 if (offset + length > segment_size)
1218 length = segment_size - offset;
1219
1220 return length;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001221}
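
/*
 * Editorial example: with 4 MiB segments, an 8192-byte request whose
 * in-segment offset is 0x3ffc00 (1024 bytes short of the boundary) is
 * clipped by rbd_segment_length() to 1024 bytes; the remainder has to
 * be issued as a separate object request against the next segment.
 */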

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001326
Alex Elderf7760da2012-10-20 22:17:27 -05001327/*
1328 * Clone a portion of a bio chain, starting at the given byte offset
1329 * into the first bio in the source chain and continuing for the
1330 * number of bytes indicated. The result is another bio chain of
1331 * exactly the given length, or a null pointer on error.
1332 *
1333 * The bio_src and offset parameters are both in-out. On entry they
1334 * refer to the first source bio and the offset into that bio where
1335 * the start of data to be cloned is located.
1336 *
1337 * On return, bio_src is updated to refer to the bio in the source
 1338 * chain that contains the first un-cloned byte, and *offset will
1339 * contain the offset of that byte within that bio.
1340 */
1341static struct bio *bio_chain_clone_range(struct bio **bio_src,
1342 unsigned int *offset,
1343 unsigned int len,
1344 gfp_t gfpmask)
1345{
1346 struct bio *bi = *bio_src;
1347 unsigned int off = *offset;
1348 struct bio *chain = NULL;
1349 struct bio **end;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001350
Alex Elderf7760da2012-10-20 22:17:27 -05001351 /* Build up a chain of clone bios up to the limit */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001352
Kent Overstreet4f024f32013-10-11 15:44:27 -07001353 if (!bi || off >= bi->bi_iter.bi_size || !len)
Alex Elderf7760da2012-10-20 22:17:27 -05001354 return NULL; /* Nothing to clone */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001355
Alex Elderf7760da2012-10-20 22:17:27 -05001356 end = &chain;
1357 while (len) {
1358 unsigned int bi_size;
1359 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001360
Alex Elderf5400b72012-11-01 10:17:15 -05001361 if (!bi) {
1362 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
Alex Elderf7760da2012-10-20 22:17:27 -05001363 goto out_err; /* EINVAL; ran out of bio's */
Alex Elderf5400b72012-11-01 10:17:15 -05001364 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001365 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
Alex Elderf7760da2012-10-20 22:17:27 -05001366 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1367 if (!bio)
1368 goto out_err; /* ENOMEM */
1369
1370 *end = bio;
1371 end = &bio->bi_next;
1372
1373 off += bi_size;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001374 if (off == bi->bi_iter.bi_size) {
Alex Elderf7760da2012-10-20 22:17:27 -05001375 bi = bi->bi_next;
1376 off = 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001377 }
Alex Elderf7760da2012-10-20 22:17:27 -05001378 len -= bi_size;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001379 }
Alex Elderf7760da2012-10-20 22:17:27 -05001380 *bio_src = bi;
1381 *offset = off;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001382
Alex Elderf7760da2012-10-20 22:17:27 -05001383 return chain;
1384out_err:
1385 bio_chain_put(chain);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001386
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001387 return NULL;
1388}
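
/*
 * Illustrative sketch, not taken from a real caller: successive calls
 * consume the source chain, with *bio_src and *offset advancing past
 * the bytes already cloned.
 *
 *	struct bio *src = original_chain;	// hypothetical source chain
 *	unsigned int off = 0;
 *	struct bio *first, *rest;
 *
 *	first = bio_chain_clone_range(&src, &off, 4096, GFP_NOIO);
 *	// first covers bytes 0..4095 of the chain; src/off now refer
 *	// to byte 4096
 *	rest = bio_chain_clone_range(&src, &off, 8192, GFP_NOIO);
 *	// rest covers the next 8192 bytes, possibly spanning several bios
 */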
1389
Alex Elder926f9b32013-02-11 12:33:24 -06001390/*
1391 * The default/initial value for all object request flags is 0. For
1392 * each flag, once its value is set to 1 it is never reset to 0
1393 * again.
1394 */
Alex Elder6365d332013-02-11 12:33:24 -06001395static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1396{
1397 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
Alex Elder6365d332013-02-11 12:33:24 -06001398 struct rbd_device *rbd_dev;
1399
Alex Elder57acbaa2013-02-11 12:33:24 -06001400 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001401 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
Alex Elder6365d332013-02-11 12:33:24 -06001402 obj_request);
1403 }
1404}
1405
1406static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1407{
1408 smp_mb();
1409 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1410}
1411
Alex Elder57acbaa2013-02-11 12:33:24 -06001412static void obj_request_done_set(struct rbd_obj_request *obj_request)
1413{
1414 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1415 struct rbd_device *rbd_dev = NULL;
1416
1417 if (obj_request_img_data_test(obj_request))
1418 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001419 rbd_warn(rbd_dev, "obj_request %p already marked done",
Alex Elder57acbaa2013-02-11 12:33:24 -06001420 obj_request);
1421 }
1422}
1423
1424static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1425{
1426 smp_mb();
1427 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1428}
1429
Alex Elder5679c592013-02-11 12:33:24 -06001430/*
1431 * This sets the KNOWN flag after (possibly) setting the EXISTS
1432 * flag. The latter is set based on the "exists" value provided.
1433 *
1434 * Note that for our purposes once an object exists it never goes
 1435 * away again. It's possible that the responses from two existence
1436 * checks are separated by the creation of the target object, and
1437 * the first ("doesn't exist") response arrives *after* the second
1438 * ("does exist"). In that case we ignore the second one.
1439 */
1440static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1441 bool exists)
1442{
1443 if (exists)
1444 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1445 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1446 smp_mb();
1447}
1448
1449static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1450{
1451 smp_mb();
1452 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1453}
1454
1455static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1456{
1457 smp_mb();
1458 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1459}
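
/*
 * Illustrative sketch of the intended query pattern; the helpers named
 * below are placeholders, not functions defined in this file:
 *
 *	if (!obj_request_known_test(obj_request))
 *		stat_the_target_object(obj_request);	// sets KNOWN (+ EXISTS)
 *	if (obj_request_exists_test(obj_request))
 *		write_the_object_directly(obj_request);
 *	else
 *		copy_up_from_parent_first(obj_request);
 */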
1460
Ilya Dryomov96385562014-06-10 13:53:29 +04001461static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1462{
1463 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1464
1465 return obj_request->img_offset <
1466 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1467}
1468
Alex Elderbf0d5f502012-11-22 00:00:08 -06001469static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1470{
Alex Elder37206ee2013-02-20 17:32:08 -06001471 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1472 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001473 kref_get(&obj_request->kref);
1474}
1475
1476static void rbd_obj_request_destroy(struct kref *kref);
1477static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1478{
1479 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001480 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1481 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001482 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1483}
1484
Alex Elder0f2d5be2014-04-26 14:21:44 +04001485static void rbd_img_request_get(struct rbd_img_request *img_request)
1486{
1487 dout("%s: img %p (was %d)\n", __func__, img_request,
1488 atomic_read(&img_request->kref.refcount));
1489 kref_get(&img_request->kref);
1490}
1491
Alex Eldere93f3152013-05-08 22:50:04 -05001492static bool img_request_child_test(struct rbd_img_request *img_request);
1493static void rbd_parent_request_destroy(struct kref *kref);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001494static void rbd_img_request_destroy(struct kref *kref);
1495static void rbd_img_request_put(struct rbd_img_request *img_request)
1496{
1497 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001498 dout("%s: img %p (was %d)\n", __func__, img_request,
1499 atomic_read(&img_request->kref.refcount));
Alex Eldere93f3152013-05-08 22:50:04 -05001500 if (img_request_child_test(img_request))
1501 kref_put(&img_request->kref, rbd_parent_request_destroy);
1502 else
1503 kref_put(&img_request->kref, rbd_img_request_destroy);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001504}
1505
1506static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1507 struct rbd_obj_request *obj_request)
1508{
Alex Elder25dcf952013-01-25 17:08:55 -06001509 rbd_assert(obj_request->img_request == NULL);
1510
Alex Elderb155e862013-04-15 14:50:37 -05001511 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001512 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001513 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001514 rbd_assert(!obj_request_img_data_test(obj_request));
1515 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001516 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001517 img_request->obj_request_count++;
1518 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001519 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1520 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001521}
1522
1523static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1524 struct rbd_obj_request *obj_request)
1525{
1526 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001527
Alex Elder37206ee2013-02-20 17:32:08 -06001528 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1529 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001530 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001531 rbd_assert(img_request->obj_request_count > 0);
1532 img_request->obj_request_count--;
1533 rbd_assert(obj_request->which == img_request->obj_request_count);
1534 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001535 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001536 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001537 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001538 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001539 rbd_obj_request_put(obj_request);
1540}
1541
1542static bool obj_request_type_valid(enum obj_request_type type)
1543{
1544 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001545 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001546 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001547 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001548 return true;
1549 default:
1550 return false;
1551 }
1552}
1553
Alex Elderbf0d5f502012-11-22 00:00:08 -06001554static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1555 struct rbd_obj_request *obj_request)
1556{
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001557 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001558 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1559}
1560
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001561static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1562{
1563 dout("%s %p\n", __func__, obj_request);
1564 ceph_osdc_cancel_request(obj_request->osd_req);
1565}
1566
1567/*
1568 * Wait for an object request to complete. If interrupted, cancel the
1569 * underlying osd request.
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001570 *
1571 * @timeout: in jiffies, 0 means "wait forever"
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001572 */
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001573static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1574 unsigned long timeout)
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001575{
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001576 long ret;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001577
1578 dout("%s %p\n", __func__, obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001579 ret = wait_for_completion_interruptible_timeout(
1580 &obj_request->completion,
1581 ceph_timeout_jiffies(timeout));
1582 if (ret <= 0) {
1583 if (ret == 0)
1584 ret = -ETIMEDOUT;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001585 rbd_obj_request_end(obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001586 } else {
1587 ret = 0;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001588 }
1589
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001590 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1591 return ret;
1592}
1593
1594static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1595{
1596 return __rbd_obj_request_wait(obj_request, 0);
1597}
1598
1599static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1600 unsigned long timeout)
1601{
1602 return __rbd_obj_request_wait(obj_request, timeout);
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001603}
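
/*
 * Illustrative sketch; the 30 second bound is an arbitrary example, not
 * a value used by the driver:
 *
 *	ret = rbd_obj_request_submit(osdc, obj_request);
 *	if (ret)
 *		goto out;
 *	ret = rbd_obj_request_wait_timeout(obj_request, 30 * HZ);
 *	if (ret)
 *		goto out;	// -ETIMEDOUT or -ERESTARTSYS
 */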
1604
Alex Elderbf0d5f502012-11-22 00:00:08 -06001605static void rbd_img_request_complete(struct rbd_img_request *img_request)
1606{
Alex Elder55f27e02013-04-10 12:34:25 -05001607
Alex Elder37206ee2013-02-20 17:32:08 -06001608 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001609
1610 /*
1611 * If no error occurred, compute the aggregate transfer
1612 * count for the image request. We could instead use
1613 * atomic64_cmpxchg() to update it as each object request
1614 * completes; not clear which way is better off hand.
1615 */
1616 if (!img_request->result) {
1617 struct rbd_obj_request *obj_request;
1618 u64 xferred = 0;
1619
1620 for_each_obj_request(img_request, obj_request)
1621 xferred += obj_request->xferred;
1622 img_request->xferred = xferred;
1623 }
1624
Alex Elderbf0d5f502012-11-22 00:00:08 -06001625 if (img_request->callback)
1626 img_request->callback(img_request);
1627 else
1628 rbd_img_request_put(img_request);
1629}
1630
Alex Elder0c425242013-02-08 09:55:49 -06001631/*
1632 * The default/initial value for all image request flags is 0. Each
1633 * is conditionally set to 1 at image request initialization time
 1634 * and currently never changes thereafter.
1635 */
1636static void img_request_write_set(struct rbd_img_request *img_request)
1637{
1638 set_bit(IMG_REQ_WRITE, &img_request->flags);
1639 smp_mb();
1640}
1641
1642static bool img_request_write_test(struct rbd_img_request *img_request)
1643{
1644 smp_mb();
1645 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1646}
1647
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001648/*
 1649 * Set the discard flag when the img_request is a discard request
1650 */
1651static void img_request_discard_set(struct rbd_img_request *img_request)
1652{
1653 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1654 smp_mb();
1655}
1656
1657static bool img_request_discard_test(struct rbd_img_request *img_request)
1658{
1659 smp_mb();
1660 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1661}
1662
Alex Elder9849e982013-01-24 16:13:36 -06001663static void img_request_child_set(struct rbd_img_request *img_request)
1664{
1665 set_bit(IMG_REQ_CHILD, &img_request->flags);
1666 smp_mb();
1667}
1668
Alex Eldere93f3152013-05-08 22:50:04 -05001669static void img_request_child_clear(struct rbd_img_request *img_request)
1670{
1671 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1672 smp_mb();
1673}
1674
Alex Elder9849e982013-01-24 16:13:36 -06001675static bool img_request_child_test(struct rbd_img_request *img_request)
1676{
1677 smp_mb();
1678 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1679}
1680
Alex Elderd0b2e942013-01-24 16:13:36 -06001681static void img_request_layered_set(struct rbd_img_request *img_request)
1682{
1683 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1684 smp_mb();
1685}
1686
Alex Eldera2acd002013-05-08 22:50:04 -05001687static void img_request_layered_clear(struct rbd_img_request *img_request)
1688{
1689 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1690 smp_mb();
1691}
1692
Alex Elderd0b2e942013-01-24 16:13:36 -06001693static bool img_request_layered_test(struct rbd_img_request *img_request)
1694{
1695 smp_mb();
1696 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1697}
1698
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001699static enum obj_operation_type
1700rbd_img_request_op_type(struct rbd_img_request *img_request)
1701{
1702 if (img_request_write_test(img_request))
1703 return OBJ_OP_WRITE;
1704 else if (img_request_discard_test(img_request))
1705 return OBJ_OP_DISCARD;
1706 else
1707 return OBJ_OP_READ;
1708}
1709
Alex Elder6e2a4502013-03-27 09:16:30 -05001710static void
1711rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1712{
Alex Elderb9434c52013-04-19 15:34:50 -05001713 u64 xferred = obj_request->xferred;
1714 u64 length = obj_request->length;
1715
Alex Elder6e2a4502013-03-27 09:16:30 -05001716 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1717 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001718 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001719 /*
Josh Durgin17c1cc12013-08-26 17:55:38 -07001720 * ENOENT means a hole in the image. We zero-fill the entire
1721 * length of the request. A short read also implies zero-fill
1722 * to the end of the request. An error requires the whole
1723 * length of the request to be reported finished with an error
1724 * to the block layer. In each case we update the xferred
1725 * count to indicate the whole request was satisfied.
Alex Elder6e2a4502013-03-27 09:16:30 -05001726 */
Alex Elderb9434c52013-04-19 15:34:50 -05001727 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001728 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001729 if (obj_request->type == OBJ_REQUEST_BIO)
1730 zero_bio_chain(obj_request->bio_list, 0);
1731 else
1732 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001733 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001734 } else if (xferred < length && !obj_request->result) {
1735 if (obj_request->type == OBJ_REQUEST_BIO)
1736 zero_bio_chain(obj_request->bio_list, xferred);
1737 else
1738 zero_pages(obj_request->pages, xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001739 }
Josh Durgin17c1cc12013-08-26 17:55:38 -07001740 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001741 obj_request_done_set(obj_request);
1742}
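
/*
 * Worked example of the zero-fill rules above (illustrative): a 16 KiB
 * read that completes with only 4 KiB transferred has bytes 4 KiB..16 KiB
 * zeroed and reports xferred as the full 16 KiB; a read that fails with
 * -ENOENT has all 16 KiB zeroed and its result cleared to 0, so the
 * block layer sees a successful read of zeroes.
 */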
1743
Alex Elderbf0d5f502012-11-22 00:00:08 -06001744static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1745{
Alex Elder37206ee2013-02-20 17:32:08 -06001746 dout("%s: obj %p cb %p\n", __func__, obj_request,
1747 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001748 if (obj_request->callback)
1749 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001750 else
1751 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001752}
1753
Alex Elderc47f9372013-02-26 14:23:07 -06001754static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
Alex Elder39bf2c52013-02-26 14:23:07 -06001755{
1756 dout("%s: obj %p\n", __func__, obj_request);
1757 obj_request_done_set(obj_request);
1758}
1759
Alex Elderc47f9372013-02-26 14:23:07 -06001760static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001761{
Alex Elder57acbaa2013-02-11 12:33:24 -06001762 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001763 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001764 bool layered = false;
1765
1766 if (obj_request_img_data_test(obj_request)) {
1767 img_request = obj_request->img_request;
1768 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001769 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001770 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001771
1772 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1773 obj_request, img_request, obj_request->result,
1774 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001775 if (layered && obj_request->result == -ENOENT &&
1776 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001777 rbd_img_parent_read(obj_request);
1778 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001779 rbd_img_obj_request_read_callback(obj_request);
1780 else
1781 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001782}
1783
Alex Elderc47f9372013-02-26 14:23:07 -06001784static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001785{
Sage Weil1b83bef2013-02-25 16:11:12 -08001786 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1787 obj_request->result, obj_request->length);
1788 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001789 * There is no such thing as a successful short write. Set
1790 * it to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001791 */
1792 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001793 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001794}
1795
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001796static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1797{
1798 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1799 obj_request->result, obj_request->length);
1800 /*
1801 * There is no such thing as a successful short discard. Set
1802 * it to our originally-requested length.
1803 */
1804 obj_request->xferred = obj_request->length;
Josh Durgind0265de2014-04-07 16:54:10 -07001805 /* discarding a non-existent object is not a problem */
1806 if (obj_request->result == -ENOENT)
1807 obj_request->result = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001808 obj_request_done_set(obj_request);
1809}
1810
Alex Elderfbfab532013-02-08 09:55:48 -06001811/*
1812 * For a simple stat call there's nothing to do. We'll do more if
1813 * this is part of a write sequence for a layered image.
1814 */
Alex Elderc47f9372013-02-26 14:23:07 -06001815static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001816{
Alex Elder37206ee2013-02-20 17:32:08 -06001817 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001818 obj_request_done_set(obj_request);
1819}
1820
Alex Elderbf0d5f502012-11-22 00:00:08 -06001821static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1822 struct ceph_msg *msg)
1823{
1824 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001825 u16 opcode;
1826
Alex Elder37206ee2013-02-20 17:32:08 -06001827 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001828 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001829 if (obj_request_img_data_test(obj_request)) {
1830 rbd_assert(obj_request->img_request);
1831 rbd_assert(obj_request->which != BAD_WHICH);
1832 } else {
1833 rbd_assert(obj_request->which == BAD_WHICH);
1834 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001835
Sage Weil1b83bef2013-02-25 16:11:12 -08001836 if (osd_req->r_result < 0)
1837 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001838
Ilya Dryomov7cc69d42014-02-25 16:22:27 +02001839 rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001840
Alex Elderc47f9372013-02-26 14:23:07 -06001841 /*
1842 * We support a 64-bit length, but ultimately it has to be
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01001843 * passed to the block layer, which just supports a 32-bit
1844 * length field.
Alex Elderc47f9372013-02-26 14:23:07 -06001845 */
Sage Weil1b83bef2013-02-25 16:11:12 -08001846 obj_request->xferred = osd_req->r_reply_op_len[0];
Alex Elder8b3e1a52013-01-24 16:13:36 -06001847 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001848
Alex Elder79528732013-04-03 21:32:51 -05001849 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001850 switch (opcode) {
1851 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001852 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001853 break;
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001854 case CEPH_OSD_OP_SETALLOCHINT:
1855 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
1856 /* fall through */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001857 case CEPH_OSD_OP_WRITE:
Alex Elderc47f9372013-02-26 14:23:07 -06001858 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001859 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001860 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001861 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001862 break;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001863 case CEPH_OSD_OP_DELETE:
1864 case CEPH_OSD_OP_TRUNCATE:
1865 case CEPH_OSD_OP_ZERO:
1866 rbd_osd_discard_callback(obj_request);
1867 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001868 case CEPH_OSD_OP_CALL:
Alex Elderb8d70032012-11-30 17:53:04 -06001869 case CEPH_OSD_OP_NOTIFY_ACK:
Alex Elder9969ebc2013-01-18 12:31:10 -06001870 case CEPH_OSD_OP_WATCH:
Alex Elderc47f9372013-02-26 14:23:07 -06001871 rbd_osd_trivial_callback(obj_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06001872 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001873 default:
Ilya Dryomov9584d502014-07-11 12:11:20 +04001874 rbd_warn(NULL, "%s: unsupported op %hu",
Alex Elderbf0d5f502012-11-22 00:00:08 -06001875 obj_request->object_name, (unsigned short) opcode);
1876 break;
1877 }
1878
Alex Elder07741302013-02-05 23:41:50 -06001879 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001880 rbd_obj_request_complete(obj_request);
1881}
1882
Alex Elder9d4df012013-04-19 15:34:50 -05001883static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001884{
1885 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001886 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001887 u64 snap_id;
Alex Elder430c28c2013-04-03 21:32:51 -05001888
Alex Elder8c042b02013-04-03 01:28:58 -05001889 rbd_assert(osd_req != NULL);
Alex Elder430c28c2013-04-03 21:32:51 -05001890
Alex Elder9d4df012013-04-19 15:34:50 -05001891 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
Alex Elder8c042b02013-04-03 01:28:58 -05001892 ceph_osdc_build_request(osd_req, obj_request->offset,
Alex Elder9d4df012013-04-19 15:34:50 -05001893 NULL, snap_id, NULL);
1894}
1895
1896static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1897{
1898 struct rbd_img_request *img_request = obj_request->img_request;
1899 struct ceph_osd_request *osd_req = obj_request->osd_req;
1900 struct ceph_snap_context *snapc;
1901 struct timespec mtime = CURRENT_TIME;
1902
1903 rbd_assert(osd_req != NULL);
1904
1905 snapc = img_request ? img_request->snapc : NULL;
1906 ceph_osdc_build_request(osd_req, obj_request->offset,
1907 snapc, CEPH_NOSNAP, &mtime);
Alex Elder430c28c2013-04-03 21:32:51 -05001908}
1909
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001910/*
1911 * Create an osd request. A read request has one osd op (read).
1912 * A write request has either one (watch) or two (hint+write) osd ops.
1913 * (All rbd data writes are prefixed with an allocation hint op, but
1914 * technically osd watch is a write request, hence this distinction.)
1915 */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001916static struct ceph_osd_request *rbd_osd_req_create(
1917 struct rbd_device *rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001918 enum obj_operation_type op_type,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001919 unsigned int num_ops,
Alex Elder430c28c2013-04-03 21:32:51 -05001920 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001921{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001922 struct ceph_snap_context *snapc = NULL;
1923 struct ceph_osd_client *osdc;
1924 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001925
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001926 if (obj_request_img_data_test(obj_request) &&
1927 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
Alex Elder6365d332013-02-11 12:33:24 -06001928 struct rbd_img_request *img_request = obj_request->img_request;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001929 if (op_type == OBJ_OP_WRITE) {
1930 rbd_assert(img_request_write_test(img_request));
1931 } else {
1932 rbd_assert(img_request_discard_test(img_request));
1933 }
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001934 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001935 }
1936
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001937 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001938
1939 /* Allocate and initialize the request, for the num_ops ops */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001940
1941 osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001942 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1943 GFP_ATOMIC);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001944 if (!osd_req)
1945 return NULL; /* ENOMEM */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001946
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001947 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001948 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001949 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001950 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001951
1952 osd_req->r_callback = rbd_osd_req_callback;
1953 osd_req->r_priv = obj_request;
1954
Ilya Dryomov3c972c92014-01-27 17:40:20 +02001955 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1956 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001957
Alex Elderbf0d5f502012-11-22 00:00:08 -06001958 return osd_req;
1959}
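
/*
 * Illustrative sketch of the op counts rbd_osd_req_create() accepts,
 * matching the rbd_assert() above:
 *
 *	rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, obj_request);
 *	rbd_osd_req_create(rbd_dev, OBJ_OP_DISCARD, 1, obj_request);
 *	rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 2, obj_request);
 *						// alloc hint + write
 *	rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1, obj_request);
 *						// watch (no hint)
 */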
1960
Alex Elder0eefd472013-04-19 15:34:50 -05001961/*
Josh Durgind3246fb2014-04-07 16:49:21 -07001962 * Create a copyup osd request based on the information in the object
 1963 * request supplied. A copyup request has two or three osd ops:
1964 * copyup method call, potentially a hint op, and a write or truncate
1965 * or zero op.
Alex Elder0eefd472013-04-19 15:34:50 -05001966 */
1967static struct ceph_osd_request *
1968rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1969{
1970 struct rbd_img_request *img_request;
1971 struct ceph_snap_context *snapc;
1972 struct rbd_device *rbd_dev;
1973 struct ceph_osd_client *osdc;
1974 struct ceph_osd_request *osd_req;
Josh Durgind3246fb2014-04-07 16:49:21 -07001975 int num_osd_ops = 3;
Alex Elder0eefd472013-04-19 15:34:50 -05001976
1977 rbd_assert(obj_request_img_data_test(obj_request));
1978 img_request = obj_request->img_request;
1979 rbd_assert(img_request);
Josh Durgind3246fb2014-04-07 16:49:21 -07001980 rbd_assert(img_request_write_test(img_request) ||
1981 img_request_discard_test(img_request));
Alex Elder0eefd472013-04-19 15:34:50 -05001982
Josh Durgind3246fb2014-04-07 16:49:21 -07001983 if (img_request_discard_test(img_request))
1984 num_osd_ops = 2;
1985
1986 /* Allocate and initialize the request, for all the ops */
Alex Elder0eefd472013-04-19 15:34:50 -05001987
1988 snapc = img_request->snapc;
1989 rbd_dev = img_request->rbd_dev;
1990 osdc = &rbd_dev->rbd_client->client->osdc;
Josh Durgind3246fb2014-04-07 16:49:21 -07001991 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
1992 false, GFP_ATOMIC);
Alex Elder0eefd472013-04-19 15:34:50 -05001993 if (!osd_req)
1994 return NULL; /* ENOMEM */
1995
1996 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1997 osd_req->r_callback = rbd_osd_req_callback;
1998 osd_req->r_priv = obj_request;
1999
Ilya Dryomov3c972c92014-01-27 17:40:20 +02002000 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
2001 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elder0eefd472013-04-19 15:34:50 -05002002
Alex Elder0eefd472013-04-19 15:34:50 -05002003 return osd_req;
2004}
2005
2006
Alex Elderbf0d5f502012-11-22 00:00:08 -06002007static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2008{
2009 ceph_osdc_put_request(osd_req);
2010}
2011
2012/* object_name is assumed to be a non-null pointer and NUL-terminated */
2013
2014static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2015 u64 offset, u64 length,
2016 enum obj_request_type type)
2017{
2018 struct rbd_obj_request *obj_request;
2019 size_t size;
2020 char *name;
2021
2022 rbd_assert(obj_request_type_valid(type));
2023
2024 size = strlen(object_name) + 1;
Alex Elderf907ad52013-05-01 12:43:03 -05002025 name = kmalloc(size, GFP_KERNEL);
2026 if (!name)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002027 return NULL;
2028
Alex Elder868311b2013-05-01 12:43:03 -05002029 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
Alex Elderf907ad52013-05-01 12:43:03 -05002030 if (!obj_request) {
2031 kfree(name);
2032 return NULL;
2033 }
2034
Alex Elderbf0d5f502012-11-22 00:00:08 -06002035 obj_request->object_name = memcpy(name, object_name, size);
2036 obj_request->offset = offset;
2037 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06002038 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002039 obj_request->which = BAD_WHICH;
2040 obj_request->type = type;
2041 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06002042 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002043 kref_init(&obj_request->kref);
2044
Alex Elder37206ee2013-02-20 17:32:08 -06002045 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2046 offset, length, (int)type, obj_request);
2047
Alex Elderbf0d5f502012-11-22 00:00:08 -06002048 return obj_request;
2049}
2050
2051static void rbd_obj_request_destroy(struct kref *kref)
2052{
2053 struct rbd_obj_request *obj_request;
2054
2055 obj_request = container_of(kref, struct rbd_obj_request, kref);
2056
Alex Elder37206ee2013-02-20 17:32:08 -06002057 dout("%s: obj %p\n", __func__, obj_request);
2058
Alex Elderbf0d5f502012-11-22 00:00:08 -06002059 rbd_assert(obj_request->img_request == NULL);
2060 rbd_assert(obj_request->which == BAD_WHICH);
2061
2062 if (obj_request->osd_req)
2063 rbd_osd_req_destroy(obj_request->osd_req);
2064
2065 rbd_assert(obj_request_type_valid(obj_request->type));
2066 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06002067 case OBJ_REQUEST_NODATA:
2068 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06002069 case OBJ_REQUEST_BIO:
2070 if (obj_request->bio_list)
2071 bio_chain_put(obj_request->bio_list);
2072 break;
Alex Elder788e2df2013-01-17 12:25:27 -06002073 case OBJ_REQUEST_PAGES:
2074 if (obj_request->pages)
2075 ceph_release_page_vector(obj_request->pages,
2076 obj_request->page_count);
2077 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002078 }
2079
Alex Elderf907ad52013-05-01 12:43:03 -05002080 kfree(obj_request->object_name);
Alex Elder868311b2013-05-01 12:43:03 -05002081 obj_request->object_name = NULL;
2082 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002083}
2084
Alex Elderfb65d2282013-05-08 22:50:04 -05002085/* It's OK to call this for a device with no parent */
2086
2087static void rbd_spec_put(struct rbd_spec *spec);
2088static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2089{
2090 rbd_dev_remove_parent(rbd_dev);
2091 rbd_spec_put(rbd_dev->parent_spec);
2092 rbd_dev->parent_spec = NULL;
2093 rbd_dev->parent_overlap = 0;
2094}
2095
Alex Elderbf0d5f502012-11-22 00:00:08 -06002096/*
Alex Eldera2acd002013-05-08 22:50:04 -05002097 * Parent image reference counting is used to determine when an
2098 * image's parent fields can be safely torn down--after there are no
2099 * more in-flight requests to the parent image. When the last
2100 * reference is dropped, cleaning them up is safe.
2101 */
2102static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2103{
2104 int counter;
2105
2106 if (!rbd_dev->parent_spec)
2107 return;
2108
2109 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2110 if (counter > 0)
2111 return;
2112
2113 /* Last reference; clean up parent data structures */
2114
2115 if (!counter)
2116 rbd_dev_unparent(rbd_dev);
2117 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04002118 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002119}
2120
2121/*
2122 * If an image has a non-zero parent overlap, get a reference to its
2123 * parent.
2124 *
2125 * Returns true if the rbd device has a parent with a non-zero
2126 * overlap and a reference for it was successfully taken, or
2127 * false otherwise.
2128 */
2129static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2130{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002131 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002132
2133 if (!rbd_dev->parent_spec)
2134 return false;
2135
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002136 down_read(&rbd_dev->header_rwsem);
2137 if (rbd_dev->parent_overlap)
2138 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2139 up_read(&rbd_dev->header_rwsem);
Alex Eldera2acd002013-05-08 22:50:04 -05002140
2141 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04002142 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002143
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002144 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002145}
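
/*
 * Illustrative sketch: the parent reference is taken when a layered
 * image request is set up and dropped when that request is torn down,
 * mirroring rbd_img_request_create() and rbd_img_request_destroy()
 * below:
 *
 *	if (rbd_dev_parent_get(rbd_dev))
 *		img_request_layered_set(img_request);
 *	...
 *	if (img_request_layered_test(img_request)) {
 *		img_request_layered_clear(img_request);
 *		rbd_dev_parent_put(img_request->rbd_dev);
 *	}
 */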
2146
Alex Elderbf0d5f502012-11-22 00:00:08 -06002147/*
2148 * Caller is responsible for filling in the list of object requests
2149 * that comprises the image request, and the Linux request pointer
2150 * (if there is one).
2151 */
Alex Eldercc344fa2013-02-19 12:25:56 -06002152static struct rbd_img_request *rbd_img_request_create(
2153 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06002154 u64 offset, u64 length,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002155 enum obj_operation_type op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07002156 struct ceph_snap_context *snapc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002157{
2158 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002159
Ilya Dryomov7a716aa2014-08-05 11:25:54 +04002160 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002161 if (!img_request)
2162 return NULL;
2163
Alex Elderbf0d5f502012-11-22 00:00:08 -06002164 img_request->rq = NULL;
2165 img_request->rbd_dev = rbd_dev;
2166 img_request->offset = offset;
2167 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06002168 img_request->flags = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002169 if (op_type == OBJ_OP_DISCARD) {
2170 img_request_discard_set(img_request);
2171 img_request->snapc = snapc;
2172 } else if (op_type == OBJ_OP_WRITE) {
Alex Elder0c425242013-02-08 09:55:49 -06002173 img_request_write_set(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07002174 img_request->snapc = snapc;
Alex Elder0c425242013-02-08 09:55:49 -06002175 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002176 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06002177 }
Alex Eldera2acd002013-05-08 22:50:04 -05002178 if (rbd_dev_parent_get(rbd_dev))
Alex Elderd0b2e942013-01-24 16:13:36 -06002179 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002180 spin_lock_init(&img_request->completion_lock);
2181 img_request->next_completion = 0;
2182 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06002183 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002184 img_request->obj_request_count = 0;
2185 INIT_LIST_HEAD(&img_request->obj_requests);
2186 kref_init(&img_request->kref);
2187
Alex Elder37206ee2013-02-20 17:32:08 -06002188 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002189 obj_op_name(op_type), offset, length, img_request);
Alex Elder37206ee2013-02-20 17:32:08 -06002190
Alex Elderbf0d5f502012-11-22 00:00:08 -06002191 return img_request;
2192}
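
/*
 * Illustrative sketch of the usual life cycle for a write; the fill and
 * submit helpers are defined further down in this file, and "rq" stands
 * in for the originating block layer request:
 *
 *	img_request = rbd_img_request_create(rbd_dev, offset, length,
 *					     OBJ_OP_WRITE, snapc);
 *	if (!img_request)
 *		return -ENOMEM;
 *	img_request->rq = rq;
 *	ret = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, rq->bio);
 *	if (!ret)
 *		ret = rbd_img_request_submit(img_request);
 *	if (ret)
 *		rbd_img_request_put(img_request);
 */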
2193
2194static void rbd_img_request_destroy(struct kref *kref)
2195{
2196 struct rbd_img_request *img_request;
2197 struct rbd_obj_request *obj_request;
2198 struct rbd_obj_request *next_obj_request;
2199
2200 img_request = container_of(kref, struct rbd_img_request, kref);
2201
Alex Elder37206ee2013-02-20 17:32:08 -06002202 dout("%s: img %p\n", __func__, img_request);
2203
Alex Elderbf0d5f502012-11-22 00:00:08 -06002204 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2205 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06002206 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002207
Alex Eldera2acd002013-05-08 22:50:04 -05002208 if (img_request_layered_test(img_request)) {
2209 img_request_layered_clear(img_request);
2210 rbd_dev_parent_put(img_request->rbd_dev);
2211 }
2212
Josh Durginbef95452014-04-04 17:47:52 -07002213 if (img_request_write_test(img_request) ||
2214 img_request_discard_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05002215 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002216
Alex Elder1c2a9df2013-05-01 12:43:03 -05002217 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002218}
2219
Alex Eldere93f3152013-05-08 22:50:04 -05002220static struct rbd_img_request *rbd_parent_request_create(
2221 struct rbd_obj_request *obj_request,
2222 u64 img_offset, u64 length)
2223{
2224 struct rbd_img_request *parent_request;
2225 struct rbd_device *rbd_dev;
2226
2227 rbd_assert(obj_request->img_request);
2228 rbd_dev = obj_request->img_request->rbd_dev;
2229
Josh Durgin4e752f02014-04-08 11:12:11 -07002230 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002231 length, OBJ_OP_READ, NULL);
Alex Eldere93f3152013-05-08 22:50:04 -05002232 if (!parent_request)
2233 return NULL;
2234
2235 img_request_child_set(parent_request);
2236 rbd_obj_request_get(obj_request);
2237 parent_request->obj_request = obj_request;
2238
2239 return parent_request;
2240}
2241
2242static void rbd_parent_request_destroy(struct kref *kref)
2243{
2244 struct rbd_img_request *parent_request;
2245 struct rbd_obj_request *orig_request;
2246
2247 parent_request = container_of(kref, struct rbd_img_request, kref);
2248 orig_request = parent_request->obj_request;
2249
2250 parent_request->obj_request = NULL;
2251 rbd_obj_request_put(orig_request);
2252 img_request_child_clear(parent_request);
2253
2254 rbd_img_request_destroy(kref);
2255}
2256
Alex Elder12178572013-02-08 09:55:49 -06002257static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2258{
Alex Elder6365d332013-02-11 12:33:24 -06002259 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06002260 unsigned int xferred;
2261 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002262 bool more;
Alex Elder12178572013-02-08 09:55:49 -06002263
Alex Elder6365d332013-02-11 12:33:24 -06002264 rbd_assert(obj_request_img_data_test(obj_request));
2265 img_request = obj_request->img_request;
2266
Alex Elder12178572013-02-08 09:55:49 -06002267 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2268 xferred = (unsigned int)obj_request->xferred;
2269 result = obj_request->result;
2270 if (result) {
2271 struct rbd_device *rbd_dev = img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002272 enum obj_operation_type op_type;
2273
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002274 if (img_request_discard_test(img_request))
2275 op_type = OBJ_OP_DISCARD;
2276 else if (img_request_write_test(img_request))
2277 op_type = OBJ_OP_WRITE;
2278 else
2279 op_type = OBJ_OP_READ;
Alex Elder12178572013-02-08 09:55:49 -06002280
Ilya Dryomov9584d502014-07-11 12:11:20 +04002281 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002282 obj_op_name(op_type), obj_request->length,
2283 obj_request->img_offset, obj_request->offset);
Ilya Dryomov9584d502014-07-11 12:11:20 +04002284 rbd_warn(rbd_dev, " result %d xferred %x",
Alex Elder12178572013-02-08 09:55:49 -06002285 result, xferred);
2286 if (!img_request->result)
2287 img_request->result = result;
Ilya Dryomov082a75d2015-04-25 15:56:15 +03002288 /*
2289 * Need to end I/O on the entire obj_request worth of
2290 * bytes in case of error.
2291 */
2292 xferred = obj_request->length;
Alex Elder12178572013-02-08 09:55:49 -06002293 }
2294
Alex Elderf1a47392013-04-19 15:34:50 -05002295 /* Image object requests don't own their page array */
2296
2297 if (obj_request->type == OBJ_REQUEST_PAGES) {
2298 obj_request->pages = NULL;
2299 obj_request->page_count = 0;
2300 }
2301
Alex Elder8b3e1a52013-01-24 16:13:36 -06002302 if (img_request_child_test(img_request)) {
2303 rbd_assert(img_request->obj_request != NULL);
2304 more = obj_request->which < img_request->obj_request_count - 1;
2305 } else {
2306 rbd_assert(img_request->rq != NULL);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01002307
2308 more = blk_update_request(img_request->rq, result, xferred);
2309 if (!more)
2310 __blk_mq_end_request(img_request->rq, result);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002311 }
2312
2313 return more;
Alex Elder12178572013-02-08 09:55:49 -06002314}
2315
Alex Elder21692382013-04-05 01:27:12 -05002316static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2317{
2318 struct rbd_img_request *img_request;
2319 u32 which = obj_request->which;
2320 bool more = true;
2321
Alex Elder6365d332013-02-11 12:33:24 -06002322 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05002323 img_request = obj_request->img_request;
2324
2325 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2326 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05002327 rbd_assert(img_request->obj_request_count > 0);
2328 rbd_assert(which != BAD_WHICH);
2329 rbd_assert(which < img_request->obj_request_count);
Alex Elder21692382013-04-05 01:27:12 -05002330
2331 spin_lock_irq(&img_request->completion_lock);
2332 if (which != img_request->next_completion)
2333 goto out;
2334
2335 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05002336 rbd_assert(more);
2337 rbd_assert(which < img_request->obj_request_count);
2338
2339 if (!obj_request_done_test(obj_request))
2340 break;
Alex Elder12178572013-02-08 09:55:49 -06002341 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002342 which++;
2343 }
2344
2345 rbd_assert(more ^ (which == img_request->obj_request_count));
2346 img_request->next_completion = which;
2347out:
2348 spin_unlock_irq(&img_request->completion_lock);
Alex Elder0f2d5be2014-04-26 14:21:44 +04002349 rbd_img_request_put(img_request);
Alex Elder21692382013-04-05 01:27:12 -05002350
2351 if (!more)
2352 rbd_img_request_complete(img_request);
2353}
2354
Alex Elderf1a47392013-04-19 15:34:50 -05002355/*
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002356 * Add individual osd ops to the given ceph_osd_request and prepare
2357 * them for submission. num_ops is the current number of
 2358 * osd operations already added to the object request.
2359 */
2360static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2361 struct ceph_osd_request *osd_request,
2362 enum obj_operation_type op_type,
2363 unsigned int num_ops)
2364{
2365 struct rbd_img_request *img_request = obj_request->img_request;
2366 struct rbd_device *rbd_dev = img_request->rbd_dev;
2367 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2368 u64 offset = obj_request->offset;
2369 u64 length = obj_request->length;
2370 u64 img_end;
2371 u16 opcode;
2372
2373 if (op_type == OBJ_OP_DISCARD) {
Josh Durgind3246fb2014-04-07 16:49:21 -07002374 if (!offset && length == object_size &&
2375 (!img_request_layered_test(img_request) ||
2376 !obj_request_overlaps_parent(obj_request))) {
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002377 opcode = CEPH_OSD_OP_DELETE;
2378 } else if ((offset + length == object_size)) {
2379 opcode = CEPH_OSD_OP_TRUNCATE;
2380 } else {
2381 down_read(&rbd_dev->header_rwsem);
2382 img_end = rbd_dev->header.image_size;
2383 up_read(&rbd_dev->header_rwsem);
2384
2385 if (obj_request->img_offset + length == img_end)
2386 opcode = CEPH_OSD_OP_TRUNCATE;
2387 else
2388 opcode = CEPH_OSD_OP_ZERO;
2389 }
2390 } else if (op_type == OBJ_OP_WRITE) {
2391 opcode = CEPH_OSD_OP_WRITE;
2392 osd_req_op_alloc_hint_init(osd_request, num_ops,
2393 object_size, object_size);
2394 num_ops++;
2395 } else {
2396 opcode = CEPH_OSD_OP_READ;
2397 }
2398
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002399 if (opcode == CEPH_OSD_OP_DELETE)
Yan, Zheng144cba12015-04-27 11:09:54 +08002400 osd_req_op_init(osd_request, num_ops, opcode, 0);
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002401 else
2402 osd_req_op_extent_init(osd_request, num_ops, opcode,
2403 offset, length, 0, 0);
2404
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002405 if (obj_request->type == OBJ_REQUEST_BIO)
2406 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2407 obj_request->bio_list, length);
2408 else if (obj_request->type == OBJ_REQUEST_PAGES)
2409 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2410 obj_request->pages, length,
2411 offset & ~PAGE_MASK, false, false);
2412
2413 /* Discards are also writes */
2414 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2415 rbd_osd_req_format_write(obj_request);
2416 else
2417 rbd_osd_req_format_read(obj_request);
2418}
2419
2420/*
Alex Elderf1a47392013-04-19 15:34:50 -05002421 * Split up an image request into one or more object requests, each
2422 * to a different object. The "type" parameter indicates whether
2423 * "data_desc" is the pointer to the head of a list of bio
2424 * structures, or the base of a page array. In either case this
2425 * function assumes data_desc describes memory sufficient to hold
2426 * all data described by the image request.
2427 */
2428static int rbd_img_request_fill(struct rbd_img_request *img_request,
2429 enum obj_request_type type,
2430 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002431{
2432 struct rbd_device *rbd_dev = img_request->rbd_dev;
2433 struct rbd_obj_request *obj_request = NULL;
2434 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002435 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002436 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002437 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002438 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002439 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002440 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002441
Alex Elderf1a47392013-04-19 15:34:50 -05002442 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2443 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002444
Alex Elder7da22d22013-01-24 16:13:36 -06002445 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002446 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002447 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002448 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002449
2450 if (type == OBJ_REQUEST_BIO) {
2451 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002452 rbd_assert(img_offset ==
2453 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002454 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002455 pages = data_desc;
2456 }
2457
Alex Elderbf0d5f502012-11-22 00:00:08 -06002458 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002459 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002460 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002461 u64 offset;
2462 u64 length;
2463
Alex Elder7da22d22013-01-24 16:13:36 -06002464 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002465 if (!object_name)
2466 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06002467 offset = rbd_segment_offset(rbd_dev, img_offset);
2468 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002469 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002470 offset, length, type);
Alex Elder78c2a442013-05-01 12:43:04 -05002471 /* object request has its own copy of the object name */
2472 rbd_segment_name_free(object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002473 if (!obj_request)
2474 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002475
Josh Durgin03507db2013-08-27 14:45:46 -07002476 /*
2477 * set obj_request->img_request before creating the
2478 * osd_request so that it gets the right snapc
2479 */
2480 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002481
Alex Elderf1a47392013-04-19 15:34:50 -05002482 if (type == OBJ_REQUEST_BIO) {
2483 unsigned int clone_size;
2484
2485 rbd_assert(length <= (u64)UINT_MAX);
2486 clone_size = (unsigned int)length;
2487 obj_request->bio_list =
2488 bio_chain_clone_range(&bio_list,
2489 &bio_offset,
2490 clone_size,
2491 GFP_ATOMIC);
2492 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002493 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002494 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002495 unsigned int page_count;
2496
2497 obj_request->pages = pages;
2498 page_count = (u32)calc_pages_for(offset, length);
2499 obj_request->page_count = page_count;
2500 if ((offset + length) & ~PAGE_MASK)
2501 page_count--; /* more on last page */
2502 pages += page_count;
2503 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002504
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002505 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2506 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2507 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002508 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002509 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002510
Alex Elder2fa12322013-04-05 01:27:12 -05002511 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002512 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002513 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002514
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002515 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2516
2517 rbd_img_request_get(img_request);
2518
Alex Elder7da22d22013-01-24 16:13:36 -06002519 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002520 resid -= length;
2521 }
2522
2523 return 0;
2524
Alex Elderbf0d5f502012-11-22 00:00:08 -06002525out_unwind:
2526 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002527 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002528
2529 return -ENOMEM;
2530}
2531
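/*
 * Completion callback for a copyup request. The copyup pages are no
 * longer needed, so release them, then finish the original request
 * via the normal image object callback.
 */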
Alex Elder3d7efd12013-04-19 15:34:50 -05002532static void
Alex Elder0eefd472013-04-19 15:34:50 -05002533rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2534{
2535 struct rbd_img_request *img_request;
2536 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002537 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002538 u32 page_count;
2539
Josh Durgind3246fb2014-04-07 16:49:21 -07002540 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2541 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002542 rbd_assert(obj_request_img_data_test(obj_request));
2543 img_request = obj_request->img_request;
2544 rbd_assert(img_request);
2545
2546 rbd_dev = img_request->rbd_dev;
2547 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002548
Alex Elderebda6402013-05-10 16:29:22 -05002549 pages = obj_request->copyup_pages;
2550 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002551 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002552 page_count = obj_request->copyup_page_count;
2553 rbd_assert(page_count);
2554 obj_request->copyup_page_count = 0;
2555 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002556
2557 /*
2558 * We want the transfer count to reflect the size of the
2559 * original write request. There is no such thing as a
2560 * successful short write, so if the request was successful
2561 * we can just set it to the originally-requested length.
2562 */
2563 if (!obj_request->result)
2564 obj_request->xferred = obj_request->length;
2565
2566 /* Finish up with the normal image object callback */
2567
2568 rbd_img_obj_callback(obj_request);
2569}
2570
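/*
 * Called when the full-object read from the parent image completes.
 * On success, build a new osd request that carries the copyup op
 * (using the pages just read) followed by the original op(s), and
 * submit it in place of the original request.
 */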
2571static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002572rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2573{
2574 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002575 struct ceph_osd_request *osd_req;
2576 struct ceph_osd_client *osdc;
2577 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002578 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002579 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002580 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002581 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002582 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002583
2584 rbd_assert(img_request_child_test(img_request));
2585
2586 /* First get what we need from the image request */
2587
2588 pages = img_request->copyup_pages;
2589 rbd_assert(pages != NULL);
2590 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002591 page_count = img_request->copyup_page_count;
2592 rbd_assert(page_count);
2593 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002594
2595 orig_request = img_request->obj_request;
2596 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002597 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002598 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002599 parent_length = img_request->length;
2600 rbd_assert(parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002601 rbd_img_request_put(img_request);
2602
Alex Elder91c6feb2013-05-06 17:40:32 -05002603 rbd_assert(orig_request->img_request);
2604 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002605 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002606
Alex Elderbbea1c12013-05-06 17:40:33 -05002607 /*
2608 * If the overlap has become 0 (most likely because the
2609 * image has been flattened) we need to free the pages
2610 * and re-submit the original write request.
2611 */
2612 if (!rbd_dev->parent_overlap) {
2613 struct ceph_osd_client *osdc;
2614
2615 ceph_release_page_vector(pages, page_count);
2616 osdc = &rbd_dev->rbd_client->client->osdc;
2617 img_result = rbd_obj_request_submit(osdc, orig_request);
2618 if (!img_result)
2619 return;
2620 }
2621
2622 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002623 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002624
Alex Elder8785b1d2013-05-09 10:08:49 -05002625 /*
2626 * The original osd request is of no use to us any more.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002627 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002628 * request. Allocate the new copyup osd request for the
2629 * original request, and release the old one.
2630 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002631 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002632 osd_req = rbd_osd_req_create_copyup(orig_request);
2633 if (!osd_req)
2634 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002635 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002636 orig_request->osd_req = osd_req;
2637 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002638 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002639
Alex Elder0eefd472013-04-19 15:34:50 -05002640 /* Initialize the copyup op */
2641
2642 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002643 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002644 false, false);
2645
Josh Durgind3246fb2014-04-07 16:49:21 -07002646 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002647
Josh Durgind3246fb2014-04-07 16:49:21 -07002648 op_type = rbd_img_request_op_type(orig_request->img_request);
2649 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
Alex Elder0eefd472013-04-19 15:34:50 -05002650
2651 /* All set, send it off. */
2652
2653 orig_request->callback = rbd_img_obj_copyup_callback;
2654 osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderbbea1c12013-05-06 17:40:33 -05002655 img_result = rbd_obj_request_submit(osdc, orig_request);
2656 if (!img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002657 return;
2658out_err:
2659 /* Record the error code and complete the request */
2660
Alex Elderbbea1c12013-05-06 17:40:33 -05002661 orig_request->result = img_result;
Alex Elder0eefd472013-04-19 15:34:50 -05002662 orig_request->xferred = 0;
2663 obj_request_done_set(orig_request);
2664 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002665}
2666
2667/*
2668 * Read from the parent image the range of data that covers the
2669 * entire target of the given object request. This is used for
2670 * satisfying a layered image write request when the target of an
2671 * object request from the image request does not exist.
2672 *
2673 * A page array big enough to hold the returned data is allocated
2674 * and supplied to rbd_img_request_fill() as the "data descriptor."
2675 * When the read completes, this page array will be transferred to
2676 * the original object request for the copyup operation.
2677 *
2678 * If an error occurs, record it as the result of the original
2679 * object request and mark it done so it gets completed.
2680 */
2681static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2682{
2683 struct rbd_img_request *img_request = NULL;
2684 struct rbd_img_request *parent_request = NULL;
2685 struct rbd_device *rbd_dev;
2686 u64 img_offset;
2687 u64 length;
2688 struct page **pages = NULL;
2689 u32 page_count;
2690 int result;
2691
2692 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderb91f09f2013-05-10 16:29:22 -05002693 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder3d7efd12013-04-19 15:34:50 -05002694
2695 img_request = obj_request->img_request;
2696 rbd_assert(img_request != NULL);
2697 rbd_dev = img_request->rbd_dev;
2698 rbd_assert(rbd_dev->parent != NULL);
2699
2700 /*
2701 * Determine the byte range covered by the object in the
2702 * child image to which the original request was to be sent.
2703 */
2704 img_offset = obj_request->img_offset - obj_request->offset;
2705 length = (u64)1 << rbd_dev->header.obj_order;
2706
2707 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002708 * There is no defined parent data beyond the parent
2709 * overlap, so limit what we read at that boundary if
2710 * necessary.
2711 */
2712 if (img_offset + length > rbd_dev->parent_overlap) {
2713 rbd_assert(img_offset < rbd_dev->parent_overlap);
2714 length = rbd_dev->parent_overlap - img_offset;
2715 }
2716
2717 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002718 * Allocate a page array big enough to receive the data read
2719 * from the parent.
2720 */
2721 page_count = (u32)calc_pages_for(0, length);
2722 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2723 if (IS_ERR(pages)) {
2724 result = PTR_ERR(pages);
2725 pages = NULL;
2726 goto out_err;
2727 }
2728
2729 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002730 parent_request = rbd_parent_request_create(obj_request,
2731 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002732 if (!parent_request)
2733 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002734
2735 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2736 if (result)
2737 goto out_err;
2738 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002739 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002740
2741 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2742 result = rbd_img_request_submit(parent_request);
2743 if (!result)
2744 return 0;
2745
2746 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002747 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002748 parent_request->obj_request = NULL;
2749 rbd_obj_request_put(obj_request);
2750out_err:
2751 if (pages)
2752 ceph_release_page_vector(pages, page_count);
2753 if (parent_request)
2754 rbd_img_request_put(parent_request);
2755 obj_request->result = result;
2756 obj_request->xferred = 0;
2757 obj_request_done_set(obj_request);
2758
2759 return result;
2760}
2761
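/*
 * Completion callback for the STAT request issued by
 * rbd_img_obj_exists_submit(). Record whether the target object
 * exists, then resubmit the original object request.
 */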
Alex Elderc5b5ef62013-02-11 12:33:24 -06002762static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2763{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002764 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002765 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002766 int result;
2767
2768 rbd_assert(!obj_request_img_data_test(obj_request));
2769
2770 /*
2771 * All we need from the object request is the original
2772 * request and the result of the STAT op. Grab those, then
2773 * we're done with the request.
2774 */
2775 orig_request = obj_request->obj_request;
2776 obj_request->obj_request = NULL;
Alex Elder912c3172013-05-13 20:35:38 -05002777 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002778 rbd_assert(orig_request);
2779 rbd_assert(orig_request->img_request);
2780
2781 result = obj_request->result;
2782 obj_request->result = 0;
2783
2784 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2785 obj_request, orig_request, result,
2786 obj_request->xferred, obj_request->length);
2787 rbd_obj_request_put(obj_request);
2788
Alex Elder638f5ab2013-05-06 17:40:33 -05002789 /*
2790 * If the overlap has become 0 (most likely because the
2791 * image has been flattened) we need to free the pages
2792 * and re-submit the original write request.
2793 */
2794 rbd_dev = orig_request->img_request->rbd_dev;
2795 if (!rbd_dev->parent_overlap) {
2796 struct ceph_osd_client *osdc;
2797
Alex Elder638f5ab2013-05-06 17:40:33 -05002798 osdc = &rbd_dev->rbd_client->client->osdc;
2799 result = rbd_obj_request_submit(osdc, orig_request);
2800 if (!result)
2801 return;
2802 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002803
2804 /*
2805 * Our only purpose here is to determine whether the object
2806 * exists, and we don't want to treat the non-existence as
2807 * an error. If something else comes back, transfer the
2808 * error to the original request and complete it now.
2809 */
2810 if (!result) {
2811 obj_request_existence_set(orig_request, true);
2812 } else if (result == -ENOENT) {
2813 obj_request_existence_set(orig_request, false);
2814 } else if (result) {
2815 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002816 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002817 }
2818
2819 /*
2820 * Resubmit the original request now that we have recorded
2821 * whether the target object exists.
2822 */
Alex Elderb454e362013-04-19 15:34:50 -05002823 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002824out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002825 if (orig_request->result)
2826 rbd_obj_request_complete(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002827}
2828
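/*
 * Issue a STAT on the object targeted by obj_request to find out
 * whether it already exists. The result is handled by
 * rbd_img_obj_exists_callback().
 */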
2829static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2830{
2831 struct rbd_obj_request *stat_request;
2832 struct rbd_device *rbd_dev;
2833 struct ceph_osd_client *osdc;
2834 struct page **pages = NULL;
2835 u32 page_count;
2836 size_t size;
2837 int ret;
2838
2839 /*
2840 * The response data for a STAT call consists of:
2841 * le64 length;
2842 * struct {
2843 * le32 tv_sec;
2844 * le32 tv_nsec;
2845 * } mtime;
2846 */
2847 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2848 page_count = (u32)calc_pages_for(0, size);
2849 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2850 if (IS_ERR(pages))
2851 return PTR_ERR(pages);
2852
2853 ret = -ENOMEM;
2854 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2855 OBJ_REQUEST_PAGES);
2856 if (!stat_request)
2857 goto out;
2858
2859 rbd_obj_request_get(obj_request);
2860 stat_request->obj_request = obj_request;
2861 stat_request->pages = pages;
2862 stat_request->page_count = page_count;
2863
2864 rbd_assert(obj_request->img_request);
2865 rbd_dev = obj_request->img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002866 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02002867 stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002868 if (!stat_request->osd_req)
2869 goto out;
2870 stat_request->callback = rbd_img_obj_exists_callback;
2871
Yan, Zheng144cba12015-04-27 11:09:54 +08002872 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002873 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2874 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002875 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002876
2877 osdc = &rbd_dev->rbd_client->client->osdc;
2878 ret = rbd_obj_request_submit(osdc, stat_request);
2879out:
2880 if (ret)
2881 rbd_obj_request_put(obj_request);
2882
2883 return ret;
2884}
2885
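/*
 * Return true if this object request can be submitted directly to
 * the osd: reads, non-layered writes, writes beyond the parent
 * overlap, whole-object writes, and writes to objects already known
 * to exist need no copyup handling.
 */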
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002886static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002887{
2888 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002889 struct rbd_device *rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002890
2891 rbd_assert(obj_request_img_data_test(obj_request));
2892
2893 img_request = obj_request->img_request;
2894 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002895 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002896
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002897 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002898 if (!img_request_write_test(img_request) &&
2899 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002900 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002901
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002902 /* Non-layered writes */
2903 if (!img_request_layered_test(img_request))
2904 return true;
2905
2906 /*
2907 * Layered writes outside of the parent overlap range don't
2908 * share any data with the parent.
2909 */
2910 if (!obj_request_overlaps_parent(obj_request))
2911 return true;
2912
2913 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002914 * Entire-object layered writes - we will overwrite whatever
2915 * parent data there is anyway.
2916 */
2917 if (!obj_request->offset &&
2918 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2919 return true;
2920
2921 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002922 * If the object is known to already exist, its parent data has
2923 * already been copied.
2924 */
2925 if (obj_request_known_test(obj_request) &&
2926 obj_request_exists_test(obj_request))
2927 return true;
2928
2929 return false;
2930}
2931
2932static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2933{
2934 if (img_obj_request_simple(obj_request)) {
Alex Elderb454e362013-04-19 15:34:50 -05002935 struct rbd_device *rbd_dev;
2936 struct ceph_osd_client *osdc;
2937
2938 rbd_dev = obj_request->img_request->rbd_dev;
2939 osdc = &rbd_dev->rbd_client->client->osdc;
2940
2941 return rbd_obj_request_submit(osdc, obj_request);
2942 }
2943
2944 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002945 * It's a layered write. The target object might exist but
2946 * we may not know that yet. If we know it doesn't exist,
2947 * start by reading the data for the full target object from
2948 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002949 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002950 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002951 return rbd_img_obj_parent_read_full(obj_request);
2952
2953 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002954
2955 return rbd_img_obj_exists_submit(obj_request);
2956}
2957
Alex Elderbf0d5f502012-11-22 00:00:08 -06002958static int rbd_img_request_submit(struct rbd_img_request *img_request)
2959{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002960 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002961 struct rbd_obj_request *next_obj_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002962
Alex Elder37206ee2013-02-20 17:32:08 -06002963 dout("%s: img %p\n", __func__, img_request);
Alex Elder46faeed2013-04-10 17:47:46 -05002964 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002965 int ret;
2966
Alex Elderb454e362013-04-19 15:34:50 -05002967 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002968 if (ret)
2969 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002970 }
2971
2972 return 0;
2973}
2974
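/*
 * Completion callback for the parent image request created by
 * rbd_img_parent_read(). Clip the transferred length at the parent
 * overlap boundary and complete the original object request.
 */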
Alex Elder8b3e1a52013-01-24 16:13:36 -06002975static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2976{
2977 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002978 struct rbd_device *rbd_dev;
2979 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05002980 u64 img_xferred;
2981 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002982
2983 rbd_assert(img_request_child_test(img_request));
2984
Alex Elder02c74fb2013-05-06 17:40:33 -05002985 /* First get what we need from the image request and release it */
2986
Alex Elder8b3e1a52013-01-24 16:13:36 -06002987 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05002988 img_xferred = img_request->xferred;
2989 img_result = img_request->result;
2990 rbd_img_request_put(img_request);
2991
2992 /*
2993 * If the overlap has become 0 (most likely because the
2994 * image has been flattened) we need to re-submit the
2995 * original request.
2996 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002997 rbd_assert(obj_request);
2998 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05002999 rbd_dev = obj_request->img_request->rbd_dev;
3000 if (!rbd_dev->parent_overlap) {
3001 struct ceph_osd_client *osdc;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003002
Alex Elder02c74fb2013-05-06 17:40:33 -05003003 osdc = &rbd_dev->rbd_client->client->osdc;
3004 img_result = rbd_obj_request_submit(osdc, obj_request);
3005 if (!img_result)
3006 return;
3007 }
3008
3009 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003010 if (obj_request->result)
3011 goto out;
3012
3013 /*
3014 * We need to zero anything beyond the parent overlap
3015 * boundary. Since rbd_img_obj_request_read_callback()
3016 * will zero anything beyond the end of a short read, an
3017 * easy way to do this is to pretend the data from the
3018 * parent came up short--ending at the overlap boundary.
3019 */
3020 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3021 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003022 if (obj_end > rbd_dev->parent_overlap) {
3023 u64 xferred = 0;
3024
3025 if (obj_request->img_offset < rbd_dev->parent_overlap)
3026 xferred = rbd_dev->parent_overlap -
3027 obj_request->img_offset;
3028
Alex Elder02c74fb2013-05-06 17:40:33 -05003029 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003030 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05003031 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003032 }
3033out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06003034 rbd_img_obj_request_read_callback(obj_request);
3035 rbd_obj_request_complete(obj_request);
3036}
3037
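/*
 * A read of a layered image returned -ENOENT for this object, so the
 * data (if any) lives in the parent image. Build and submit a child
 * image request to read the same range from the parent.
 */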
3038static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3039{
Alex Elder8b3e1a52013-01-24 16:13:36 -06003040 struct rbd_img_request *img_request;
3041 int result;
3042
3043 rbd_assert(obj_request_img_data_test(obj_request));
3044 rbd_assert(obj_request->img_request != NULL);
3045 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003046 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003047
Alex Elder8b3e1a52013-01-24 16:13:36 -06003048 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003049 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003050 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003051 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003052 result = -ENOMEM;
3053 if (!img_request)
3054 goto out_err;
3055
Alex Elder5b2ab722013-05-06 17:40:33 -05003056 if (obj_request->type == OBJ_REQUEST_BIO)
3057 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3058 obj_request->bio_list);
3059 else
3060 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3061 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003062 if (result)
3063 goto out_err;
3064
3065 img_request->callback = rbd_img_parent_read_callback;
3066 result = rbd_img_request_submit(img_request);
3067 if (result)
3068 goto out_err;
3069
3070 return;
3071out_err:
3072 if (img_request)
3073 rbd_img_request_put(img_request);
3074 obj_request->result = result;
3075 obj_request->xferred = 0;
3076 obj_request_done_set(obj_request);
3077}
3078
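/*
 * Acknowledge a notification on the header object and wait for the
 * ack request to complete.
 */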
Josh Durgin20e0af62013-08-29 17:36:03 -07003079static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
Alex Elderb8d70032012-11-30 17:53:04 -06003080{
3081 struct rbd_obj_request *obj_request;
Alex Elder21692382013-04-05 01:27:12 -05003082 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06003083 int ret;
3084
3085 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3086 OBJ_REQUEST_NODATA);
3087 if (!obj_request)
3088 return -ENOMEM;
3089
3090 ret = -ENOMEM;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003091 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003092 obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003093 if (!obj_request->osd_req)
3094 goto out;
3095
Alex Elderc99d2d42013-04-05 01:27:11 -05003096 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003097 notify_id, 0, 0);
Alex Elder9d4df012013-04-19 15:34:50 -05003098 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003099
Alex Elderb8d70032012-11-30 17:53:04 -06003100 ret = rbd_obj_request_submit(osdc, obj_request);
Alex Eldercf81b602013-01-17 12:18:46 -06003101 if (ret)
Josh Durgin20e0af62013-08-29 17:36:03 -07003102 goto out;
3103 ret = rbd_obj_request_wait(obj_request);
3104out:
3105 rbd_obj_request_put(obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003106
3107 return ret;
3108}
3109
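/*
 * Callback invoked when a notification arrives on the header object
 * watch. Refresh the image header and acknowledge the notification.
 */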
3110static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3111{
3112 struct rbd_device *rbd_dev = (struct rbd_device *)data;
Alex Eldere627db02013-05-06 07:40:30 -05003113 int ret;
Alex Elderb8d70032012-11-30 17:53:04 -06003114
3115 if (!rbd_dev)
3116 return;
3117
Alex Elder37206ee2013-02-20 17:32:08 -06003118 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003119 rbd_dev->header_name, (unsigned long long)notify_id,
3120 (unsigned int)opcode);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003121
3122 /*
3123 * Until adequate refresh error handling is in place, there is
3124 * not much we can do here, except warn.
3125 *
3126 * See http://tracker.ceph.com/issues/5040
3127 */
Alex Eldere627db02013-05-06 07:40:30 -05003128 ret = rbd_dev_refresh(rbd_dev);
3129 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003130 rbd_warn(rbd_dev, "refresh failed: %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003131
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003132 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3133 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003134 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003135}
3136
Alex Elder9969ebc2013-01-18 12:31:10 -06003137/*
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003138 * Send a watch or unwatch request and wait for the ack. Return a request
3139 * with a ref held on success, or an ERR_PTR on error.
3140 */
3141static struct rbd_obj_request *rbd_obj_watch_request_helper(
3142 struct rbd_device *rbd_dev,
3143 bool watch)
3144{
3145 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003146 struct ceph_options *opts = osdc->client->options;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003147 struct rbd_obj_request *obj_request;
3148 int ret;
3149
3150 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3151 OBJ_REQUEST_NODATA);
3152 if (!obj_request)
3153 return ERR_PTR(-ENOMEM);
3154
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003155 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003156 obj_request);
3157 if (!obj_request->osd_req) {
3158 ret = -ENOMEM;
3159 goto out;
3160 }
3161
3162 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3163 rbd_dev->watch_event->cookie, 0, watch);
3164 rbd_osd_req_format_write(obj_request);
3165
3166 if (watch)
3167 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3168
3169 ret = rbd_obj_request_submit(osdc, obj_request);
3170 if (ret)
3171 goto out;
3172
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003173 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003174 if (ret)
3175 goto out;
3176
3177 ret = obj_request->result;
3178 if (ret) {
3179 if (watch)
3180 rbd_obj_request_end(obj_request);
3181 goto out;
3182 }
3183
3184 return obj_request;
3185
3186out:
3187 rbd_obj_request_put(obj_request);
3188 return ERR_PTR(ret);
3189}
3190
3191/*
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003192 * Initiate a watch request, synchronously.
Alex Elder9969ebc2013-01-18 12:31:10 -06003193 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003194static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003195{
3196 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3197 struct rbd_obj_request *obj_request;
Alex Elder9969ebc2013-01-18 12:31:10 -06003198 int ret;
3199
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003200 rbd_assert(!rbd_dev->watch_event);
3201 rbd_assert(!rbd_dev->watch_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06003202
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003203 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3204 &rbd_dev->watch_event);
3205 if (ret < 0)
3206 return ret;
Alex Elder9969ebc2013-01-18 12:31:10 -06003207
Ilya Dryomov76756a52014-06-20 18:29:20 +04003208 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3209 if (IS_ERR(obj_request)) {
3210 ceph_osdc_cancel_event(rbd_dev->watch_event);
3211 rbd_dev->watch_event = NULL;
3212 return PTR_ERR(obj_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003213 }
Alex Elder9969ebc2013-01-18 12:31:10 -06003214
Alex Elder8eb87562013-01-25 17:08:55 -06003215 /*
3216 * A watch request is set to linger, so the underlying osd
3217 * request won't go away until we unregister it. We retain
3218 * a pointer to the object request during that time (in
Ilya Dryomov76756a52014-06-20 18:29:20 +04003219 * rbd_dev->watch_request), so we'll keep a reference to it.
3220 * We'll drop that reference after we've unregistered it in
3221 * rbd_dev_header_unwatch_sync().
Alex Elder8eb87562013-01-25 17:08:55 -06003222 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003223 rbd_dev->watch_request = obj_request;
Alex Elder8eb87562013-01-25 17:08:55 -06003224
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003225 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003226}
3227
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003228/*
3229 * Tear down a watch request, synchronously.
3230 */
Ilya Dryomov76756a52014-06-20 18:29:20 +04003231static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003232{
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003233 struct rbd_obj_request *obj_request;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003234
3235 rbd_assert(rbd_dev->watch_event);
3236 rbd_assert(rbd_dev->watch_request);
3237
Ilya Dryomov76756a52014-06-20 18:29:20 +04003238 rbd_obj_request_end(rbd_dev->watch_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003239 rbd_obj_request_put(rbd_dev->watch_request);
3240 rbd_dev->watch_request = NULL;
3241
Ilya Dryomov76756a52014-06-20 18:29:20 +04003242 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3243 if (!IS_ERR(obj_request))
3244 rbd_obj_request_put(obj_request);
3245 else
3246 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3247 PTR_ERR(obj_request));
3248
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003249 ceph_osdc_cancel_event(rbd_dev->watch_event);
3250 rbd_dev->watch_event = NULL;
Ilya Dryomovfca27062013-12-16 18:02:40 +02003251}
3252
Alex Elder36be9a72013-01-19 00:30:28 -06003253/*
Alex Elderf40eb342013-04-25 15:09:42 -05003254 * Synchronous osd object method call. Returns the number of bytes
3255 * returned in the inbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003256 */
3257static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3258 const char *object_name,
3259 const char *class_name,
3260 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003261 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003262 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003263 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003264 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003265{
Alex Elder21692382013-04-05 01:27:12 -05003266 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06003267 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06003268 struct page **pages;
3269 u32 page_count;
3270 int ret;
3271
3272 /*
Alex Elder6010a452013-04-05 01:27:11 -05003273 * Method calls are ultimately read operations. The result
3274 * should be placed into the inbound buffer provided. They
3275 * also supply outbound data--parameters for the object
3276 * method. Currently if this is present it will be a
3277 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003278 */
Alex Elder57385b52013-04-21 12:14:45 -05003279 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06003280 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3281 if (IS_ERR(pages))
3282 return PTR_ERR(pages);
3283
3284 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05003285 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06003286 OBJ_REQUEST_PAGES);
3287 if (!obj_request)
3288 goto out;
3289
3290 obj_request->pages = pages;
3291 obj_request->page_count = page_count;
3292
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003293 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003294 obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06003295 if (!obj_request->osd_req)
3296 goto out;
3297
Alex Elderc99d2d42013-04-05 01:27:11 -05003298 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05003299 class_name, method_name);
3300 if (outbound_size) {
3301 struct ceph_pagelist *pagelist;
3302
3303 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3304 if (!pagelist)
3305 goto out;
3306
3307 ceph_pagelist_init(pagelist);
3308 ceph_pagelist_append(pagelist, outbound, outbound_size);
3309 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3310 pagelist);
3311 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05003312 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3313 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05003314 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003315 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003316
Alex Elder36be9a72013-01-19 00:30:28 -06003317 ret = rbd_obj_request_submit(osdc, obj_request);
3318 if (ret)
3319 goto out;
3320 ret = rbd_obj_request_wait(obj_request);
3321 if (ret)
3322 goto out;
3323
3324 ret = obj_request->result;
3325 if (ret < 0)
3326 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05003327
3328 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3329 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003330 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06003331out:
3332 if (obj_request)
3333 rbd_obj_request_put(obj_request);
3334 else
3335 ceph_release_page_vector(pages, page_count);
3336
3337 return ret;
3338}
3339
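/*
 * Worker function that services a block layer request: validate it,
 * build the corresponding image request and submit it to the osds.
 * Runs from rbd_wq via the blk-mq ->queue_rq() path.
 */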
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003340static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003341{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003342 struct request *rq = blk_mq_rq_from_pdu(work);
3343 struct rbd_device *rbd_dev = rq->q->queuedata;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003344 struct rbd_img_request *img_request;
Josh Durgin4e752f02014-04-08 11:12:11 -07003345 struct ceph_snap_context *snapc = NULL;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003346 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3347 u64 length = blk_rq_bytes(rq);
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003348 enum obj_operation_type op_type;
Josh Durgin4e752f02014-04-08 11:12:11 -07003349 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003350 int result;
3351
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003352 if (rq->cmd_type != REQ_TYPE_FS) {
3353 dout("%s: non-fs request type %d\n", __func__,
3354 (int) rq->cmd_type);
3355 result = -EIO;
3356 goto err;
3357 }
3358
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003359 if (rq->cmd_flags & REQ_DISCARD)
3360 op_type = OBJ_OP_DISCARD;
3361 else if (rq->cmd_flags & REQ_WRITE)
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003362 op_type = OBJ_OP_WRITE;
3363 else
3364 op_type = OBJ_OP_READ;
3365
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003366 /* Ignore/skip any zero-length requests */
3367
3368 if (!length) {
3369 dout("%s: zero-length request\n", __func__);
3370 result = 0;
3371 goto err_rq;
3372 }
3373
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003374 /* Only reads are allowed to a read-only device */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003375
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003376 if (op_type != OBJ_OP_READ) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003377 if (rbd_dev->mapping.read_only) {
3378 result = -EROFS;
3379 goto err_rq;
3380 }
3381 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3382 }
3383
3384 /*
3385 * Quit early if the mapped snapshot no longer exists. It's
3386 * still possible the snapshot will have disappeared by the
3387 * time our request arrives at the osd, but there's no sense in
3388 * sending it if we already know.
3389 */
3390 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3391 dout("request for non-existent snapshot");
3392 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3393 result = -ENXIO;
3394 goto err_rq;
3395 }
3396
3397 if (offset && length > U64_MAX - offset + 1) {
3398 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3399 length);
3400 result = -EINVAL;
3401 goto err_rq; /* Shouldn't happen */
3402 }
3403
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003404 blk_mq_start_request(rq);
3405
Josh Durgin4e752f02014-04-08 11:12:11 -07003406 down_read(&rbd_dev->header_rwsem);
3407 mapping_size = rbd_dev->mapping.size;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003408 if (op_type != OBJ_OP_READ) {
Josh Durgin4e752f02014-04-08 11:12:11 -07003409 snapc = rbd_dev->header.snapc;
3410 ceph_get_snap_context(snapc);
3411 }
3412 up_read(&rbd_dev->header_rwsem);
3413
3414 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003415 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07003416 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003417 result = -EIO;
3418 goto err_rq;
3419 }
3420
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003421 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07003422 snapc);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003423 if (!img_request) {
3424 result = -ENOMEM;
3425 goto err_rq;
3426 }
3427 img_request->rq = rq;
3428
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003429 if (op_type == OBJ_OP_DISCARD)
3430 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3431 NULL);
3432 else
3433 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3434 rq->bio);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003435 if (result)
3436 goto err_img_request;
3437
3438 result = rbd_img_request_submit(img_request);
3439 if (result)
3440 goto err_img_request;
3441
3442 return;
3443
3444err_img_request:
3445 rbd_img_request_put(img_request);
3446err_rq:
3447 if (result)
3448 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003449 obj_op_name(op_type), length, offset, result);
SF Markus Elfringe96a6502014-11-02 15:20:59 +01003450 ceph_put_snap_context(snapc);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003451err:
3452 blk_mq_end_request(rq, result);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003453}
3454
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003455static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3456 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003457{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003458 struct request *rq = bd->rq;
3459 struct work_struct *work = blk_mq_rq_to_pdu(rq);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003460
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003461 queue_work(rbd_wq, work);
3462 return BLK_MQ_RQ_QUEUE_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06003463}
3464
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003465/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003466 * A queue callback. Makes sure that we don't create a bio that spans across
3467 * multiple osd objects. One exception would be single-page bios,
Alex Elderf7760da2012-10-20 22:17:27 -05003468 * which we handle later at bio_chain_clone_range()
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003469 */
3470static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3471 struct bio_vec *bvec)
3472{
3473 struct rbd_device *rbd_dev = q->queuedata;
Alex Eldere5cfeed22012-10-20 22:17:27 -05003474 sector_t sector_offset;
3475 sector_t sectors_per_obj;
3476 sector_t obj_sector_offset;
3477 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003478
Alex Eldere5cfeed22012-10-20 22:17:27 -05003479 /*
3480 * Convert the partition-relative bio start sector to an offset
3481 * relative to the enclosing device, then find how far into its
3482 * rbd object that sector falls.
3483 */
3484 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3485 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3486 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
Alex Elder593a9e72012-02-07 12:03:37 -06003487
Alex Eldere5cfeed22012-10-20 22:17:27 -05003488 /*
3489 * Compute the number of bytes from that offset to the end
3490 * of the object. Account for what's already used by the bio.
3491 */
3492 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3493 if (ret > bmd->bi_size)
3494 ret -= bmd->bi_size;
3495 else
3496 ret = 0;
3497
3498 /*
3499 * Don't send back more than was asked for. And if the bio
3500 * was empty, let the whole thing through because: "Note
3501 * that a block device *must* allow a single page to be
3502 * added to an empty bio."
3503 */
3504 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3505 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3506 ret = (int) bvec->bv_len;
3507
3508 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003509}
3510
3511static void rbd_free_disk(struct rbd_device *rbd_dev)
3512{
3513 struct gendisk *disk = rbd_dev->disk;
3514
3515 if (!disk)
3516 return;
3517
Alex Eldera0cab922013-04-25 23:15:08 -05003518 rbd_dev->disk = NULL;
3519 if (disk->flags & GENHD_FL_UP) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003520 del_gendisk(disk);
Alex Eldera0cab922013-04-25 23:15:08 -05003521 if (disk->queue)
3522 blk_cleanup_queue(disk->queue);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003523 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05003524 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003525 put_disk(disk);
3526}
3527
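/*
 * Synchronously read "length" bytes at "offset" from the named
 * object into "buf". Returns the number of bytes read or a negative
 * error code.
 */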
Alex Elder788e2df2013-01-17 12:25:27 -06003528static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3529 const char *object_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003530 u64 offset, u64 length, void *buf)
Alex Elder788e2df2013-01-17 12:25:27 -06003531
3532{
Alex Elder21692382013-04-05 01:27:12 -05003533 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder788e2df2013-01-17 12:25:27 -06003534 struct rbd_obj_request *obj_request;
Alex Elder788e2df2013-01-17 12:25:27 -06003535 struct page **pages = NULL;
3536 u32 page_count;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003537 size_t size;
Alex Elder788e2df2013-01-17 12:25:27 -06003538 int ret;
3539
3540 page_count = (u32) calc_pages_for(offset, length);
3541 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3542 if (IS_ERR(pages))
Jan Karaa8d42052014-10-22 09:17:24 +02003543 return PTR_ERR(pages);
Alex Elder788e2df2013-01-17 12:25:27 -06003544
3545 ret = -ENOMEM;
3546 obj_request = rbd_obj_request_create(object_name, offset, length,
Alex Elder36be9a72013-01-19 00:30:28 -06003547 OBJ_REQUEST_PAGES);
Alex Elder788e2df2013-01-17 12:25:27 -06003548 if (!obj_request)
3549 goto out;
3550
3551 obj_request->pages = pages;
3552 obj_request->page_count = page_count;
3553
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003554 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003555 obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06003556 if (!obj_request->osd_req)
3557 goto out;
3558
Alex Elderc99d2d42013-04-05 01:27:11 -05003559 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3560 offset, length, 0, 0);
Alex Elder406e2c92013-04-15 14:50:36 -05003561 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
Alex Eldera4ce40a2013-04-05 01:27:12 -05003562 obj_request->pages,
Alex Elder44cd1882013-04-05 01:27:12 -05003563 obj_request->length,
3564 obj_request->offset & ~PAGE_MASK,
3565 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003566 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003567
Alex Elder788e2df2013-01-17 12:25:27 -06003568 ret = rbd_obj_request_submit(osdc, obj_request);
3569 if (ret)
3570 goto out;
3571 ret = rbd_obj_request_wait(obj_request);
3572 if (ret)
3573 goto out;
3574
3575 ret = obj_request->result;
3576 if (ret < 0)
3577 goto out;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003578
3579 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3580 size = (size_t) obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003581 ceph_copy_from_page_vector(pages, buf, 0, size);
Alex Elder7097f8d2013-04-30 00:44:33 -05003582 rbd_assert(size <= (size_t)INT_MAX);
3583 ret = (int)size;
Alex Elder788e2df2013-01-17 12:25:27 -06003584out:
3585 if (obj_request)
3586 rbd_obj_request_put(obj_request);
3587 else
3588 ceph_release_page_vector(pages, page_count);
3589
3590 return ret;
3591}
3592
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003593/*
Alex Elder662518b2013-05-06 09:51:29 -05003594 * Read the complete header for the given rbd device. On successful
3595 * return, the rbd_dev->header field will contain up-to-date
3596 * information about the image.
Alex Elder4156d992012-08-02 11:29:46 -05003597 */
Alex Elder99a41eb2013-05-06 09:51:30 -05003598static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d992012-08-02 11:29:46 -05003599{
3600 struct rbd_image_header_ondisk *ondisk = NULL;
3601 u32 snap_count = 0;
3602 u64 names_size = 0;
3603 u32 want_count;
3604 int ret;
3605
3606 /*
3607 * The complete header will include an array of its 64-bit
3608 * snapshot ids, followed by the names of those snapshots as
3609 * a contiguous block of NUL-terminated strings. Note that
3610 * the number of snapshots could change by the time we read
3611 * it in, in which case we re-read it.
3612 */
3613 do {
3614 size_t size;
3615
3616 kfree(ondisk);
3617
3618 size = sizeof (*ondisk);
3619 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3620 size += names_size;
3621 ondisk = kmalloc(size, GFP_KERNEL);
3622 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05003623 return -ENOMEM;
Alex Elder4156d992012-08-02 11:29:46 -05003624
Alex Elder788e2df2013-01-17 12:25:27 -06003625 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003626 0, size, ondisk);
Alex Elder4156d992012-08-02 11:29:46 -05003627 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05003628 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05003629 if ((size_t)ret < size) {
Alex Elder4156d992012-08-02 11:29:46 -05003630 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003631 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3632 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05003633 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003634 }
3635 if (!rbd_dev_ondisk_valid(ondisk)) {
3636 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003637 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05003638 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003639 }
3640
3641 names_size = le64_to_cpu(ondisk->snap_names_len);
3642 want_count = snap_count;
3643 snap_count = le32_to_cpu(ondisk->snap_count);
3644 } while (snap_count != want_count);
3645
Alex Elder662518b2013-05-06 09:51:29 -05003646 ret = rbd_header_from_disk(rbd_dev, ondisk);
3647out:
Alex Elder4156d992012-08-02 11:29:46 -05003648 kfree(ondisk);
3649
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003650 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003651}
3652
Alex Elder15228ed2013-05-01 12:43:03 -05003653/*
3654 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3655 * has disappeared from the (just updated) snapshot context.
3656 */
3657static void rbd_exists_validate(struct rbd_device *rbd_dev)
3658{
3659 u64 snap_id;
3660
3661 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3662 return;
3663
3664 snap_id = rbd_dev->spec->snap_id;
3665 if (snap_id == CEPH_NOSNAP)
3666 return;
3667
3668 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3669 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3670}
3671
Josh Durgin98752012013-08-29 17:26:31 -07003672static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3673{
3674 sector_t size;
3675 bool removing;
3676
3677 /*
3678 * Don't hold the lock while doing disk operations,
3679 * or lock ordering will conflict with the bdev mutex via:
3680 * rbd_add() -> blkdev_get() -> rbd_open()
3681 */
3682 spin_lock_irq(&rbd_dev->lock);
3683 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3684 spin_unlock_irq(&rbd_dev->lock);
3685 /*
3686 * If the device is being removed, rbd_dev->disk has
3687 * been destroyed, so don't try to update its size
3688 */
3689 if (!removing) {
3690 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3691 dout("setting size to %llu sectors", (unsigned long long)size);
3692 set_capacity(rbd_dev->disk, size);
3693 revalidate_disk(rbd_dev->disk);
3694 }
3695}
3696
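/*
 * Re-read the image header and update the mapping accordingly:
 * refresh parent info for layered images, revalidate the mapped
 * snapshot's EXISTS flag, and resize the block device if the image
 * size changed.
 */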
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003697static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05003698{
Alex Eldere627db02013-05-06 07:40:30 -05003699 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05003700 int ret;
3701
Alex Eldercfbf6372013-05-31 17:40:45 -05003702 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05003703 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04003704
3705 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003706 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003707 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05003708
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003709 /*
3710 * If there is a parent, see if it has disappeared due to the
3711 * mapped image getting flattened.
3712 */
3713 if (rbd_dev->parent) {
3714 ret = rbd_dev_v2_parent_info(rbd_dev);
3715 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003716 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003717 }
3718
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003719 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003720 rbd_dev->mapping.size = rbd_dev->header.image_size;
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003721 } else {
3722 /* validate mapped snapshot's EXISTS flag */
3723 rbd_exists_validate(rbd_dev);
3724 }
Alex Elder15228ed2013-05-01 12:43:03 -05003725
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003726out:
Alex Eldercfbf6372013-05-31 17:40:45 -05003727 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003728 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07003729 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05003730
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003731 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05003732}
3733
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003734static int rbd_init_request(void *data, struct request *rq,
3735 unsigned int hctx_idx, unsigned int request_idx,
3736 unsigned int numa_node)
3737{
3738 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3739
3740 INIT_WORK(work, rbd_queue_workfn);
3741 return 0;
3742}
3743
3744static struct blk_mq_ops rbd_mq_ops = {
3745 .queue_rq = rbd_queue_rq,
3746 .map_queue = blk_mq_map_queue,
3747 .init_request = rbd_init_request,
3748};
3749
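/*
 * Set up the gendisk and blk-mq request queue for the mapped image,
 * sizing the I/O limits to the rbd object size.
 */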
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003750static int rbd_init_disk(struct rbd_device *rbd_dev)
3751{
3752 struct gendisk *disk;
3753 struct request_queue *q;
Alex Elder593a9e72012-02-07 12:03:37 -06003754 u64 segment_size;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003755 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003756
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003757 /* create gendisk info */
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003758 disk = alloc_disk(single_major ?
3759 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3760 RBD_MINORS_PER_MAJOR);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003761 if (!disk)
Alex Elder1fcdb8a2012-08-29 17:11:06 -05003762 return -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003763
Alex Elderf0f8cef2012-01-29 13:57:44 -06003764 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
Alex Elderde71a292012-07-03 16:01:19 -05003765 rbd_dev->dev_id);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003766 disk->major = rbd_dev->major;
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003767 disk->first_minor = rbd_dev->minor;
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003768 if (single_major)
3769 disk->flags |= GENHD_FL_EXT_DEVT;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003770 disk->fops = &rbd_bd_ops;
3771 disk->private_data = rbd_dev;
3772
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003773 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3774 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003775 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003776 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003777 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003778 rbd_dev->tag_set.nr_hw_queues = 1;
3779 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3780
3781 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3782 if (err)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003783 goto out_disk;
Josh Durgin029bcbd2011-07-22 11:35:23 -07003784
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003785 q = blk_mq_init_queue(&rbd_dev->tag_set);
3786 if (IS_ERR(q)) {
3787 err = PTR_ERR(q);
3788 goto out_tag_set;
3789 }
3790
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03003791 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3792 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06003793
Josh Durgin029bcbd2011-07-22 11:35:23 -07003794 /* set io sizes to object size */
Alex Elder593a9e72012-02-07 12:03:37 -06003795 segment_size = rbd_obj_bytes(&rbd_dev->header);
3796 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
Ilya Dryomovd3834fe2015-06-12 19:19:02 +03003797 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
Alex Elder593a9e72012-02-07 12:03:37 -06003798 blk_queue_max_segment_size(q, segment_size);
3799 blk_queue_io_min(q, segment_size);
3800 blk_queue_io_opt(q, segment_size);
Josh Durgin029bcbd2011-07-22 11:35:23 -07003801
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003802 /* enable the discard support */
3803 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3804 q->limits.discard_granularity = segment_size;
3805 q->limits.discard_alignment = segment_size;
Josh Durginb76f8232014-04-07 16:52:03 -07003806 q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
3807 q->limits.discard_zeroes_data = 1;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003808
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003809 blk_queue_merge_bvec(q, rbd_merge_bvec);
3810 disk->queue = q;
3811
3812 q->queuedata = rbd_dev;
3813
3814 rbd_dev->disk = disk;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003815
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003816 return 0;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003817out_tag_set:
3818 blk_mq_free_tag_set(&rbd_dev->tag_set);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003819out_disk:
3820 put_disk(disk);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003821 return err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003822}
3823
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003824/*
3825 * sysfs
3826 */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003827
Alex Elder593a9e72012-02-07 12:03:37 -06003828static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3829{
3830 return container_of(dev, struct rbd_device, dev);
3831}
3832
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003833static ssize_t rbd_size_show(struct device *dev,
3834 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003835{
Alex Elder593a9e72012-02-07 12:03:37 -06003836 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003837
Alex Elderfc71d832013-04-26 15:44:36 -05003838 return sprintf(buf, "%llu\n",
3839 (unsigned long long)rbd_dev->mapping.size);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003840}
3841
Alex Elder34b13182012-07-13 20:35:12 -05003842/*
3843 * Note this shows the features for whatever's mapped, which is not
3844 * necessarily the base image.
3845 */
3846static ssize_t rbd_features_show(struct device *dev,
3847 struct device_attribute *attr, char *buf)
3848{
3849 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3850
3851 return sprintf(buf, "0x%016llx\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003852 (unsigned long long)rbd_dev->mapping.features);
Alex Elder34b13182012-07-13 20:35:12 -05003853}
3854
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003855static ssize_t rbd_major_show(struct device *dev,
3856 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003857{
Alex Elder593a9e72012-02-07 12:03:37 -06003858 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003859
Alex Elderfc71d832013-04-26 15:44:36 -05003860 if (rbd_dev->major)
3861 return sprintf(buf, "%d\n", rbd_dev->major);
3862
3863 return sprintf(buf, "(none)\n");
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003864}
Alex Elderfc71d832013-04-26 15:44:36 -05003865
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003866static ssize_t rbd_minor_show(struct device *dev,
3867 struct device_attribute *attr, char *buf)
3868{
3869 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3870
3871 return sprintf(buf, "%d\n", rbd_dev->minor);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003872}
3873
3874static ssize_t rbd_client_id_show(struct device *dev,
3875 struct device_attribute *attr, char *buf)
3876{
Alex Elder593a9e72012-02-07 12:03:37 -06003877 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003878
Alex Elder1dbb4392012-01-24 10:08:37 -06003879 return sprintf(buf, "client%lld\n",
3880 ceph_client_id(rbd_dev->rbd_client->client));
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003881}
3882
3883static ssize_t rbd_pool_show(struct device *dev,
3884 struct device_attribute *attr, char *buf)
3885{
Alex Elder593a9e72012-02-07 12:03:37 -06003886 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003887
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003888 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003889}
3890
Alex Elder9bb2f332012-07-12 10:46:35 -05003891static ssize_t rbd_pool_id_show(struct device *dev,
3892 struct device_attribute *attr, char *buf)
3893{
3894 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3895
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003896 return sprintf(buf, "%llu\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003897 (unsigned long long) rbd_dev->spec->pool_id);
Alex Elder9bb2f332012-07-12 10:46:35 -05003898}
3899
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003900static ssize_t rbd_name_show(struct device *dev,
3901 struct device_attribute *attr, char *buf)
3902{
Alex Elder593a9e72012-02-07 12:03:37 -06003903 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003904
Alex Eldera92ffdf2012-10-30 19:40:33 -05003905 if (rbd_dev->spec->image_name)
3906 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3907
3908 return sprintf(buf, "(unknown)\n");
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003909}
3910
Alex Elder589d30e2012-07-10 20:30:11 -05003911static ssize_t rbd_image_id_show(struct device *dev,
3912 struct device_attribute *attr, char *buf)
3913{
3914 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3915
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003916 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05003917}
3918
Alex Elder34b13182012-07-13 20:35:12 -05003919/*
3920 * Shows the name of the currently-mapped snapshot (or
3921 * RBD_SNAP_HEAD_NAME for the base image).
3922 */
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003923static ssize_t rbd_snap_show(struct device *dev,
3924 struct device_attribute *attr,
3925 char *buf)
3926{
Alex Elder593a9e72012-02-07 12:03:37 -06003927 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003928
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003929 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003930}
3931
Alex Elder86b00e02012-10-25 23:34:42 -05003932/*
Ilya Dryomovff961282014-07-22 21:53:07 +04003933 * For a v2 image, shows the chain of parent images, separated by empty
3934 * lines. For v1 images or if there is no parent, shows "(no parent
3935 * image)".
Alex Elder86b00e02012-10-25 23:34:42 -05003936 */
3937static ssize_t rbd_parent_show(struct device *dev,
Ilya Dryomovff961282014-07-22 21:53:07 +04003938 struct device_attribute *attr,
3939 char *buf)
Alex Elder86b00e02012-10-25 23:34:42 -05003940{
3941 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Ilya Dryomovff961282014-07-22 21:53:07 +04003942 ssize_t count = 0;
Alex Elder86b00e02012-10-25 23:34:42 -05003943
Ilya Dryomovff961282014-07-22 21:53:07 +04003944 if (!rbd_dev->parent)
Alex Elder86b00e02012-10-25 23:34:42 -05003945 return sprintf(buf, "(no parent image)\n");
3946
Ilya Dryomovff961282014-07-22 21:53:07 +04003947 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3948 struct rbd_spec *spec = rbd_dev->parent_spec;
Alex Elder86b00e02012-10-25 23:34:42 -05003949
Ilya Dryomovff961282014-07-22 21:53:07 +04003950 count += sprintf(&buf[count], "%s"
3951 "pool_id %llu\npool_name %s\n"
3952 "image_id %s\nimage_name %s\n"
3953 "snap_id %llu\nsnap_name %s\n"
3954 "overlap %llu\n",
3955 !count ? "" : "\n", /* first? */
3956 spec->pool_id, spec->pool_name,
3957 spec->image_id, spec->image_name ?: "(unknown)",
3958 spec->snap_id, spec->snap_name,
3959 rbd_dev->parent_overlap);
3960 }
Alex Elder86b00e02012-10-25 23:34:42 -05003961
Ilya Dryomovff961282014-07-22 21:53:07 +04003962 return count;
Alex Elder86b00e02012-10-25 23:34:42 -05003963}
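/*
 * Hypothetical output of the "parent" attribute for a clone with a
 * single parent (all values illustrative):
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1018e2a67bc3
 *   image_name parent-image
 *   snap_id 4
 *   snap_name base-snap
 *   overlap 4194304
 */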
3964
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003965static ssize_t rbd_image_refresh(struct device *dev,
3966 struct device_attribute *attr,
3967 const char *buf,
3968 size_t size)
3969{
Alex Elder593a9e72012-02-07 12:03:37 -06003970 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05003971 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003972
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003973 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05003974 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003975 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05003976
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003977 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003978}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003979
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003980static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
Alex Elder34b13182012-07-13 20:35:12 -05003981static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003982static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003983static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003984static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3985static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
Alex Elder9bb2f332012-07-12 10:46:35 -05003986static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003987static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
Alex Elder589d30e2012-07-10 20:30:11 -05003988static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003989static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3990static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
Alex Elder86b00e02012-10-25 23:34:42 -05003991static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003992
3993static struct attribute *rbd_attrs[] = {
3994 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05003995 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003996 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003997 &dev_attr_minor.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003998 &dev_attr_client_id.attr,
3999 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05004000 &dev_attr_pool_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004001 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05004002 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004003 &dev_attr_current_snap.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05004004 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004005 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004006 NULL
4007};
4008
4009static struct attribute_group rbd_attr_group = {
4010 .attrs = rbd_attrs,
4011};
4012
4013static const struct attribute_group *rbd_attr_groups[] = {
4014 &rbd_attr_group,
4015 NULL
4016};
4017
4018static void rbd_sysfs_dev_release(struct device *dev)
4019{
4020}
4021
4022static struct device_type rbd_device_type = {
4023 .name = "rbd",
4024 .groups = rbd_attr_groups,
4025 .release = rbd_sysfs_dev_release,
4026};
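/*
 * Once an image is mapped, the attributes above appear under the rbd
 * bus in sysfs.  A hypothetical session for device id 0 might look
 * like this (paths and values are illustrative only):
 *
 *   # cat /sys/bus/rbd/devices/0/size
 *   # cat /sys/bus/rbd/devices/0/current_snap
 *   # echo 1 > /sys/bus/rbd/devices/0/refresh
 */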
4027
Alex Elder8b8fb992012-10-26 17:25:24 -05004028static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4029{
4030 kref_get(&spec->kref);
4031
4032 return spec;
4033}
4034
4035static void rbd_spec_free(struct kref *kref);
4036static void rbd_spec_put(struct rbd_spec *spec)
4037{
4038 if (spec)
4039 kref_put(&spec->kref, rbd_spec_free);
4040}
4041
4042static struct rbd_spec *rbd_spec_alloc(void)
4043{
4044 struct rbd_spec *spec;
4045
4046 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4047 if (!spec)
4048 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04004049
4050 spec->pool_id = CEPH_NOPOOL;
4051 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05004052 kref_init(&spec->kref);
4053
Alex Elder8b8fb992012-10-26 17:25:24 -05004054 return spec;
4055}
4056
4057static void rbd_spec_free(struct kref *kref)
4058{
4059 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4060
4061 kfree(spec->pool_name);
4062 kfree(spec->image_id);
4063 kfree(spec->image_name);
4064 kfree(spec->snap_name);
4065 kfree(spec);
4066}
4067
Alex Eldercc344fa2013-02-19 12:25:56 -06004068static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
Ilya Dryomovd1475432015-06-22 13:24:48 +03004069 struct rbd_spec *spec,
4070 struct rbd_options *opts)
Alex Elderc53d5892012-10-25 23:34:42 -05004071{
4072 struct rbd_device *rbd_dev;
4073
4074 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4075 if (!rbd_dev)
4076 return NULL;
4077
4078 spin_lock_init(&rbd_dev->lock);
Alex Elder6d292902013-01-14 12:43:31 -06004079 rbd_dev->flags = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05004080 atomic_set(&rbd_dev->parent_ref, 0);
Alex Elderc53d5892012-10-25 23:34:42 -05004081 INIT_LIST_HEAD(&rbd_dev->node);
Alex Elderc53d5892012-10-25 23:34:42 -05004082 init_rwsem(&rbd_dev->header_rwsem);
4083
Alex Elderc53d5892012-10-25 23:34:42 -05004084 rbd_dev->rbd_client = rbdc;
Ilya Dryomovd1475432015-06-22 13:24:48 +03004085 rbd_dev->spec = spec;
4086 rbd_dev->opts = opts;
Alex Elderc53d5892012-10-25 23:34:42 -05004087
Alex Elder0903e872012-11-14 12:25:19 -06004088 /* Initialize the layout used for all rbd requests */
4089
4090 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4091 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4092 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4093 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4094
Alex Elderc53d5892012-10-25 23:34:42 -05004095 return rbd_dev;
4096}
4097
4098static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4099{
Alex Elderc53d5892012-10-25 23:34:42 -05004100 rbd_put_client(rbd_dev->rbd_client);
4101 rbd_spec_put(rbd_dev->spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03004102 kfree(rbd_dev->opts);
Alex Elderc53d5892012-10-25 23:34:42 -05004103 kfree(rbd_dev);
4104}
4105
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004106/*
Alex Elder9d475de2012-07-03 16:01:19 -05004107 * Get the size and object order for an image snapshot, or if
4108 * snap_id is CEPH_NOSNAP, get this information for the base
4109 * image.
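 *
 * For example, a 1 GiB image with the typical default object order of
 * 22 would report order 22 and a size of 1073741824.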
4110 */
4111static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4112 u8 *order, u64 *snap_size)
4113{
4114 __le64 snapid = cpu_to_le64(snap_id);
4115 int ret;
4116 struct {
4117 u8 order;
4118 __le64 size;
4119 } __attribute__ ((packed)) size_buf = { 0 };
4120
Alex Elder36be9a72013-01-19 00:30:28 -06004121 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder9d475de2012-07-03 16:01:19 -05004122 "rbd", "get_size",
Alex Elder41579762013-04-21 12:14:45 -05004123 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004124 &size_buf, sizeof (size_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004125 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder9d475de2012-07-03 16:01:19 -05004126 if (ret < 0)
4127 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004128 if (ret < sizeof (size_buf))
4129 return -ERANGE;
Alex Elder9d475de2012-07-03 16:01:19 -05004130
Josh Durginc3545572013-08-28 17:08:10 -07004131 if (order) {
Alex Elderc86f86e2013-04-25 15:09:41 -05004132 *order = size_buf.order;
Josh Durginc3545572013-08-28 17:08:10 -07004133 dout(" order %u", (unsigned int)*order);
4134 }
Alex Elder9d475de2012-07-03 16:01:19 -05004135 *snap_size = le64_to_cpu(size_buf.size);
4136
Josh Durginc3545572013-08-28 17:08:10 -07004137 dout(" snap_id 0x%016llx snap_size = %llu\n",
4138 (unsigned long long)snap_id,
Alex Elder57385b52013-04-21 12:14:45 -05004139 (unsigned long long)*snap_size);
Alex Elder9d475de2012-07-03 16:01:19 -05004140
4141 return 0;
4142}
4143
4144static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4145{
4146 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4147 &rbd_dev->header.obj_order,
4148 &rbd_dev->header.image_size);
4149}
4150
Alex Elder1e130192012-07-03 16:01:19 -05004151static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4152{
4153 void *reply_buf;
4154 int ret;
4155 void *p;
4156
4157 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4158 if (!reply_buf)
4159 return -ENOMEM;
4160
Alex Elder36be9a72013-01-19 00:30:28 -06004161 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder41579762013-04-21 12:14:45 -05004162 "rbd", "get_object_prefix", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004163 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004164 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder1e130192012-07-03 16:01:19 -05004165 if (ret < 0)
4166 goto out;
4167
4168 p = reply_buf;
4169 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
Alex Elder57385b52013-04-21 12:14:45 -05004170 p + ret, NULL, GFP_NOIO);
4171 ret = 0;
Alex Elder1e130192012-07-03 16:01:19 -05004172
4173 if (IS_ERR(rbd_dev->header.object_prefix)) {
4174 ret = PTR_ERR(rbd_dev->header.object_prefix);
4175 rbd_dev->header.object_prefix = NULL;
4176 } else {
4177 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4178 }
Alex Elder1e130192012-07-03 16:01:19 -05004179out:
4180 kfree(reply_buf);
4181
4182 return ret;
4183}
4184
Alex Elderb1b54022012-07-03 16:01:19 -05004185static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4186 u64 *snap_features)
4187{
4188 __le64 snapid = cpu_to_le64(snap_id);
4189 struct {
4190 __le64 features;
4191 __le64 incompat;
Alex Elder41579762013-04-21 12:14:45 -05004192 } __attribute__ ((packed)) features_buf = { 0 };
Alex Elderd8891402012-10-09 13:50:17 -07004193 u64 incompat;
Alex Elderb1b54022012-07-03 16:01:19 -05004194 int ret;
4195
Alex Elder36be9a72013-01-19 00:30:28 -06004196 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elderb1b54022012-07-03 16:01:19 -05004197 "rbd", "get_features",
Alex Elder41579762013-04-21 12:14:45 -05004198 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004199 &features_buf, sizeof (features_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004200 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderb1b54022012-07-03 16:01:19 -05004201 if (ret < 0)
4202 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004203 if (ret < sizeof (features_buf))
4204 return -ERANGE;
Alex Elderd8891402012-10-09 13:50:17 -07004205
4206 incompat = le64_to_cpu(features_buf.incompat);
Alex Elder5cbf6f122013-04-11 09:29:48 -05004207 if (incompat & ~RBD_FEATURES_SUPPORTED)
Alex Elderb8f5c6e2012-11-01 08:39:26 -05004208 return -ENXIO;
Alex Elderd8891402012-10-09 13:50:17 -07004209
Alex Elderb1b54022012-07-03 16:01:19 -05004210 *snap_features = le64_to_cpu(features_buf.features);
4211
4212 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
Alex Elder57385b52013-04-21 12:14:45 -05004213 (unsigned long long)snap_id,
4214 (unsigned long long)*snap_features,
4215 (unsigned long long)le64_to_cpu(features_buf.incompat));
Alex Elderb1b54022012-07-03 16:01:19 -05004216
4217 return 0;
4218}
4219
4220static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4221{
4222 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4223 &rbd_dev->header.features);
4224}
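/*
 * For example, a plain format 2 clone would typically report a feature
 * mask of 0x1 (RBD_FEATURE_LAYERING) and no incompatible bits; the
 * value is illustrative and depends on how the image was created.
 */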
4225
Alex Elder86b00e02012-10-25 23:34:42 -05004226static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4227{
4228 struct rbd_spec *parent_spec;
4229 size_t size;
4230 void *reply_buf = NULL;
4231 __le64 snapid;
4232 void *p;
4233 void *end;
Alex Elder642a2532013-05-06 17:40:33 -05004234 u64 pool_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004235 char *image_id;
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004236 u64 snap_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004237 u64 overlap;
Alex Elder86b00e02012-10-25 23:34:42 -05004238 int ret;
4239
4240 parent_spec = rbd_spec_alloc();
4241 if (!parent_spec)
4242 return -ENOMEM;
4243
4244 size = sizeof (__le64) + /* pool_id */
4245 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4246 sizeof (__le64) + /* snap_id */
4247 sizeof (__le64); /* overlap */
4248 reply_buf = kmalloc(size, GFP_KERNEL);
4249 if (!reply_buf) {
4250 ret = -ENOMEM;
4251 goto out_err;
4252 }
4253
Ilya Dryomov4d9b67c2014-07-24 10:42:13 +04004254 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
Alex Elder36be9a72013-01-19 00:30:28 -06004255 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder86b00e02012-10-25 23:34:42 -05004256 "rbd", "get_parent",
Alex Elder41579762013-04-21 12:14:45 -05004257 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004258 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004259 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder86b00e02012-10-25 23:34:42 -05004260 if (ret < 0)
4261 goto out_err;
4262
Alex Elder86b00e02012-10-25 23:34:42 -05004263 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004264 end = reply_buf + ret;
4265 ret = -ERANGE;
Alex Elder642a2532013-05-06 17:40:33 -05004266 ceph_decode_64_safe(&p, end, pool_id, out_err);
Alex Elder392a9da2013-05-06 17:40:33 -05004267 if (pool_id == CEPH_NOPOOL) {
4268 /*
4269 * Either the parent never existed, or we have
4270 * record of it but the image got flattened so it no
4271 * longer has a parent. When the parent of a
4272 * layered image disappears we immediately set the
4273 * overlap to 0. The effect of this is that all new
4274 * requests will be treated as if the image had no
4275 * parent.
4276 */
4277 if (rbd_dev->parent_overlap) {
4278 rbd_dev->parent_overlap = 0;
Alex Elder392a9da2013-05-06 17:40:33 -05004279 rbd_dev_parent_put(rbd_dev);
4280 pr_info("%s: clone image has been flattened\n",
4281 rbd_dev->disk->disk_name);
4282 }
4283
Alex Elder86b00e02012-10-25 23:34:42 -05004284 goto out; /* No parent? No problem. */
Alex Elder392a9da2013-05-06 17:40:33 -05004285 }
Alex Elder86b00e02012-10-25 23:34:42 -05004286
Alex Elder0903e872012-11-14 12:25:19 -06004287 /* The ceph file layout needs to fit pool id in 32 bits */
4288
4289 ret = -EIO;
Alex Elder642a2532013-05-06 17:40:33 -05004290 if (pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04004291 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
Alex Elder642a2532013-05-06 17:40:33 -05004292 (unsigned long long)pool_id, U32_MAX);
Alex Elder57385b52013-04-21 12:14:45 -05004293 goto out_err;
Alex Elderc0cd10db2013-04-26 09:43:47 -05004294 }
Alex Elder0903e872012-11-14 12:25:19 -06004295
Alex Elder979ed482012-11-01 08:39:26 -05004296 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elder86b00e02012-10-25 23:34:42 -05004297 if (IS_ERR(image_id)) {
4298 ret = PTR_ERR(image_id);
4299 goto out_err;
4300 }
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004301 ceph_decode_64_safe(&p, end, snap_id, out_err);
Alex Elder86b00e02012-10-25 23:34:42 -05004302 ceph_decode_64_safe(&p, end, overlap, out_err);
4303
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004304 /*
4305 * The parent won't change (except when the clone is
4306 * flattened, which is handled above). So we only need to
4307 * record the parent spec if we have not already done so.
4308 */
4309 if (!rbd_dev->parent_spec) {
4310 parent_spec->pool_id = pool_id;
4311 parent_spec->image_id = image_id;
4312 parent_spec->snap_id = snap_id;
Alex Elder70cf49c2013-05-06 17:40:33 -05004313 rbd_dev->parent_spec = parent_spec;
4314 parent_spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovfbba11b2014-06-27 21:46:33 +04004315 } else {
4316 kfree(image_id);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004317 }
4318
4319 /*
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004320 * We always update the parent overlap. If it's zero we issue
4321 * a warning, as we will proceed as if there was no parent.
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004322 */
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004323 if (!overlap) {
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004324 if (parent_spec) {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004325 /* refresh, careful to warn just once */
4326 if (rbd_dev->parent_overlap)
4327 rbd_warn(rbd_dev,
4328 "clone now standalone (overlap became 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004329 } else {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004330 /* initial probe */
4331 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004332 }
Alex Elder70cf49c2013-05-06 17:40:33 -05004333 }
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004334 rbd_dev->parent_overlap = overlap;
4335
Alex Elder86b00e02012-10-25 23:34:42 -05004336out:
4337 ret = 0;
4338out_err:
4339 kfree(reply_buf);
4340 rbd_spec_put(parent_spec);
4341
4342 return ret;
4343}
4344
Alex Eldercc070d52013-04-21 12:14:45 -05004345static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4346{
4347 struct {
4348 __le64 stripe_unit;
4349 __le64 stripe_count;
4350 } __attribute__ ((packed)) striping_info_buf = { 0 };
4351 size_t size = sizeof (striping_info_buf);
4352 void *p;
4353 u64 obj_size;
4354 u64 stripe_unit;
4355 u64 stripe_count;
4356 int ret;
4357
4358 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4359 "rbd", "get_stripe_unit_count", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004360 (char *)&striping_info_buf, size);
Alex Eldercc070d52013-04-21 12:14:45 -05004361 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4362 if (ret < 0)
4363 return ret;
4364 if (ret < size)
4365 return -ERANGE;
4366
4367 /*
4368 * We don't actually support the "fancy striping" feature
4369 * (STRIPINGV2) yet, but if the striping sizes are the
4370 * defaults the behavior is the same as before. So find
4371 * out, and only fail if the image has non-default values.
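	 *
	 * For example, an image created with the default parameters
	 * (stripe unit equal to the object size, stripe count 1) is
	 * accepted here, while a hypothetical image created with a
	 * 64 KiB stripe unit and a stripe count of 16 would be
	 * rejected below.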
4372 */
4373 ret = -EINVAL;
4374 obj_size = (u64)1 << rbd_dev->header.obj_order;
4375 p = &striping_info_buf;
4376 stripe_unit = ceph_decode_64(&p);
4377 if (stripe_unit != obj_size) {
4378 rbd_warn(rbd_dev, "unsupported stripe unit "
4379 "(got %llu want %llu)",
4380 stripe_unit, obj_size);
4381 return -EINVAL;
4382 }
4383 stripe_count = ceph_decode_64(&p);
4384 if (stripe_count != 1) {
4385 rbd_warn(rbd_dev, "unsupported stripe count "
4386 "(got %llu want 1)", stripe_count);
4387 return -EINVAL;
4388 }
Alex Elder500d0c02013-04-26 09:43:47 -05004389 rbd_dev->header.stripe_unit = stripe_unit;
4390 rbd_dev->header.stripe_count = stripe_count;
Alex Eldercc070d52013-04-21 12:14:45 -05004391
4392 return 0;
4393}
4394
Alex Elder9e15b772012-10-30 19:40:33 -05004395static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4396{
4397 size_t image_id_size;
4398 char *image_id;
4399 void *p;
4400 void *end;
4401 size_t size;
4402 void *reply_buf = NULL;
4403 size_t len = 0;
4404 char *image_name = NULL;
4405 int ret;
4406
4407 rbd_assert(!rbd_dev->spec->image_name);
4408
Alex Elder69e7a022012-11-01 08:39:26 -05004409 len = strlen(rbd_dev->spec->image_id);
4410 image_id_size = sizeof (__le32) + len;
Alex Elder9e15b772012-10-30 19:40:33 -05004411 image_id = kmalloc(image_id_size, GFP_KERNEL);
4412 if (!image_id)
4413 return NULL;
4414
4415 p = image_id;
Alex Elder41579762013-04-21 12:14:45 -05004416 end = image_id + image_id_size;
Alex Elder57385b52013-04-21 12:14:45 -05004417 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
Alex Elder9e15b772012-10-30 19:40:33 -05004418
4419 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4420 reply_buf = kmalloc(size, GFP_KERNEL);
4421 if (!reply_buf)
4422 goto out;
4423
Alex Elder36be9a72013-01-19 00:30:28 -06004424 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
Alex Elder9e15b772012-10-30 19:40:33 -05004425 "rbd", "dir_get_name",
4426 image_id, image_id_size,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004427 reply_buf, size);
Alex Elder9e15b772012-10-30 19:40:33 -05004428 if (ret < 0)
4429 goto out;
4430 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004431 end = reply_buf + ret;
4432
Alex Elder9e15b772012-10-30 19:40:33 -05004433 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4434 if (IS_ERR(image_name))
4435 image_name = NULL;
4436 else
4437 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4438out:
4439 kfree(reply_buf);
4440 kfree(image_id);
4441
4442 return image_name;
4443}
4444
Alex Elder2ad3d712013-04-30 00:44:33 -05004445static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4446{
4447 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4448 const char *snap_name;
4449 u32 which = 0;
4450
4451 /* Skip over names until we find the one we are looking for */
4452
4453 snap_name = rbd_dev->header.snap_names;
4454 while (which < snapc->num_snaps) {
4455 if (!strcmp(name, snap_name))
4456 return snapc->snaps[which];
4457 snap_name += strlen(snap_name) + 1;
4458 which++;
4459 }
4460 return CEPH_NOSNAP;
4461}
4462
4463static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4464{
4465 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4466 u32 which;
4467 bool found = false;
4468 u64 snap_id;
4469
4470 for (which = 0; !found && which < snapc->num_snaps; which++) {
4471 const char *snap_name;
4472
4473 snap_id = snapc->snaps[which];
4474 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
Josh Durginefadc982013-08-29 19:16:42 -07004475 if (IS_ERR(snap_name)) {
4476 /* ignore no-longer existing snapshots */
4477 if (PTR_ERR(snap_name) == -ENOENT)
4478 continue;
4479 else
4480 break;
4481 }
Alex Elder2ad3d712013-04-30 00:44:33 -05004482 found = !strcmp(name, snap_name);
4483 kfree(snap_name);
4484 }
4485 return found ? snap_id : CEPH_NOSNAP;
4486}
4487
4488/*
4489 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4490 * no snapshot by that name is found, or if an error occurs.
4491 */
4492static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4493{
4494 if (rbd_dev->image_format == 1)
4495 return rbd_v1_snap_id_by_name(rbd_dev, name);
4496
4497 return rbd_v2_snap_id_by_name(rbd_dev, name);
4498}
4499
Alex Elder9e15b772012-10-30 19:40:33 -05004500/*
Ilya Dryomov04077592014-07-23 17:11:20 +04004501 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05004502 */
Ilya Dryomov04077592014-07-23 17:11:20 +04004503static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4504{
4505 struct rbd_spec *spec = rbd_dev->spec;
4506
4507 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4508 rbd_assert(spec->image_id && spec->image_name);
4509 rbd_assert(spec->snap_name);
4510
4511 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4512 u64 snap_id;
4513
4514 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4515 if (snap_id == CEPH_NOSNAP)
4516 return -ENOENT;
4517
4518 spec->snap_id = snap_id;
4519 } else {
4520 spec->snap_id = CEPH_NOSNAP;
4521 }
4522
4523 return 0;
4524}
4525
4526/*
4527 * A parent image will have all ids but none of the names.
4528 *
4529 * All names in an rbd spec are dynamically allocated. It's OK if we
4530 * can't figure out the name for an image id.
4531 */
4532static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05004533{
Alex Elder2e9f7f12013-04-26 09:43:48 -05004534 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4535 struct rbd_spec *spec = rbd_dev->spec;
4536 const char *pool_name;
4537 const char *image_name;
4538 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004539 int ret;
4540
Ilya Dryomov04077592014-07-23 17:11:20 +04004541 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4542 rbd_assert(spec->image_id);
4543 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05004544
Alex Elder2e9f7f12013-04-26 09:43:48 -05004545 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05004546
Alex Elder2e9f7f12013-04-26 09:43:48 -05004547 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4548 if (!pool_name) {
4549 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05004550 return -EIO;
4551 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05004552 pool_name = kstrdup(pool_name, GFP_KERNEL);
4553 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05004554 return -ENOMEM;
4555
4556 /* Fetch the image name; tolerate failure here */
4557
Alex Elder2e9f7f12013-04-26 09:43:48 -05004558 image_name = rbd_dev_image_name(rbd_dev);
4559 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05004560 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05004561
Ilya Dryomov04077592014-07-23 17:11:20 +04004562 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05004563
Alex Elder2e9f7f12013-04-26 09:43:48 -05004564 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07004565 if (IS_ERR(snap_name)) {
4566 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004567 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05004568 }
4569
4570 spec->pool_name = pool_name;
4571 spec->image_name = image_name;
4572 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004573
4574 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04004575
Alex Elder9e15b772012-10-30 19:40:33 -05004576out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05004577 kfree(image_name);
4578 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004579 return ret;
4580}
4581
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004582static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05004583{
4584 size_t size;
4585 int ret;
4586 void *reply_buf;
4587 void *p;
4588 void *end;
4589 u64 seq;
4590 u32 snap_count;
4591 struct ceph_snap_context *snapc;
4592 u32 i;
4593
4594 /*
4595 * We'll need room for the seq value (maximum snapshot id),
4596 * snapshot count, and array of that many snapshot ids.
4597 * For now we have a fixed upper limit on the number we're
4598 * prepared to receive.
4599 */
4600 size = sizeof (__le64) + sizeof (__le32) +
4601 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4602 reply_buf = kzalloc(size, GFP_KERNEL);
4603 if (!reply_buf)
4604 return -ENOMEM;
4605
Alex Elder36be9a72013-01-19 00:30:28 -06004606 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder41579762013-04-21 12:14:45 -05004607 "rbd", "get_snapcontext", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004608 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004609 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05004610 if (ret < 0)
4611 goto out;
4612
Alex Elder35d489f2012-07-03 16:01:19 -05004613 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004614 end = reply_buf + ret;
4615 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05004616 ceph_decode_64_safe(&p, end, seq, out);
4617 ceph_decode_32_safe(&p, end, snap_count, out);
4618
4619 /*
4620 * Make sure the reported number of snapshot ids wouldn't go
4621 * beyond the end of our buffer. But before checking that,
4622 * make sure the computed size of the snapshot context we
4623 * allocate is representable in a size_t.
4624 */
4625 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4626 / sizeof (u64)) {
4627 ret = -EINVAL;
4628 goto out;
4629 }
4630 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4631 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05004632 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05004633
Alex Elder812164f82013-04-30 00:44:32 -05004634 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05004635 if (!snapc) {
4636 ret = -ENOMEM;
4637 goto out;
4638 }
Alex Elder35d489f2012-07-03 16:01:19 -05004639 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05004640 for (i = 0; i < snap_count; i++)
4641 snapc->snaps[i] = ceph_decode_64(&p);
4642
Alex Elder49ece552013-05-06 08:37:00 -05004643 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05004644 rbd_dev->header.snapc = snapc;
4645
4646 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05004647 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05004648out:
4649 kfree(reply_buf);
4650
Alex Elder57385b52013-04-21 12:14:45 -05004651 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05004652}
4653
Alex Elder54cac612013-04-30 00:44:33 -05004654static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4655 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004656{
4657 size_t size;
4658 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05004659 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004660 int ret;
4661 void *p;
4662 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004663 char *snap_name;
4664
4665 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4666 reply_buf = kmalloc(size, GFP_KERNEL);
4667 if (!reply_buf)
4668 return ERR_PTR(-ENOMEM);
4669
Alex Elder54cac612013-04-30 00:44:33 -05004670 snapid = cpu_to_le64(snap_id);
Alex Elder36be9a72013-01-19 00:30:28 -06004671 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004672 "rbd", "get_snapshot_name",
Alex Elder54cac612013-04-30 00:44:33 -05004673 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004674 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004675 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05004676 if (ret < 0) {
4677 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004678 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05004679 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004680
4681 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004682 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05004683 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05004684 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004685 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004686
Alex Elderf40eb342013-04-25 15:09:42 -05004687 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05004688 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004689out:
4690 kfree(reply_buf);
4691
Alex Elderf40eb342013-04-25 15:09:42 -05004692 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004693}
4694
Alex Elder2df3fac2013-05-06 09:51:30 -05004695static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05004696{
Alex Elder2df3fac2013-05-06 09:51:30 -05004697 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05004698 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05004699
Josh Durgin1617e402013-06-12 14:43:10 -07004700 ret = rbd_dev_v2_image_size(rbd_dev);
4701 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004702 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07004703
Alex Elder2df3fac2013-05-06 09:51:30 -05004704 if (first_time) {
4705 ret = rbd_dev_v2_header_onetime(rbd_dev);
4706 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004707 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05004708 }
4709
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004710 ret = rbd_dev_v2_snap_context(rbd_dev);
Alex Elder117973f2012-08-31 17:29:55 -05004711 dout("rbd_dev_v2_snap_context returned %d\n", ret);
Alex Elder117973f2012-08-31 17:29:55 -05004712
4713 return ret;
4714}
4715
Ilya Dryomova720ae02014-07-23 17:11:19 +04004716static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4717{
4718 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4719
4720 if (rbd_dev->image_format == 1)
4721 return rbd_dev_v1_header_info(rbd_dev);
4722
4723 return rbd_dev_v2_header_info(rbd_dev);
4724}
4725
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004726static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4727{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004728 struct device *dev;
Alex Eldercd789ab2012-08-30 00:16:38 -05004729 int ret;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004730
Alex Eldercd789ab2012-08-30 00:16:38 -05004731 dev = &rbd_dev->dev;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004732 dev->bus = &rbd_bus_type;
4733 dev->type = &rbd_device_type;
4734 dev->parent = &rbd_root_dev;
Alex Elder200a6a82013-04-28 23:32:34 -05004735 dev->release = rbd_dev_device_release;
Alex Elderde71a292012-07-03 16:01:19 -05004736 dev_set_name(dev, "%d", rbd_dev->dev_id);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004737 ret = device_register(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004738
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004739 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004740}
4741
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004742static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4743{
4744 device_unregister(&rbd_dev->dev);
4745}
4746
Alex Elder1ddbe942012-01-29 13:57:44 -06004747/*
Alex Elder499afd52012-02-02 08:13:29 -06004748 * Get a unique rbd identifier for the given new rbd_dev, and add
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004749 * the rbd_dev to the global list.
Alex Elder1ddbe942012-01-29 13:57:44 -06004750 */
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004751static int rbd_dev_id_get(struct rbd_device *rbd_dev)
Alex Elderb7f23c32012-01-29 13:57:43 -06004752{
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004753 int new_dev_id;
4754
Ilya Dryomov9b60e702013-12-13 15:28:57 +02004755 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4756 0, minor_to_rbd_dev_id(1 << MINORBITS),
4757 GFP_KERNEL);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004758 if (new_dev_id < 0)
4759 return new_dev_id;
4760
4761 rbd_dev->dev_id = new_dev_id;
Alex Elder499afd52012-02-02 08:13:29 -06004762
4763 spin_lock(&rbd_dev_list_lock);
4764 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4765 spin_unlock(&rbd_dev_list_lock);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004766
Ilya Dryomov70eebd22013-12-13 15:28:56 +02004767 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004768
4769 return 0;
Alex Elder1ddbe942012-01-29 13:57:44 -06004770}
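/*
 * For instance, the first image mapped on an otherwise idle system
 * typically receives dev_id 0, which rbd_init_disk() turns into the
 * disk name "rbd0" (i.e. /dev/rbd0).
 */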
Alex Elderb7f23c32012-01-29 13:57:43 -06004771
Alex Elder1ddbe942012-01-29 13:57:44 -06004772/*
Alex Elder499afd52012-02-02 08:13:29 -06004773 * Remove an rbd_dev from the global list, and record that its
4774 * identifier is no longer in use.
Alex Elder1ddbe942012-01-29 13:57:44 -06004775 */
Alex Eldere2839302012-08-29 17:11:06 -05004776static void rbd_dev_id_put(struct rbd_device *rbd_dev)
Alex Elder1ddbe942012-01-29 13:57:44 -06004777{
Alex Elder499afd52012-02-02 08:13:29 -06004778 spin_lock(&rbd_dev_list_lock);
4779 list_del_init(&rbd_dev->node);
4780 spin_unlock(&rbd_dev_list_lock);
Alex Elderb7f23c32012-01-29 13:57:43 -06004781
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004782 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4783
4784 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
Alex Elderb7f23c32012-01-29 13:57:43 -06004785}
4786
Alex Eldera725f65e2012-02-02 08:13:30 -06004787/*
Alex Eldere28fff262012-02-02 08:13:30 -06004788 * Skips over white space at *buf, and updates *buf to point to the
4789 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06004790 * the token (string of non-white space characters) found. Note
4791 * that *buf must be terminated with '\0'.
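 *
 * For example, if *buf points to "  rbd foo", this returns 3 and
 * leaves *buf pointing at "rbd foo".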
Alex Eldere28fff262012-02-02 08:13:30 -06004792 */
4793static inline size_t next_token(const char **buf)
4794{
4795 /*
4796 * These are the characters that produce nonzero for
4797 * isspace() in the "C" and "POSIX" locales.
4798 */
4799 const char *spaces = " \f\n\r\t\v";
4800
4801 *buf += strspn(*buf, spaces); /* Find start of token */
4802
4803 return strcspn(*buf, spaces); /* Return token length */
4804}
4805
4806/*
Alex Elderea3352f2012-07-09 21:04:23 -05004807 * Finds the next token in *buf, dynamically allocates a buffer big
4808 * enough to hold a copy of it, and copies the token into the new
4809 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4810 * that a duplicate buffer is created even for a zero-length token.
4811 *
4812 * Returns a pointer to the newly-allocated duplicate, or a null
4813 * pointer if memory for the duplicate was not available. If
4814 * the lenp argument is a non-null pointer, the length of the token
4815 * (not including the '\0') is returned in *lenp.
4816 *
4817 * If successful, the *buf pointer will be updated to point beyond
4818 * the end of the found token.
4819 *
4820 * Note: uses GFP_KERNEL for allocation.
4821 */
4822static inline char *dup_token(const char **buf, size_t *lenp)
4823{
4824 char *dup;
4825 size_t len;
4826
4827 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05004828 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05004829 if (!dup)
4830 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05004831 *(dup + len) = '\0';
4832 *buf += len;
4833
4834 if (lenp)
4835 *lenp = len;
4836
4837 return dup;
4838}
4839
4840/*
Alex Elder859c31d2012-10-25 23:34:42 -05004841 * Parse the options provided for an "rbd add" (i.e., rbd image
4842 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4843 * and the data written is passed here via a NUL-terminated buffer.
4844 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05004845 *
Alex Elder859c31d2012-10-25 23:34:42 -05004846 * The information extracted from these options is recorded in
4847 * the other parameters which return dynamically-allocated
4848 * structures:
4849 * ceph_opts
4850 * The address of a pointer that will refer to a ceph options
4851 * structure. Caller must release the returned pointer using
4852 * ceph_destroy_options() when it is no longer needed.
4853 * rbd_opts
4854 * Address of an rbd options pointer. Fully initialized by
4855 * this function; caller must release with kfree().
4856 * spec
4857 * Address of an rbd image specification pointer. Fully
4858 * initialized by this function based on parsed options.
4859 * Caller must release with rbd_spec_put().
4860 *
4861 * The options passed take this form:
4862 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
4863 * where:
4864 * <mon_addrs>
4865 * A comma-separated list of one or more monitor addresses.
4866 * A monitor address is an ip address, optionally followed
4867 * by a port number (separated by a colon).
4868 * I.e.: ip1[:port1][,ip2[:port2]...]
4869 * <options>
4870 * A comma-separated list of ceph and/or rbd options.
4871 * <pool_name>
4872 * The name of the rados pool containing the rbd image.
4873 * <image_name>
4874 * The name of the image in that pool to map.
4875 * <snap_id>
4876 * An optional snapshot id. If provided, the mapping will
4877 * present data from the image at the time that snapshot was
4878 * created. The image head is used if no snapshot id is
4879 * provided. Snapshot mappings are always read-only.
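 *
 * For example, a mapping request might be written to /sys/bus/rbd/add
 * as (monitor address, credentials and names here are hypothetical):
 *
 *   1.2.3.4:6789 name=admin,secret=<key> rbd myimage -
 *
 * where the trailing "-" maps the image head rather than a snapshot.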
Alex Eldera725f65e2012-02-02 08:13:30 -06004880 */
Alex Elder859c31d2012-10-25 23:34:42 -05004881static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05004882 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05004883 struct rbd_options **opts,
4884 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06004885{
Alex Elderd22f76e2012-07-12 10:46:35 -05004886 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05004887 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05004888 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05004889 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05004890 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05004891 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004892 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05004893 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05004894 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06004895
4896 /* The first four tokens are required */
4897
Alex Elder7ef32142012-02-02 08:13:30 -06004898 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05004899 if (!len) {
4900 rbd_warn(NULL, "no monitor address(es) provided");
4901 return -EINVAL;
4902 }
Alex Elder0ddebc02012-10-25 23:34:41 -05004903 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05004904 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06004905 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06004906
Alex Elderdc79b112012-10-25 23:34:41 -05004907 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05004908 options = dup_token(&buf, NULL);
4909 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05004910 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004911 if (!*options) {
4912 rbd_warn(NULL, "no options provided");
4913 goto out_err;
4914 }
Alex Eldera725f65e2012-02-02 08:13:30 -06004915
Alex Elder859c31d2012-10-25 23:34:42 -05004916 spec = rbd_spec_alloc();
4917 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05004918 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05004919
4920 spec->pool_name = dup_token(&buf, NULL);
4921 if (!spec->pool_name)
4922 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004923 if (!*spec->pool_name) {
4924 rbd_warn(NULL, "no pool name provided");
4925 goto out_err;
4926 }
Alex Eldere28fff262012-02-02 08:13:30 -06004927
Alex Elder69e7a022012-11-01 08:39:26 -05004928 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05004929 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004930 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004931 if (!*spec->image_name) {
4932 rbd_warn(NULL, "no image name provided");
4933 goto out_err;
4934 }
Alex Eldere28fff262012-02-02 08:13:30 -06004935
Alex Elderf28e5652012-10-25 23:34:41 -05004936 /*
4937 * Snapshot name is optional; default is to use "-"
4938 * (indicating the head/no snapshot).
4939 */
Alex Elder3feeb8942012-08-31 17:29:52 -05004940 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05004941 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05004942 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4943 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05004944 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05004945 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05004946 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05004947 }
Alex Elderecb4dc22013-04-26 09:43:47 -05004948 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4949 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004950 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05004951 *(snap_name + len) = '\0';
4952 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05004953
Alex Elder0ddebc02012-10-25 23:34:41 -05004954 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06004955
Alex Elder4e9afeb2012-10-25 23:34:41 -05004956 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4957 if (!rbd_opts)
4958 goto out_mem;
4959
4960 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004961 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05004962
Alex Elder859c31d2012-10-25 23:34:42 -05004963 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05004964 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05004965 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05004966 if (IS_ERR(copts)) {
4967 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05004968 goto out_err;
4969 }
Alex Elder859c31d2012-10-25 23:34:42 -05004970 kfree(options);
4971
4972 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004973 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05004974 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05004975
Alex Elderdc79b112012-10-25 23:34:41 -05004976 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05004977out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05004978 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05004979out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05004980 kfree(rbd_opts);
4981 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05004982 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05004983
Alex Elderdc79b112012-10-25 23:34:41 -05004984 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06004985}
4986
Alex Elder589d30e2012-07-10 20:30:11 -05004987/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004988 * Return pool id (>= 0) or a negative error code.
4989 */
4990static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4991{
Ilya Dryomova319bf52015-05-15 12:02:17 +03004992 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004993 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004994 int tries = 0;
4995 int ret;
4996
4997again:
4998 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4999 if (ret == -ENOENT && tries++ < 1) {
5000 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
5001 &newest_epoch);
5002 if (ret < 0)
5003 return ret;
5004
5005 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5006 ceph_monc_request_next_osdmap(&rbdc->client->monc);
5007 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03005008 newest_epoch,
5009 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005010 goto again;
5011 } else {
5012 /* the osdmap we have is new enough */
5013 return -ENOENT;
5014 }
5015 }
5016
5017 return ret;
5018}
5019
5020/*
Alex Elder589d30e2012-07-10 20:30:11 -05005021 * An rbd format 2 image has a unique identifier, distinct from the
5022 * name given to it by the user. Internally, that identifier is
5023 * what's used to specify the names of objects related to the image.
5024 *
5025 * A special "rbd id" object is used to map an rbd image name to its
5026 * id. If that object doesn't exist, then there is no v2 rbd image
5027 * with the supplied name.
5028 *
5029 * This function will record the given rbd_dev's image_id field if
5030 * it can be determined, and in that case will return 0. If any
5031 * errors occur a negative errno will be returned and the rbd_dev's
5032 * image_id field will be unchanged (and should be NULL).
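 *
 * For example (hypothetical image name), mapping an image named "foo"
 * makes this look for an id object named RBD_ID_PREFIX followed by
 * "foo"; a format 1 image has no such object, so its image id is
 * recorded as an empty string.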
5033 */
5034static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5035{
5036 int ret;
5037 size_t size;
5038 char *object_name;
5039 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05005040 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05005041
Alex Elder589d30e2012-07-10 20:30:11 -05005042 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05005043 * When probing a parent image, the image id is already
5044 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05005045 * need to fetch the image id again in this case. We
5046 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05005047 */
Alex Elderc0fba362013-04-25 23:15:08 -05005048 if (rbd_dev->spec->image_id) {
5049 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5050
Alex Elder2c0d0a12012-10-30 19:40:33 -05005051 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05005052 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05005053
5054 /*
Alex Elder589d30e2012-07-10 20:30:11 -05005055 * First, see if the format 2 image id file exists, and if
5056 * so, get the image's persistent id from it.
5057 */
Alex Elder69e7a022012-11-01 08:39:26 -05005058 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005059 object_name = kmalloc(size, GFP_NOIO);
5060 if (!object_name)
5061 return -ENOMEM;
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005062 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005063 dout("rbd id object name is %s\n", object_name);
5064
5065 /* Response will be an encoded string, which includes a length */
5066
5067 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5068 response = kzalloc(size, GFP_NOIO);
5069 if (!response) {
5070 ret = -ENOMEM;
5071 goto out;
5072 }
5073
Alex Elderc0fba362013-04-25 23:15:08 -05005074 /* If it doesn't exist we'll assume it's a format 1 image */
5075
Alex Elder36be9a72013-01-19 00:30:28 -06005076 ret = rbd_obj_method_sync(rbd_dev, object_name,
Alex Elder41579762013-04-21 12:14:45 -05005077 "rbd", "get_id", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05005078 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06005079 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05005080 if (ret == -ENOENT) {
5081 image_id = kstrdup("", GFP_KERNEL);
5082 ret = image_id ? 0 : -ENOMEM;
5083 if (!ret)
5084 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04005085 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05005086 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05005087
Alex Elderc0fba362013-04-25 23:15:08 -05005088 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05005089 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08005090 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05005091 if (!ret)
5092 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05005093 }
5094
5095 if (!ret) {
5096 rbd_dev->spec->image_id = image_id;
5097 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05005098 }
5099out:
5100 kfree(response);
5101 kfree(object_name);
5102
5103 return ret;
5104}
5105
Alex Elder3abef3b2013-05-13 20:35:37 -05005106/*
 5107 * Undo whatever state changes were made by the v1 or v2 header
 5108 * info call.
5109 */
Alex Elder6fd48b32013-04-28 23:32:34 -05005110static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5111{
5112 struct rbd_image_header *header;
5113
Ilya Dryomove69b8d42015-01-19 12:06:14 +03005114 rbd_dev_parent_put(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005115
5116 /* Free dynamic fields from the header, then zero it out */
5117
5118 header = &rbd_dev->header;
Alex Elder812164f82013-04-30 00:44:32 -05005119 ceph_put_snap_context(header->snapc);
Alex Elder6fd48b32013-04-28 23:32:34 -05005120 kfree(header->snap_sizes);
5121 kfree(header->snap_names);
5122 kfree(header->object_prefix);
5123 memset(header, 0, sizeof (*header));
5124}
5125
Alex Elder2df3fac2013-05-06 09:51:30 -05005126static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
Alex Eldera30b71b2012-07-10 20:30:11 -05005127{
5128 int ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005129
Alex Elder1e130192012-07-03 16:01:19 -05005130 ret = rbd_dev_v2_object_prefix(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005131 if (ret)
Alex Elder1e130192012-07-03 16:01:19 -05005132 goto out_err;
Alex Elderb1b54022012-07-03 16:01:19 -05005133
Alex Elder2df3fac2013-05-06 09:51:30 -05005134 /*
 5135 * Get and check the features for the image. Currently the
5136 * features are assumed to never change.
5137 */
Alex Elderb1b54022012-07-03 16:01:19 -05005138 ret = rbd_dev_v2_features(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005139 if (ret)
Alex Elderb1b54022012-07-03 16:01:19 -05005140 goto out_err;
Alex Elder35d489f2012-07-03 16:01:19 -05005141
Alex Eldercc070d52013-04-21 12:14:45 -05005142 /* If the image supports fancy striping, get its parameters */
5143
5144 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5145 ret = rbd_dev_v2_striping_info(rbd_dev);
5146 if (ret < 0)
5147 goto out_err;
5148 }
Alex Elder2df3fac2013-05-06 09:51:30 -05005149 /* No support for crypto or compression in format 2 images */
Alex Eldera30b71b2012-07-10 20:30:11 -05005150
Alex Elder35152972012-08-31 17:29:55 -05005151 return 0;
Alex Elder9d475de2012-07-03 16:01:19 -05005152out_err:
Alex Elder642a2532013-05-06 17:40:33 -05005153 rbd_dev->header.features = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005154 kfree(rbd_dev->header.object_prefix);
5155 rbd_dev->header.object_prefix = NULL;
Alex Elder9d475de2012-07-03 16:01:19 -05005156
5157 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005158}
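/*
 * Note (illustrative assumption): header.features is a bit mask; the
 * bits this file acts on are RBD_FEATURE_LAYERING (the image may be a
 * clone, see rbd_dev_v2_parent_info()) and RBD_FEATURE_STRIPINGV2
 * (the striping parameters fetched above are meaningful).
 */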
5159
Alex Elder124afba2013-04-26 15:44:36 -05005160static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
Alex Elder83a06262012-10-30 15:47:17 -05005161{
Alex Elder2f82ee52012-10-30 19:40:33 -05005162 struct rbd_device *parent = NULL;
Alex Elder124afba2013-04-26 15:44:36 -05005163 struct rbd_spec *parent_spec;
5164 struct rbd_client *rbdc;
5165 int ret;
5166
5167 if (!rbd_dev->parent_spec)
5168 return 0;
5169 /*
5170 * We need to pass a reference to the client and the parent
5171 * spec when creating the parent rbd_dev. Images related by
5172 * parent/child relationships always share both.
5173 */
5174 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
5175 rbdc = __rbd_get_client(rbd_dev->rbd_client);
5176
5177 ret = -ENOMEM;
Ilya Dryomovd1475432015-06-22 13:24:48 +03005178 parent = rbd_dev_create(rbdc, parent_spec, NULL);
Alex Elder124afba2013-04-26 15:44:36 -05005179 if (!parent)
5180 goto out_err;
5181
Alex Elder1f3ef782013-05-06 17:40:33 -05005182 ret = rbd_dev_image_probe(parent, false);
Alex Elder124afba2013-04-26 15:44:36 -05005183 if (ret < 0)
5184 goto out_err;
5185 rbd_dev->parent = parent;
Alex Eldera2acd002013-05-08 22:50:04 -05005186 atomic_set(&rbd_dev->parent_ref, 1);
Alex Elder124afba2013-04-26 15:44:36 -05005187
5188 return 0;
5189out_err:
5190 if (parent) {
Alex Elderfb65d2282013-05-08 22:50:04 -05005191 rbd_dev_unparent(rbd_dev);
Alex Elder124afba2013-04-26 15:44:36 -05005192 kfree(rbd_dev->header_name);
5193 rbd_dev_destroy(parent);
5194 } else {
5195 rbd_put_client(rbdc);
5196 rbd_spec_put(parent_spec);
5197 }
5198
5199 return ret;
5200}
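/*
 * Illustrative note, not normative: rbd_dev_image_probe() is called
 * recursively above with mapping == false, so a chain of clones ends
 * up linked through ->parent, e.g.
 *
 *	mapped image -> parent -> grandparent -> NULL
 *
 * with each link holding its own rbd_client and rbd_spec references.
 */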
5201
Alex Elder200a6a82013-04-28 23:32:34 -05005202static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
Alex Elder124afba2013-04-26 15:44:36 -05005203{
Alex Elder83a06262012-10-30 15:47:17 -05005204 int ret;
Alex Elder83a06262012-10-30 15:47:17 -05005205
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005206 /* Get an id and fill in device name. */
Alex Elder83a06262012-10-30 15:47:17 -05005207
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005208 ret = rbd_dev_id_get(rbd_dev);
5209 if (ret)
5210 return ret;
5211
Alex Elder83a06262012-10-30 15:47:17 -05005212 BUILD_BUG_ON(DEV_NAME_LEN
5213 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5214 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5215
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005216 /* Record our major and minor device numbers. */
Alex Elder83a06262012-10-30 15:47:17 -05005217
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005218 if (!single_major) {
5219 ret = register_blkdev(0, rbd_dev->name);
5220 if (ret < 0)
5221 goto err_out_id;
5222
5223 rbd_dev->major = ret;
5224 rbd_dev->minor = 0;
5225 } else {
5226 rbd_dev->major = rbd_major;
5227 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5228 }
Alex Elder83a06262012-10-30 15:47:17 -05005229
5230 /* Set up the blkdev mapping. */
5231
5232 ret = rbd_init_disk(rbd_dev);
5233 if (ret)
5234 goto err_out_blkdev;
5235
Alex Elderf35a4de2013-05-06 09:51:29 -05005236 ret = rbd_dev_mapping_set(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005237 if (ret)
5238 goto err_out_disk;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04005239
Alex Elderf35a4de2013-05-06 09:51:29 -05005240 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
Josh Durgin22001f62013-09-30 20:10:04 -07005241 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
Alex Elderf35a4de2013-05-06 09:51:29 -05005242
5243 ret = rbd_bus_add_dev(rbd_dev);
5244 if (ret)
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005245 goto err_out_mapping;
Alex Elder83a06262012-10-30 15:47:17 -05005246
Alex Elder83a06262012-10-30 15:47:17 -05005247 /* Everything's ready. Announce the disk to the world. */
5248
Alex Elder129b79d2013-04-26 15:44:36 -05005249 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Alex Elder83a06262012-10-30 15:47:17 -05005250 add_disk(rbd_dev->disk);
5251
5252 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5253 (unsigned long long) rbd_dev->mapping.size);
5254
5255 return ret;
Alex Elder2f82ee52012-10-30 19:40:33 -05005256
Alex Elderf35a4de2013-05-06 09:51:29 -05005257err_out_mapping:
5258 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005259err_out_disk:
5260 rbd_free_disk(rbd_dev);
5261err_out_blkdev:
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005262 if (!single_major)
5263 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Elder83a06262012-10-30 15:47:17 -05005264err_out_id:
5265 rbd_dev_id_put(rbd_dev);
Alex Elderd1cf5782013-04-27 09:59:30 -05005266 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005267
5268 return ret;
5269}
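/*
 * Example of the outcome, assuming default module parameters: the
 * first mapped image gets dev_id 0 and appears as /dev/rbd0 with a
 * dynamically allocated major; with single_major=Y every image shares
 * rbd_major and the minor is derived from the dev_id by
 * rbd_dev_id_to_minor().
 */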
5270
Alex Elder332bb122013-04-27 09:59:30 -05005271static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5272{
5273 struct rbd_spec *spec = rbd_dev->spec;
5274 size_t size;
5275
5276 /* Record the header object name for this rbd image. */
5277
5278 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5279
5280 if (rbd_dev->image_format == 1)
5281 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
5282 else
5283 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
5284
5285 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
5286 if (!rbd_dev->header_name)
5287 return -ENOMEM;
5288
5289 if (rbd_dev->image_format == 1)
5290 sprintf(rbd_dev->header_name, "%s%s",
5291 spec->image_name, RBD_SUFFIX);
5292 else
5293 sprintf(rbd_dev->header_name, "%s%s",
5294 RBD_HEADER_PREFIX, spec->image_id);
5295 return 0;
5296}
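/*
 * Illustrative examples of the names built above, assuming the usual
 * values of RBD_SUFFIX (".rbd") and RBD_HEADER_PREFIX ("rbd_header."):
 *
 *	format 1, image name "foo"		-> "foo.rbd"
 *	format 2, image id "1014b2ae8944a"	-> "rbd_header.1014b2ae8944a"
 *
 * The format 2 image id shown is hypothetical.
 */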
5297
Alex Elder200a6a82013-04-28 23:32:34 -05005298static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5299{
Alex Elder6fd48b32013-04-28 23:32:34 -05005300 rbd_dev_unprobe(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005301 kfree(rbd_dev->header_name);
Alex Elder6fd48b32013-04-28 23:32:34 -05005302 rbd_dev->header_name = NULL;
5303 rbd_dev->image_format = 0;
5304 kfree(rbd_dev->spec->image_id);
5305 rbd_dev->spec->image_id = NULL;
5306
Alex Elder200a6a82013-04-28 23:32:34 -05005307 rbd_dev_destroy(rbd_dev);
5308}
5309
Alex Eldera30b71b2012-07-10 20:30:11 -05005310/*
5311 * Probe for the existence of the header object for the given rbd
Alex Elder1f3ef782013-05-06 17:40:33 -05005312 * device. If this image is the one being mapped (i.e., not a
5313 * parent), initiate a watch on its header object before using that
5314 * object to get detailed information about the rbd image.
Alex Eldera30b71b2012-07-10 20:30:11 -05005315 */
Alex Elder1f3ef782013-05-06 17:40:33 -05005316static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
Alex Eldera30b71b2012-07-10 20:30:11 -05005317{
5318 int ret;
5319
5320 /*
Alex Elder3abef3b2013-05-13 20:35:37 -05005321 * Get the id from the image id object. Unless there's an
5322 * error, rbd_dev->spec->image_id will be filled in with
5323 * a dynamically-allocated string, and rbd_dev->image_format
5324 * will be set to either 1 or 2.
Alex Eldera30b71b2012-07-10 20:30:11 -05005325 */
5326 ret = rbd_dev_image_id(rbd_dev);
5327 if (ret)
Alex Elderc0fba362013-04-25 23:15:08 -05005328 return ret;
Alex Elderc0fba362013-04-25 23:15:08 -05005329
Alex Elder332bb122013-04-27 09:59:30 -05005330 ret = rbd_dev_header_name(rbd_dev);
5331 if (ret)
5332 goto err_out_format;
5333
Alex Elder1f3ef782013-05-06 17:40:33 -05005334 if (mapping) {
Ilya Dryomovfca27062013-12-16 18:02:40 +02005335 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005336 if (ret) {
5337 if (ret == -ENOENT)
5338 pr_info("image %s/%s does not exist\n",
5339 rbd_dev->spec->pool_name,
5340 rbd_dev->spec->image_name);
Alex Elder1f3ef782013-05-06 17:40:33 -05005341 goto out_header_name;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005342 }
Alex Elder1f3ef782013-05-06 17:40:33 -05005343 }
Alex Elderb644de22013-04-27 09:59:31 -05005344
Ilya Dryomova720ae02014-07-23 17:11:19 +04005345 ret = rbd_dev_header_info(rbd_dev);
Alex Elder5655c4d2013-04-25 23:15:08 -05005346 if (ret)
Alex Elderb644de22013-04-27 09:59:31 -05005347 goto err_out_watch;
Alex Elder83a06262012-10-30 15:47:17 -05005348
Ilya Dryomov04077592014-07-23 17:11:20 +04005349 /*
5350 * If this image is the one being mapped, we have pool name and
5351 * id, image name and id, and snap name - need to fill snap id.
5352 * Otherwise this is a parent image, identified by pool, image
5353 * and snap ids - need to fill in names for those ids.
5354 */
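	/*
	 * For example (values hypothetical): an image mapped as
	 * rbd/foo@snap1 already has its names and only needs the snap
	 * id looked up, while a parent known only as pool id 2, image
	 * id "1014b2ae8944a", snap id 4 needs the matching names
	 * filled in so they can be shown in sysfs.
	 */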
5355 if (mapping)
5356 ret = rbd_spec_fill_snap_id(rbd_dev);
5357 else
5358 ret = rbd_spec_fill_names(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005359 if (ret) {
5360 if (ret == -ENOENT)
5361 pr_info("snap %s/%s@%s does not exist\n",
5362 rbd_dev->spec->pool_name,
5363 rbd_dev->spec->image_name,
5364 rbd_dev->spec->snap_name);
Alex Elder33dca392013-04-30 00:44:33 -05005365 goto err_out_probe;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005366 }
Alex Elder9bb81c92013-04-27 09:59:30 -05005367
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005368 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5369 ret = rbd_dev_v2_parent_info(rbd_dev);
5370 if (ret)
5371 goto err_out_probe;
5372
5373 /*
5374 * Need to warn users if this image is the one being
5375 * mapped and has a parent.
5376 */
5377 if (mapping && rbd_dev->parent_spec)
5378 rbd_warn(rbd_dev,
5379 "WARNING: kernel layering is EXPERIMENTAL!");
5380 }
5381
Alex Elder9bb81c92013-04-27 09:59:30 -05005382 ret = rbd_dev_probe_parent(rbd_dev);
Alex Elder30d60ba2013-05-06 09:51:30 -05005383 if (ret)
5384 goto err_out_probe;
Alex Elder83a06262012-10-30 15:47:17 -05005385
Alex Elder30d60ba2013-05-06 09:51:30 -05005386 dout("discovered format %u image, header name is %s\n",
5387 rbd_dev->image_format, rbd_dev->header_name);
Alex Elder30d60ba2013-05-06 09:51:30 -05005388 return 0;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005389
Alex Elder6fd48b32013-04-28 23:32:34 -05005390err_out_probe:
5391 rbd_dev_unprobe(rbd_dev);
Alex Elderb644de22013-04-27 09:59:31 -05005392err_out_watch:
Ilya Dryomovfca27062013-12-16 18:02:40 +02005393 if (mapping)
5394 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder332bb122013-04-27 09:59:30 -05005395out_header_name:
5396 kfree(rbd_dev->header_name);
5397 rbd_dev->header_name = NULL;
5398err_out_format:
5399 rbd_dev->image_format = 0;
Alex Elder5655c4d2013-04-25 23:15:08 -05005400 kfree(rbd_dev->spec->image_id);
5401 rbd_dev->spec->image_id = NULL;
Alex Elder5655c4d2013-04-25 23:15:08 -05005402 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005403}
5404
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005405static ssize_t do_rbd_add(struct bus_type *bus,
5406 const char *buf,
5407 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005408{
Alex Eldercb8627c2012-07-09 21:04:23 -05005409 struct rbd_device *rbd_dev = NULL;
Alex Elderdc79b112012-10-25 23:34:41 -05005410 struct ceph_options *ceph_opts = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005411 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05005412 struct rbd_spec *spec = NULL;
Alex Elder9d3997f2012-10-25 23:34:42 -05005413 struct rbd_client *rbdc;
Alex Elder51344a32013-05-06 07:40:30 -05005414 bool read_only;
Alex Elder27cc2592012-02-02 08:13:30 -06005415 int rc = -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005416
5417 if (!try_module_get(THIS_MODULE))
5418 return -ENODEV;
5419
Alex Eldera725f65e2012-02-02 08:13:30 -06005420 /* parse add command */
Alex Elder859c31d2012-10-25 23:34:42 -05005421 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
Alex Elderdc79b112012-10-25 23:34:41 -05005422 if (rc < 0)
Alex Elderbd4ba652012-10-25 23:34:42 -05005423 goto err_out_module;
Alex Eldera725f65e2012-02-02 08:13:30 -06005424
Alex Elder9d3997f2012-10-25 23:34:42 -05005425 rbdc = rbd_get_client(ceph_opts);
5426 if (IS_ERR(rbdc)) {
5427 rc = PTR_ERR(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005428 goto err_out_args;
Alex Elder9d3997f2012-10-25 23:34:42 -05005429 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005430
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005431 /* pick the pool */
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005432 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005433 if (rc < 0) {
5434 if (rc == -ENOENT)
5435 pr_info("pool %s does not exist\n", spec->pool_name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005436 goto err_out_client;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005437 }
Alex Elderc0cd10db2013-04-26 09:43:47 -05005438 spec->pool_id = (u64)rc;
Alex Elder859c31d2012-10-25 23:34:42 -05005439
Alex Elder0903e872012-11-14 12:25:19 -06005440 /* The ceph file layout needs to fit pool id in 32 bits */
5441
Alex Elderc0cd10db2013-04-26 09:43:47 -05005442 if (spec->pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04005443 rbd_warn(NULL, "pool id too large (%llu > %u)",
Alex Elderc0cd10db2013-04-26 09:43:47 -05005444 (unsigned long long)spec->pool_id, U32_MAX);
Alex Elder0903e872012-11-14 12:25:19 -06005445 rc = -EIO;
5446 goto err_out_client;
5447 }
5448
Ilya Dryomovd1475432015-06-22 13:24:48 +03005449 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
Alex Elderbd4ba652012-10-25 23:34:42 -05005450 if (!rbd_dev)
5451 goto err_out_client;
Alex Elderc53d5892012-10-25 23:34:42 -05005452 rbdc = NULL; /* rbd_dev now owns this */
5453 spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovd1475432015-06-22 13:24:48 +03005454 rbd_opts = NULL; /* rbd_dev now owns this */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005455
Alex Elder1f3ef782013-05-06 17:40:33 -05005456 rc = rbd_dev_image_probe(rbd_dev, true);
Alex Eldera30b71b2012-07-10 20:30:11 -05005457 if (rc < 0)
Alex Elderc53d5892012-10-25 23:34:42 -05005458 goto err_out_rbd_dev;
Alex Elder05fd6f62012-08-29 17:11:07 -05005459
Alex Elder7ce4eef2013-05-06 17:40:33 -05005460 /* If we are mapping a snapshot it must be marked read-only */
5461
Ilya Dryomovd1475432015-06-22 13:24:48 +03005462 read_only = rbd_dev->opts->read_only;
Alex Elder7ce4eef2013-05-06 17:40:33 -05005463 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5464 read_only = true;
5465 rbd_dev->mapping.read_only = read_only;
5466
Alex Elderb536f692013-04-28 23:32:34 -05005467 rc = rbd_dev_device_setup(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005468 if (rc) {
Ilya Dryomove37180c2013-12-16 18:02:41 +02005469 /*
5470 * rbd_dev_header_unwatch_sync() can't be moved into
5471 * rbd_dev_image_release() without refactoring, see
5472 * commit 1f3ef78861ac.
5473 */
5474 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005475 rbd_dev_image_release(rbd_dev);
5476 goto err_out_module;
5477 }
Alex Elderb536f692013-04-28 23:32:34 -05005478
Alex Elder3abef3b2013-05-13 20:35:37 -05005479 return count;
5480
Alex Elderc53d5892012-10-25 23:34:42 -05005481err_out_rbd_dev:
5482 rbd_dev_destroy(rbd_dev);
Alex Elderbd4ba652012-10-25 23:34:42 -05005483err_out_client:
Alex Elder9d3997f2012-10-25 23:34:42 -05005484 rbd_put_client(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005485err_out_args:
Alex Elder859c31d2012-10-25 23:34:42 -05005486 rbd_spec_put(spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03005487 kfree(rbd_opts);
Alex Elderbd4ba652012-10-25 23:34:42 -05005488err_out_module:
5489 module_put(THIS_MODULE);
Alex Elder27cc2592012-02-02 08:13:30 -06005490
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005491 dout("Error adding device %s\n", buf);
Alex Elder27cc2592012-02-02 08:13:30 -06005492
Alex Elderc0cd10db2013-04-26 09:43:47 -05005493 return (ssize_t)rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005494}
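/*
 * A sketch of the add string parsed above; the authoritative grammar
 * is in rbd_add_parse_args(), this example only assumes the usual
 * layout:
 *
 *	<mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 *
 * e.g. "1.2.3.4:6789 name=admin,secret=AQB... rbd foo -"
 */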
5495
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005496static ssize_t rbd_add(struct bus_type *bus,
5497 const char *buf,
5498 size_t count)
5499{
5500 if (single_major)
5501 return -EINVAL;
5502
5503 return do_rbd_add(bus, buf, count);
5504}
5505
5506static ssize_t rbd_add_single_major(struct bus_type *bus,
5507 const char *buf,
5508 size_t count)
5509{
5510 return do_rbd_add(bus, buf, count);
5511}
5512
Alex Elder200a6a82013-04-28 23:32:34 -05005513static void rbd_dev_device_release(struct device *dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005514{
Alex Elder593a9e72012-02-07 12:03:37 -06005515 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005516
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005517 rbd_free_disk(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005518 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Alex Elder6d80b132013-05-06 07:40:30 -05005519 rbd_dev_mapping_clear(rbd_dev);
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005520 if (!single_major)
5521 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Eldere2839302012-08-29 17:11:06 -05005522 rbd_dev_id_put(rbd_dev);
Alex Elderd1cf5782013-04-27 09:59:30 -05005523 rbd_dev_mapping_clear(rbd_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005524}
5525
Alex Elder05a46af2013-04-26 15:44:36 -05005526static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5527{
Alex Elderad945fc2013-04-26 15:44:36 -05005528 while (rbd_dev->parent) {
Alex Elder05a46af2013-04-26 15:44:36 -05005529 struct rbd_device *first = rbd_dev;
5530 struct rbd_device *second = first->parent;
5531 struct rbd_device *third;
5532
5533 /*
 5534 * Walk down to the parent that has no grandparent (the end
 5535 * of the chain) and remove it.
5536 */
5537 while (second && (third = second->parent)) {
5538 first = second;
5539 second = third;
5540 }
Alex Elderad945fc2013-04-26 15:44:36 -05005541 rbd_assert(second);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005542 rbd_dev_image_release(second);
Alex Elderad945fc2013-04-26 15:44:36 -05005543 first->parent = NULL;
5544 first->parent_overlap = 0;
5545
5546 rbd_assert(first->parent_spec);
Alex Elder05a46af2013-04-26 15:44:36 -05005547 rbd_spec_put(first->parent_spec);
5548 first->parent_spec = NULL;
Alex Elder05a46af2013-04-26 15:44:36 -05005549 }
5550}
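/*
 * Illustrative note: the loop above tears a clone chain down from the
 * far end first, e.g. for "mapped -> parent -> grandparent" the
 * grandparent image is released before the parent; the mapped device
 * itself is released by the caller.
 */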
5551
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005552static ssize_t do_rbd_remove(struct bus_type *bus,
5553 const char *buf,
5554 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005555{
5556 struct rbd_device *rbd_dev = NULL;
Alex Elder751cc0e2013-05-31 15:17:01 -05005557 struct list_head *tmp;
5558 int dev_id;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005559 unsigned long ul;
Alex Elder82a442d2013-05-31 17:40:44 -05005560 bool already = false;
Alex Elder0d8189e2013-04-27 09:59:30 -05005561 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005562
Jingoo Hanbb8e0e82013-09-11 14:20:07 -07005563 ret = kstrtoul(buf, 10, &ul);
Alex Elder0d8189e2013-04-27 09:59:30 -05005564 if (ret)
5565 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005566
5567 /* convert to int; abort if we lost anything in the conversion */
Alex Elder751cc0e2013-05-31 15:17:01 -05005568 dev_id = (int)ul;
5569 if (dev_id != ul)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005570 return -EINVAL;
5571
Alex Elder751cc0e2013-05-31 15:17:01 -05005572 ret = -ENOENT;
5573 spin_lock(&rbd_dev_list_lock);
5574 list_for_each(tmp, &rbd_dev_list) {
5575 rbd_dev = list_entry(tmp, struct rbd_device, node);
5576 if (rbd_dev->dev_id == dev_id) {
5577 ret = 0;
5578 break;
5579 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005580 }
Alex Elder751cc0e2013-05-31 15:17:01 -05005581 if (!ret) {
5582 spin_lock_irq(&rbd_dev->lock);
5583 if (rbd_dev->open_count)
5584 ret = -EBUSY;
5585 else
Alex Elder82a442d2013-05-31 17:40:44 -05005586 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5587 &rbd_dev->flags);
Alex Elder751cc0e2013-05-31 15:17:01 -05005588 spin_unlock_irq(&rbd_dev->lock);
5589 }
5590 spin_unlock(&rbd_dev_list_lock);
Alex Elder82a442d2013-05-31 17:40:44 -05005591 if (ret < 0 || already)
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005592 return ret;
Alex Elder751cc0e2013-05-31 15:17:01 -05005593
Ilya Dryomovfca27062013-12-16 18:02:40 +02005594 rbd_dev_header_unwatch_sync(rbd_dev);
Josh Durgin9abc5992013-08-29 17:31:03 -07005595 /*
5596 * flush remaining watch callbacks - these must be complete
 5597 * before the osd_client is shut down
5598 */
 5599 dout("%s: flushing notifies\n", __func__);
5600 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02005601
Josh Durgin98752012013-08-29 17:26:31 -07005602 /*
5603 * Don't free anything from rbd_dev->disk until after all
5604 * notifies are completely processed. Otherwise
5605 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5606 * in a potential use after free of rbd_dev->disk or rbd_dev.
5607 */
5608 rbd_bus_del_dev(rbd_dev);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005609 rbd_dev_image_release(rbd_dev);
Alex Elder79ab7552013-04-28 23:32:34 -05005610 module_put(THIS_MODULE);
Alex Elderaafb2302012-09-06 16:00:54 -05005611
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005612 return count;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005613}
5614
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005615static ssize_t rbd_remove(struct bus_type *bus,
5616 const char *buf,
5617 size_t count)
5618{
5619 if (single_major)
5620 return -EINVAL;
5621
5622 return do_rbd_remove(bus, buf, count);
5623}
5624
5625static ssize_t rbd_remove_single_major(struct bus_type *bus,
5626 const char *buf,
5627 size_t count)
5628{
5629 return do_rbd_remove(bus, buf, count);
5630}
5631
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005632/*
5633 * create control files in sysfs
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005634 * /sys/bus/rbd/...
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005635 */
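/*
 * Example usage (a sketch; Documentation/ABI/testing/sysfs-bus-rbd is
 * the authoritative reference):
 *
 *	echo "1.2.3.4:6789 name=admin,secret=AQB... rbd foo -" > /sys/bus/rbd/add
 *	echo 0 > /sys/bus/rbd/remove
 *
 * With single_major=Y the add_single_major and remove_single_major
 * attributes are used instead.
 */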
5636static int rbd_sysfs_init(void)
5637{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005638 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005639
Alex Elderfed4c142012-02-07 12:03:36 -06005640 ret = device_register(&rbd_root_dev);
Alex Elder21079782012-01-24 10:08:36 -06005641 if (ret < 0)
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005642 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005643
Alex Elderfed4c142012-02-07 12:03:36 -06005644 ret = bus_register(&rbd_bus_type);
5645 if (ret < 0)
5646 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005647
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005648 return ret;
5649}
5650
5651static void rbd_sysfs_cleanup(void)
5652{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005653 bus_unregister(&rbd_bus_type);
Alex Elderfed4c142012-02-07 12:03:36 -06005654 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005655}
5656
Alex Elder1c2a9df2013-05-01 12:43:03 -05005657static int rbd_slab_init(void)
5658{
5659 rbd_assert(!rbd_img_request_cache);
5660 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5661 sizeof (struct rbd_img_request),
5662 __alignof__(struct rbd_img_request),
5663 0, NULL);
Alex Elder868311b2013-05-01 12:43:03 -05005664 if (!rbd_img_request_cache)
5665 return -ENOMEM;
5666
5667 rbd_assert(!rbd_obj_request_cache);
5668 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5669 sizeof (struct rbd_obj_request),
5670 __alignof__(struct rbd_obj_request),
5671 0, NULL);
Alex Elder78c2a442013-05-01 12:43:04 -05005672 if (!rbd_obj_request_cache)
5673 goto out_err;
5674
5675 rbd_assert(!rbd_segment_name_cache);
5676 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02005677 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
Alex Elder78c2a442013-05-01 12:43:04 -05005678 if (rbd_segment_name_cache)
Alex Elder1c2a9df2013-05-01 12:43:03 -05005679 return 0;
Alex Elder78c2a442013-05-01 12:43:04 -05005680out_err:
5681 if (rbd_obj_request_cache) {
5682 kmem_cache_destroy(rbd_obj_request_cache);
5683 rbd_obj_request_cache = NULL;
5684 }
Alex Elder1c2a9df2013-05-01 12:43:03 -05005685
Alex Elder868311b2013-05-01 12:43:03 -05005686 kmem_cache_destroy(rbd_img_request_cache);
5687 rbd_img_request_cache = NULL;
5688
Alex Elder1c2a9df2013-05-01 12:43:03 -05005689 return -ENOMEM;
5690}
5691
5692static void rbd_slab_exit(void)
5693{
Alex Elder78c2a442013-05-01 12:43:04 -05005694 rbd_assert(rbd_segment_name_cache);
5695 kmem_cache_destroy(rbd_segment_name_cache);
5696 rbd_segment_name_cache = NULL;
5697
Alex Elder868311b2013-05-01 12:43:03 -05005698 rbd_assert(rbd_obj_request_cache);
5699 kmem_cache_destroy(rbd_obj_request_cache);
5700 rbd_obj_request_cache = NULL;
5701
Alex Elder1c2a9df2013-05-01 12:43:03 -05005702 rbd_assert(rbd_img_request_cache);
5703 kmem_cache_destroy(rbd_img_request_cache);
5704 rbd_img_request_cache = NULL;
5705}
5706
Alex Eldercc344fa2013-02-19 12:25:56 -06005707static int __init rbd_init(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005708{
5709 int rc;
5710
Alex Elder1e32d342013-01-30 11:13:33 -06005711 if (!libceph_compatible(NULL)) {
5712 rbd_warn(NULL, "libceph incompatibility (quitting)");
Alex Elder1e32d342013-01-30 11:13:33 -06005713 return -EINVAL;
5714 }
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005715
Alex Elder1c2a9df2013-05-01 12:43:03 -05005716 rc = rbd_slab_init();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005717 if (rc)
5718 return rc;
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005719
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005720 /*
5721 * The number of active work items is limited by the number of
Ilya Dryomovf77303b2015-04-22 18:28:13 +03005722 * rbd devices * queue depth, so leave @max_active at default.
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005723 */
5724 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5725 if (!rbd_wq) {
5726 rc = -ENOMEM;
5727 goto err_out_slab;
5728 }
5729
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005730 if (single_major) {
5731 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5732 if (rbd_major < 0) {
5733 rc = rbd_major;
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005734 goto err_out_wq;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005735 }
5736 }
5737
Alex Elder1c2a9df2013-05-01 12:43:03 -05005738 rc = rbd_sysfs_init();
5739 if (rc)
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005740 goto err_out_blkdev;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005741
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005742 if (single_major)
5743 pr_info("loaded (major %d)\n", rbd_major);
5744 else
5745 pr_info("loaded\n");
5746
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005747 return 0;
5748
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005749err_out_blkdev:
5750 if (single_major)
5751 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005752err_out_wq:
5753 destroy_workqueue(rbd_wq);
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005754err_out_slab:
5755 rbd_slab_exit();
Alex Elder1c2a9df2013-05-01 12:43:03 -05005756 return rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005757}
5758
Alex Eldercc344fa2013-02-19 12:25:56 -06005759static void __exit rbd_exit(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005760{
Ilya Dryomovffe312c2014-05-20 15:46:04 +04005761 ida_destroy(&rbd_dev_id_ida);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005762 rbd_sysfs_cleanup();
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005763 if (single_major)
5764 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005765 destroy_workqueue(rbd_wq);
Alex Elder1c2a9df2013-05-01 12:43:03 -05005766 rbd_slab_exit();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005767}
5768
5769module_init(rbd_init);
5770module_exit(rbd_exit);
5771
Alex Elderd552c612013-05-31 20:13:09 -05005772MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005773MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5774MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005775/* following authorship retained from original osdblk.c */
5776MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5777
Ilya Dryomov90da2582013-12-13 15:28:56 +02005778MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005779MODULE_LICENSE("GPL");