/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
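
/*
 * Illustrative example of that sysfs interface (monitor address, pool
 * and image names here are placeholders, not values from this file):
 *
 *   # echo "192.168.0.1:6789 name=admin rbd myimage -" > /sys/bus/rbd/add
 *
 * maps the image "myimage" from pool "rbd"; the trailing "-"
 * (RBD_SNAP_HEAD_NAME below) requests the image head rather than a
 * named snapshot.
 */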

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

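/*
 * Note: these "safe" counters are used further below to manage
 * rbd_dev->parent_ref, where a value of 0 means the parent image has
 * gone away and must not be resurrected.
 */
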
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
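
/*
 * That is: sizeof ("snap_") - 1 is 5, so with NAME_MAX at 255 a
 * snapshot name may be at most 250 bytes long.
 */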

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
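
/*
 * That is: 510 snapshot ids at 8 bytes each is 4080 bytes, leaving
 * room for the small fixed ceph_snap_context header while keeping the
 * whole allocation within 4KB.
 */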

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
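
/*
 * The formula budgets 2.5 decimal digits per byte plus one: for a
 * 4-byte int it evaluates to 11, comfortably covering the 10 decimal
 * digits of INT_MAX.
 */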

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

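/*
 * For example, dev_id 2 maps to minor 32: in single-major mode each
 * device owns a block of 1 << RBD_SINGLE_MAJOR_PART_SHIFT (16) minors,
 * with the extra minors available for partitions.
 */
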
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* a mapped snapshot can't be marked writable */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}
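
/*
 * Note: BLKROSET is the ioctl issued by e.g. "blockdev --setro
 * /dev/rbd0", so the read-only state of a mapping can be toggled from
 * userspace after the device has been mapped.
 */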

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

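/*
 * Illustrative example: rbd-specific tokens ride along in the options
 * field of the sysfs add string, e.g. "name=admin,queue_depth=128,ro";
 * tokens that libceph itself does not recognize are handed to
 * parse_rbd_opts_token() one at a time.
 */
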
static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to remove
 * the client from the client list, so the caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

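/*
 * For example (prefixes here are illustrative): with object prefix
 * "rb.0.1234.6b8b4567", segment 1 of a format 1 image lives in the
 * RADOS object "rb.0.1234.6b8b4567.000000000001"; a format 2 image
 * uses a 16-digit suffix instead, e.g. "rbd_data.1234.0000000000000001".
 */
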
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
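
/*
 * Worked example: with the default obj_order of 22 (4 MiB objects), an
 * image I/O at offset 7 MiB for 2 MiB lands in segment 1 at offset
 * 3 MiB; rbd_segment_length() clamps the first piece to the 1 MiB left
 * in that object, and the remaining 1 MiB is issued against segment 2.
 */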
1223
1224/*
Josh Durgin029bcbd2011-07-22 11:35:23 -07001225 * returns the size of an object in the image
1226 */
1227static u64 rbd_obj_bytes(struct rbd_image_header *header)
1228{
1229 return 1 << header->obj_order;
1230}
1231
1232/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001233 * bio helpers
1234 */
1235
1236static void bio_chain_put(struct bio *chain)
1237{
1238 struct bio *tmp;
1239
1240 while (chain) {
1241 tmp = chain;
1242 chain = chain->bi_next;
1243 bio_put(tmp);
1244 }
1245}
1246
1247/*
1248 * zeros a bio chain, starting at specific offset
1249 */
1250static void zero_bio_chain(struct bio *chain, int start_ofs)
1251{
Kent Overstreet79886132013-11-23 17:19:00 -08001252 struct bio_vec bv;
1253 struct bvec_iter iter;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001254 unsigned long flags;
1255 void *buf;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001256 int pos = 0;
1257
1258 while (chain) {
Kent Overstreet79886132013-11-23 17:19:00 -08001259 bio_for_each_segment(bv, chain, iter) {
1260 if (pos + bv.bv_len > start_ofs) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001261 int remainder = max(start_ofs - pos, 0);
Kent Overstreet79886132013-11-23 17:19:00 -08001262 buf = bvec_kmap_irq(&bv, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001263 memset(buf + remainder, 0,
Kent Overstreet79886132013-11-23 17:19:00 -08001264 bv.bv_len - remainder);
1265 flush_dcache_page(bv.bv_page);
Dan Carpenter85b5aaa2010-10-11 21:15:11 +02001266 bvec_kunmap_irq(buf, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001267 }
Kent Overstreet79886132013-11-23 17:19:00 -08001268 pos += bv.bv_len;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001269 }
1270
1271 chain = chain->bi_next;
1272 }
1273}
1274
1275/*
Alex Elderb9434c52013-04-19 15:34:50 -05001276 * similar to zero_bio_chain(), zeros data defined by a page array,
1277 * starting at the given byte offset from the start of the array and
1278 * continuing up to the given end offset. The pages array is
1279 * assumed to be big enough to hold all bytes up to the end.
1280 */
1281static void zero_pages(struct page **pages, u64 offset, u64 end)
1282{
1283 struct page **page = &pages[offset >> PAGE_SHIFT];
1284
1285 rbd_assert(end > offset);
1286 rbd_assert(end - offset <= (u64)SIZE_MAX);
1287 while (offset < end) {
1288 size_t page_offset;
1289 size_t length;
1290 unsigned long flags;
1291 void *kaddr;
1292
Geert Uytterhoeven491205a2013-05-13 20:35:37 -05001293 page_offset = offset & ~PAGE_MASK;
1294 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
Alex Elderb9434c52013-04-19 15:34:50 -05001295 local_irq_save(flags);
1296 kaddr = kmap_atomic(*page);
1297 memset(kaddr + page_offset, 0, length);
Alex Eldere2156052013-05-22 20:54:25 -05001298 flush_dcache_page(*page);
Alex Elderb9434c52013-04-19 15:34:50 -05001299 kunmap_atomic(kaddr);
1300 local_irq_restore(flags);
1301
1302 offset += length;
1303 page++;
1304 }
1305}
1306
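/*
 * Illustrative sketch (not part of the driver): the per-page chunking
 * used by zero_pages() above.  With 4 KiB pages, zeroing bytes
 * [6000, 10000) visits page 1 (offset 1904, 2192 bytes) and then
 * page 2 (offset 0, the remaining 1808 bytes).
 */
static __maybe_unused void zero_pages_chunking_example(void)
{
	u64 offset = 6000;
	u64 end = 10000;

	while (offset < end) {
		size_t page_offset = offset & ~PAGE_MASK;
		size_t length = min_t(size_t, PAGE_SIZE - page_offset,
				      end - offset);

		dout("page %llu offset %zu length %zu\n",
		     offset >> PAGE_SHIFT, page_offset, length);
		offset += length;
	}
}
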
1307/*
Alex Elderf7760da2012-10-20 22:17:27 -05001308 * Clone a portion of a bio, starting at the given byte offset
1309 * and continuing for the number of bytes indicated.
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001310 */
Alex Elderf7760da2012-10-20 22:17:27 -05001311static struct bio *bio_clone_range(struct bio *bio_src,
1312 unsigned int offset,
1313 unsigned int len,
1314 gfp_t gfpmask)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001315{
Alex Elderf7760da2012-10-20 22:17:27 -05001316 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001317
Kent Overstreet5341a6272013-08-07 14:31:11 -07001318 bio = bio_clone(bio_src, gfpmask);
Alex Elderf7760da2012-10-20 22:17:27 -05001319 if (!bio)
1320 return NULL; /* ENOMEM */
1321
Kent Overstreet5341a6272013-08-07 14:31:11 -07001322 bio_advance(bio, offset);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001323 bio->bi_iter.bi_size = len;
Alex Elder542582f2012-08-09 10:33:25 -07001324
Alex Elderf7760da2012-10-20 22:17:27 -05001325 return bio;
1326}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001327
Alex Elderf7760da2012-10-20 22:17:27 -05001328/*
1329 * Clone a portion of a bio chain, starting at the given byte offset
1330 * into the first bio in the source chain and continuing for the
1331 * number of bytes indicated. The result is another bio chain of
1332 * exactly the given length, or a null pointer on error.
1333 *
1334 * The bio_src and offset parameters are both in-out. On entry they
1335 * refer to the first source bio and the offset into that bio where
1336 * the start of data to be cloned is located.
1337 *
1338 * On return, bio_src is updated to refer to the bio in the source
 1339 * chain that contains the first un-cloned byte, and *offset will
1340 * contain the offset of that byte within that bio.
1341 */
1342static struct bio *bio_chain_clone_range(struct bio **bio_src,
1343 unsigned int *offset,
1344 unsigned int len,
1345 gfp_t gfpmask)
1346{
1347 struct bio *bi = *bio_src;
1348 unsigned int off = *offset;
1349 struct bio *chain = NULL;
1350 struct bio **end;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001351
Alex Elderf7760da2012-10-20 22:17:27 -05001352 /* Build up a chain of clone bios up to the limit */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001353
Kent Overstreet4f024f32013-10-11 15:44:27 -07001354 if (!bi || off >= bi->bi_iter.bi_size || !len)
Alex Elderf7760da2012-10-20 22:17:27 -05001355 return NULL; /* Nothing to clone */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001356
Alex Elderf7760da2012-10-20 22:17:27 -05001357 end = &chain;
1358 while (len) {
1359 unsigned int bi_size;
1360 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001361
Alex Elderf5400b72012-11-01 10:17:15 -05001362 if (!bi) {
1363 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
Alex Elderf7760da2012-10-20 22:17:27 -05001364 goto out_err; /* EINVAL; ran out of bio's */
Alex Elderf5400b72012-11-01 10:17:15 -05001365 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001366 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
Alex Elderf7760da2012-10-20 22:17:27 -05001367 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1368 if (!bio)
1369 goto out_err; /* ENOMEM */
1370
1371 *end = bio;
1372 end = &bio->bi_next;
1373
1374 off += bi_size;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001375 if (off == bi->bi_iter.bi_size) {
Alex Elderf7760da2012-10-20 22:17:27 -05001376 bi = bi->bi_next;
1377 off = 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001378 }
Alex Elderf7760da2012-10-20 22:17:27 -05001379 len -= bi_size;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001380 }
Alex Elderf7760da2012-10-20 22:17:27 -05001381 *bio_src = bi;
1382 *offset = off;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001383
Alex Elderf7760da2012-10-20 22:17:27 -05001384 return chain;
1385out_err:
1386 bio_chain_put(chain);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001387
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001388 return NULL;
1389}
1390
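/*
 * Illustrative sketch (not part of the driver): the in-out cursor
 * contract of bio_chain_clone_range().  Because *bio_src and *offset
 * are advanced to the first un-cloned byte, a caller can carve
 * consecutive pieces off one source chain simply by calling again
 * with the same two variables.
 */
static __maybe_unused struct bio *clone_next_piece_example(
					struct bio **bio_src,
					unsigned int *offset,
					unsigned int piece_len)
{
	/* Each call consumes piece_len bytes of the source chain. */
	return bio_chain_clone_range(bio_src, offset, piece_len, GFP_NOIO);
}
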
Alex Elder926f9b32013-02-11 12:33:24 -06001391/*
1392 * The default/initial value for all object request flags is 0. For
1393 * each flag, once its value is set to 1 it is never reset to 0
1394 * again.
1395 */
Alex Elder6365d332013-02-11 12:33:24 -06001396static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1397{
1398 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
Alex Elder6365d332013-02-11 12:33:24 -06001399 struct rbd_device *rbd_dev;
1400
Alex Elder57acbaa2013-02-11 12:33:24 -06001401 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001402 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
Alex Elder6365d332013-02-11 12:33:24 -06001403 obj_request);
1404 }
1405}
1406
1407static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1408{
1409 smp_mb();
1410 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1411}
1412
Alex Elder57acbaa2013-02-11 12:33:24 -06001413static void obj_request_done_set(struct rbd_obj_request *obj_request)
1414{
1415 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1416 struct rbd_device *rbd_dev = NULL;
1417
1418 if (obj_request_img_data_test(obj_request))
1419 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001420 rbd_warn(rbd_dev, "obj_request %p already marked done",
Alex Elder57acbaa2013-02-11 12:33:24 -06001421 obj_request);
1422 }
1423}
1424
1425static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1426{
1427 smp_mb();
1428 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1429}
1430
Alex Elder5679c592013-02-11 12:33:24 -06001431/*
1432 * This sets the KNOWN flag after (possibly) setting the EXISTS
1433 * flag. The latter is set based on the "exists" value provided.
1434 *
1435 * Note that for our purposes once an object exists it never goes
 1436 * away again.  It's possible that the responses from two existence
 1437 * checks are separated by the creation of the target object, and
1438 * the first ("doesn't exist") response arrives *after* the second
1439 * ("does exist"). In that case we ignore the second one.
1440 */
1441static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1442 bool exists)
1443{
1444 if (exists)
1445 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1446 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1447 smp_mb();
1448}
1449
1450static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1451{
1452 smp_mb();
1453 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1454}
1455
1456static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1457{
1458 smp_mb();
1459 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1460}
1461
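/*
 * Illustrative sketch (not part of the driver): why EXISTS is sticky.
 * If the responses to two existence checks race with object creation
 * and arrive out of order, applying both still converges on "exists",
 * because obj_request_existence_set() never clears the EXISTS bit.
 */
static __maybe_unused void existence_race_example(
					struct rbd_obj_request *obj_request)
{
	obj_request_existence_set(obj_request, true);	/* later reply */
	obj_request_existence_set(obj_request, false);	/* stale earlier reply */

	/* The stale "doesn't exist" answer was ignored. */
	rbd_assert(obj_request_exists_test(obj_request));
}
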
Ilya Dryomov96385562014-06-10 13:53:29 +04001462static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1463{
1464 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1465
1466 return obj_request->img_offset <
1467 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1468}
1469
Alex Elderbf0d5f502012-11-22 00:00:08 -06001470static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1471{
Alex Elder37206ee2013-02-20 17:32:08 -06001472 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1473 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001474 kref_get(&obj_request->kref);
1475}
1476
1477static void rbd_obj_request_destroy(struct kref *kref);
1478static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1479{
1480 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001481 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1482 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001483 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1484}
1485
Alex Elder0f2d5be2014-04-26 14:21:44 +04001486static void rbd_img_request_get(struct rbd_img_request *img_request)
1487{
1488 dout("%s: img %p (was %d)\n", __func__, img_request,
1489 atomic_read(&img_request->kref.refcount));
1490 kref_get(&img_request->kref);
1491}
1492
Alex Eldere93f3152013-05-08 22:50:04 -05001493static bool img_request_child_test(struct rbd_img_request *img_request);
1494static void rbd_parent_request_destroy(struct kref *kref);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001495static void rbd_img_request_destroy(struct kref *kref);
1496static void rbd_img_request_put(struct rbd_img_request *img_request)
1497{
1498 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001499 dout("%s: img %p (was %d)\n", __func__, img_request,
1500 atomic_read(&img_request->kref.refcount));
Alex Eldere93f3152013-05-08 22:50:04 -05001501 if (img_request_child_test(img_request))
1502 kref_put(&img_request->kref, rbd_parent_request_destroy);
1503 else
1504 kref_put(&img_request->kref, rbd_img_request_destroy);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001505}
1506
1507static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1508 struct rbd_obj_request *obj_request)
1509{
Alex Elder25dcf952013-01-25 17:08:55 -06001510 rbd_assert(obj_request->img_request == NULL);
1511
Alex Elderb155e862013-04-15 14:50:37 -05001512 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001513 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001514 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001515 rbd_assert(!obj_request_img_data_test(obj_request));
1516 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001517 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001518 img_request->obj_request_count++;
1519 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001520 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1521 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001522}
1523
1524static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1525 struct rbd_obj_request *obj_request)
1526{
1527 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001528
Alex Elder37206ee2013-02-20 17:32:08 -06001529 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1530 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001531 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001532 rbd_assert(img_request->obj_request_count > 0);
1533 img_request->obj_request_count--;
1534 rbd_assert(obj_request->which == img_request->obj_request_count);
1535 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001536 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001537 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001538 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001539 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001540 rbd_obj_request_put(obj_request);
1541}
1542
1543static bool obj_request_type_valid(enum obj_request_type type)
1544{
1545 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001546 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001547 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001548 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001549 return true;
1550 default:
1551 return false;
1552 }
1553}
1554
Alex Elderbf0d5f502012-11-22 00:00:08 -06001555static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1556 struct rbd_obj_request *obj_request)
1557{
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001558 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001559 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1560}
1561
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001562static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1563{
1564 dout("%s %p\n", __func__, obj_request);
1565 ceph_osdc_cancel_request(obj_request->osd_req);
1566}
1567
1568/*
1569 * Wait for an object request to complete. If interrupted, cancel the
1570 * underlying osd request.
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001571 *
1572 * @timeout: in jiffies, 0 means "wait forever"
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001573 */
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001574static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1575 unsigned long timeout)
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001576{
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001577 long ret;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001578
1579 dout("%s %p\n", __func__, obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001580 ret = wait_for_completion_interruptible_timeout(
1581 &obj_request->completion,
1582 ceph_timeout_jiffies(timeout));
1583 if (ret <= 0) {
1584 if (ret == 0)
1585 ret = -ETIMEDOUT;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001586 rbd_obj_request_end(obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001587 } else {
1588 ret = 0;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001589 }
1590
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001591 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1592 return ret;
1593}
1594
1595static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1596{
1597 return __rbd_obj_request_wait(obj_request, 0);
1598}
1599
1600static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1601 unsigned long timeout)
1602{
1603 return __rbd_obj_request_wait(obj_request, timeout);
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001604}
1605
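/*
 * Illustrative sketch (not part of the driver): using the wait helpers
 * above.  A timeout of 0 blocks until completion; a non-zero timeout
 * (in jiffies) turns expiry into -ETIMEDOUT and cancels the underlying
 * osd request.
 */
static __maybe_unused int submit_and_wait_example(
					struct ceph_osd_client *osdc,
					struct rbd_obj_request *obj_request)
{
	int ret = rbd_obj_request_submit(osdc, obj_request);

	if (ret)
		return ret;

	/* For example, give up after 30 seconds instead of waiting forever. */
	return rbd_obj_request_wait_timeout(obj_request, 30 * HZ);
}
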
Alex Elderbf0d5f502012-11-22 00:00:08 -06001606static void rbd_img_request_complete(struct rbd_img_request *img_request)
1607{
Alex Elder55f27e02013-04-10 12:34:25 -05001608
Alex Elder37206ee2013-02-20 17:32:08 -06001609 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001610
1611 /*
1612 * If no error occurred, compute the aggregate transfer
1613 * count for the image request. We could instead use
1614 * atomic64_cmpxchg() to update it as each object request
 1615 * completes; it's not clear offhand which way is better.
1616 */
1617 if (!img_request->result) {
1618 struct rbd_obj_request *obj_request;
1619 u64 xferred = 0;
1620
1621 for_each_obj_request(img_request, obj_request)
1622 xferred += obj_request->xferred;
1623 img_request->xferred = xferred;
1624 }
1625
Alex Elderbf0d5f502012-11-22 00:00:08 -06001626 if (img_request->callback)
1627 img_request->callback(img_request);
1628 else
1629 rbd_img_request_put(img_request);
1630}
1631
Alex Elder0c425242013-02-08 09:55:49 -06001632/*
1633 * The default/initial value for all image request flags is 0. Each
1634 * is conditionally set to 1 at image request initialization time
 1635 * and currently never changes thereafter.
1636 */
1637static void img_request_write_set(struct rbd_img_request *img_request)
1638{
1639 set_bit(IMG_REQ_WRITE, &img_request->flags);
1640 smp_mb();
1641}
1642
1643static bool img_request_write_test(struct rbd_img_request *img_request)
1644{
1645 smp_mb();
1646 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1647}
1648
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001649/*
 1650 * Set the discard flag when the img_request is a discard request
1651 */
1652static void img_request_discard_set(struct rbd_img_request *img_request)
1653{
1654 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1655 smp_mb();
1656}
1657
1658static bool img_request_discard_test(struct rbd_img_request *img_request)
1659{
1660 smp_mb();
1661 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1662}
1663
Alex Elder9849e982013-01-24 16:13:36 -06001664static void img_request_child_set(struct rbd_img_request *img_request)
1665{
1666 set_bit(IMG_REQ_CHILD, &img_request->flags);
1667 smp_mb();
1668}
1669
Alex Eldere93f3152013-05-08 22:50:04 -05001670static void img_request_child_clear(struct rbd_img_request *img_request)
1671{
1672 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1673 smp_mb();
1674}
1675
Alex Elder9849e982013-01-24 16:13:36 -06001676static bool img_request_child_test(struct rbd_img_request *img_request)
1677{
1678 smp_mb();
1679 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1680}
1681
Alex Elderd0b2e942013-01-24 16:13:36 -06001682static void img_request_layered_set(struct rbd_img_request *img_request)
1683{
1684 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1685 smp_mb();
1686}
1687
Alex Eldera2acd002013-05-08 22:50:04 -05001688static void img_request_layered_clear(struct rbd_img_request *img_request)
1689{
1690 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1691 smp_mb();
1692}
1693
Alex Elderd0b2e942013-01-24 16:13:36 -06001694static bool img_request_layered_test(struct rbd_img_request *img_request)
1695{
1696 smp_mb();
1697 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1698}
1699
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001700static enum obj_operation_type
1701rbd_img_request_op_type(struct rbd_img_request *img_request)
1702{
1703 if (img_request_write_test(img_request))
1704 return OBJ_OP_WRITE;
1705 else if (img_request_discard_test(img_request))
1706 return OBJ_OP_DISCARD;
1707 else
1708 return OBJ_OP_READ;
1709}
1710
Alex Elder6e2a4502013-03-27 09:16:30 -05001711static void
1712rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1713{
Alex Elderb9434c52013-04-19 15:34:50 -05001714 u64 xferred = obj_request->xferred;
1715 u64 length = obj_request->length;
1716
Alex Elder6e2a4502013-03-27 09:16:30 -05001717 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1718 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001719 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001720 /*
Josh Durgin17c1cc12013-08-26 17:55:38 -07001721 * ENOENT means a hole in the image. We zero-fill the entire
1722 * length of the request. A short read also implies zero-fill
1723 * to the end of the request. An error requires the whole
1724 * length of the request to be reported finished with an error
1725 * to the block layer. In each case we update the xferred
1726 * count to indicate the whole request was satisfied.
Alex Elder6e2a4502013-03-27 09:16:30 -05001727 */
Alex Elderb9434c52013-04-19 15:34:50 -05001728 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001729 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001730 if (obj_request->type == OBJ_REQUEST_BIO)
1731 zero_bio_chain(obj_request->bio_list, 0);
1732 else
1733 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001734 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001735 } else if (xferred < length && !obj_request->result) {
1736 if (obj_request->type == OBJ_REQUEST_BIO)
1737 zero_bio_chain(obj_request->bio_list, xferred);
1738 else
1739 zero_pages(obj_request->pages, xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001740 }
Josh Durgin17c1cc12013-08-26 17:55:38 -07001741 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001742 obj_request_done_set(obj_request);
1743}
1744
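/*
 * Illustrative sketch (not part of the driver): the zero-fill rules
 * above reduced to a choice of starting byte.  A hole (ENOENT) zeros
 * from byte 0; a short read zeros from the bytes actually transferred;
 * either way the full requested length is then reported as transferred.
 */
static __maybe_unused u64 read_zero_start_example(int result, u64 xferred)
{
	return result == -ENOENT ? 0 : xferred;
}
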
Alex Elderbf0d5f502012-11-22 00:00:08 -06001745static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1746{
Alex Elder37206ee2013-02-20 17:32:08 -06001747 dout("%s: obj %p cb %p\n", __func__, obj_request,
1748 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001749 if (obj_request->callback)
1750 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001751 else
1752 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001753}
1754
Alex Elderc47f9372013-02-26 14:23:07 -06001755static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
Alex Elder39bf2c52013-02-26 14:23:07 -06001756{
1757 dout("%s: obj %p\n", __func__, obj_request);
1758 obj_request_done_set(obj_request);
1759}
1760
Alex Elderc47f9372013-02-26 14:23:07 -06001761static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001762{
Alex Elder57acbaa2013-02-11 12:33:24 -06001763 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001764 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001765 bool layered = false;
1766
1767 if (obj_request_img_data_test(obj_request)) {
1768 img_request = obj_request->img_request;
1769 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001770 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001771 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001772
1773 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1774 obj_request, img_request, obj_request->result,
1775 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001776 if (layered && obj_request->result == -ENOENT &&
1777 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001778 rbd_img_parent_read(obj_request);
1779 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001780 rbd_img_obj_request_read_callback(obj_request);
1781 else
1782 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001783}
1784
Alex Elderc47f9372013-02-26 14:23:07 -06001785static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001786{
Sage Weil1b83bef2013-02-25 16:11:12 -08001787 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1788 obj_request->result, obj_request->length);
1789 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001790 * There is no such thing as a successful short write. Set
1791 * it to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001792 */
1793 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001794 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001795}
1796
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001797static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1798{
1799 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1800 obj_request->result, obj_request->length);
1801 /*
1802 * There is no such thing as a successful short discard. Set
1803 * it to our originally-requested length.
1804 */
1805 obj_request->xferred = obj_request->length;
Josh Durgind0265de2014-04-07 16:54:10 -07001806 /* discarding a non-existent object is not a problem */
1807 if (obj_request->result == -ENOENT)
1808 obj_request->result = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001809 obj_request_done_set(obj_request);
1810}
1811
Alex Elderfbfab532013-02-08 09:55:48 -06001812/*
1813 * For a simple stat call there's nothing to do. We'll do more if
1814 * this is part of a write sequence for a layered image.
1815 */
Alex Elderc47f9372013-02-26 14:23:07 -06001816static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001817{
Alex Elder37206ee2013-02-20 17:32:08 -06001818 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001819 obj_request_done_set(obj_request);
1820}
1821
Ilya Dryomov27617132015-07-16 17:36:11 +03001822static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1823{
1824 dout("%s: obj %p\n", __func__, obj_request);
1825
1826 if (obj_request_img_data_test(obj_request))
1827 rbd_osd_copyup_callback(obj_request);
1828 else
1829 obj_request_done_set(obj_request);
1830}
1831
Alex Elderbf0d5f502012-11-22 00:00:08 -06001832static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1833 struct ceph_msg *msg)
1834{
1835 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001836 u16 opcode;
1837
Alex Elder37206ee2013-02-20 17:32:08 -06001838 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001839 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001840 if (obj_request_img_data_test(obj_request)) {
1841 rbd_assert(obj_request->img_request);
1842 rbd_assert(obj_request->which != BAD_WHICH);
1843 } else {
1844 rbd_assert(obj_request->which == BAD_WHICH);
1845 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001846
Sage Weil1b83bef2013-02-25 16:11:12 -08001847 if (osd_req->r_result < 0)
1848 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001849
Ilya Dryomov7cc69d42014-02-25 16:22:27 +02001850 rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001851
Alex Elderc47f9372013-02-26 14:23:07 -06001852 /*
1853 * We support a 64-bit length, but ultimately it has to be
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01001854 * passed to the block layer, which just supports a 32-bit
1855 * length field.
Alex Elderc47f9372013-02-26 14:23:07 -06001856 */
Sage Weil1b83bef2013-02-25 16:11:12 -08001857 obj_request->xferred = osd_req->r_reply_op_len[0];
Alex Elder8b3e1a52013-01-24 16:13:36 -06001858 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001859
Alex Elder79528732013-04-03 21:32:51 -05001860 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001861 switch (opcode) {
1862 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001863 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001864 break;
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001865 case CEPH_OSD_OP_SETALLOCHINT:
1866 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
1867 /* fall through */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001868 case CEPH_OSD_OP_WRITE:
Alex Elderc47f9372013-02-26 14:23:07 -06001869 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001870 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001871 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001872 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001873 break;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001874 case CEPH_OSD_OP_DELETE:
1875 case CEPH_OSD_OP_TRUNCATE:
1876 case CEPH_OSD_OP_ZERO:
1877 rbd_osd_discard_callback(obj_request);
1878 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001879 case CEPH_OSD_OP_CALL:
Ilya Dryomov27617132015-07-16 17:36:11 +03001880 rbd_osd_call_callback(obj_request);
1881 break;
Alex Elderb8d70032012-11-30 17:53:04 -06001882 case CEPH_OSD_OP_NOTIFY_ACK:
Alex Elder9969ebc2013-01-18 12:31:10 -06001883 case CEPH_OSD_OP_WATCH:
Alex Elderc47f9372013-02-26 14:23:07 -06001884 rbd_osd_trivial_callback(obj_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06001885 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001886 default:
Ilya Dryomov9584d502014-07-11 12:11:20 +04001887 rbd_warn(NULL, "%s: unsupported op %hu",
Alex Elderbf0d5f502012-11-22 00:00:08 -06001888 obj_request->object_name, (unsigned short) opcode);
1889 break;
1890 }
1891
Alex Elder07741302013-02-05 23:41:50 -06001892 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001893 rbd_obj_request_complete(obj_request);
1894}
1895
Alex Elder9d4df012013-04-19 15:34:50 -05001896static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001897{
1898 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001899 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001900 u64 snap_id;
Alex Elder430c28c2013-04-03 21:32:51 -05001901
Alex Elder8c042b02013-04-03 01:28:58 -05001902 rbd_assert(osd_req != NULL);
Alex Elder430c28c2013-04-03 21:32:51 -05001903
Alex Elder9d4df012013-04-19 15:34:50 -05001904 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
Alex Elder8c042b02013-04-03 01:28:58 -05001905 ceph_osdc_build_request(osd_req, obj_request->offset,
Alex Elder9d4df012013-04-19 15:34:50 -05001906 NULL, snap_id, NULL);
1907}
1908
1909static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1910{
1911 struct rbd_img_request *img_request = obj_request->img_request;
1912 struct ceph_osd_request *osd_req = obj_request->osd_req;
1913 struct ceph_snap_context *snapc;
1914 struct timespec mtime = CURRENT_TIME;
1915
1916 rbd_assert(osd_req != NULL);
1917
1918 snapc = img_request ? img_request->snapc : NULL;
1919 ceph_osdc_build_request(osd_req, obj_request->offset,
1920 snapc, CEPH_NOSNAP, &mtime);
Alex Elder430c28c2013-04-03 21:32:51 -05001921}
1922
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001923/*
1924 * Create an osd request. A read request has one osd op (read).
1925 * A write request has either one (watch) or two (hint+write) osd ops.
1926 * (All rbd data writes are prefixed with an allocation hint op, but
1927 * technically osd watch is a write request, hence this distinction.)
1928 */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001929static struct ceph_osd_request *rbd_osd_req_create(
1930 struct rbd_device *rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001931 enum obj_operation_type op_type,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001932 unsigned int num_ops,
Alex Elder430c28c2013-04-03 21:32:51 -05001933 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001934{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001935 struct ceph_snap_context *snapc = NULL;
1936 struct ceph_osd_client *osdc;
1937 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001938
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001939 if (obj_request_img_data_test(obj_request) &&
1940 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
Alex Elder6365d332013-02-11 12:33:24 -06001941 struct rbd_img_request *img_request = obj_request->img_request;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001942 if (op_type == OBJ_OP_WRITE) {
1943 rbd_assert(img_request_write_test(img_request));
1944 } else {
1945 rbd_assert(img_request_discard_test(img_request));
1946 }
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001947 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001948 }
1949
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001950 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001951
1952 /* Allocate and initialize the request, for the num_ops ops */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001953
1954 osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001955 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1956 GFP_ATOMIC);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001957 if (!osd_req)
1958 return NULL; /* ENOMEM */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001959
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001960 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001961 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001962 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001963 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001964
1965 osd_req->r_callback = rbd_osd_req_callback;
1966 osd_req->r_priv = obj_request;
1967
Ilya Dryomov3c972c92014-01-27 17:40:20 +02001968 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1969 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001970
Alex Elderbf0d5f502012-11-22 00:00:08 -06001971 return osd_req;
1972}
1973
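/*
 * Illustrative sketch (not part of the driver): choosing num_ops for
 * rbd_osd_req_create().  Data writes are prefixed with an allocation
 * hint op, so they ask for two ops; reads, discards and watch requests
 * use one.  This mirrors how the fill code below calls it.
 */
static __maybe_unused struct ceph_osd_request *create_req_example(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct rbd_obj_request *obj_request)
{
	unsigned int num_ops = (op_type == OBJ_OP_WRITE) ? 2 : 1;

	return rbd_osd_req_create(rbd_dev, op_type, num_ops, obj_request);
}
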
Alex Elder0eefd472013-04-19 15:34:50 -05001974/*
Josh Durgind3246fb2014-04-07 16:49:21 -07001975 * Create a copyup osd request based on the information in the object
1976 * request supplied. A copyup request has two or three osd ops, a
1977 * copyup method call, potentially a hint op, and a write or truncate
1978 * or zero op.
Alex Elder0eefd472013-04-19 15:34:50 -05001979 */
1980static struct ceph_osd_request *
1981rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1982{
1983 struct rbd_img_request *img_request;
1984 struct ceph_snap_context *snapc;
1985 struct rbd_device *rbd_dev;
1986 struct ceph_osd_client *osdc;
1987 struct ceph_osd_request *osd_req;
Josh Durgind3246fb2014-04-07 16:49:21 -07001988 int num_osd_ops = 3;
Alex Elder0eefd472013-04-19 15:34:50 -05001989
1990 rbd_assert(obj_request_img_data_test(obj_request));
1991 img_request = obj_request->img_request;
1992 rbd_assert(img_request);
Josh Durgind3246fb2014-04-07 16:49:21 -07001993 rbd_assert(img_request_write_test(img_request) ||
1994 img_request_discard_test(img_request));
Alex Elder0eefd472013-04-19 15:34:50 -05001995
Josh Durgind3246fb2014-04-07 16:49:21 -07001996 if (img_request_discard_test(img_request))
1997 num_osd_ops = 2;
1998
1999 /* Allocate and initialize the request, for all the ops */
Alex Elder0eefd472013-04-19 15:34:50 -05002000
2001 snapc = img_request->snapc;
2002 rbd_dev = img_request->rbd_dev;
2003 osdc = &rbd_dev->rbd_client->client->osdc;
Josh Durgind3246fb2014-04-07 16:49:21 -07002004 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
2005 false, GFP_ATOMIC);
Alex Elder0eefd472013-04-19 15:34:50 -05002006 if (!osd_req)
2007 return NULL; /* ENOMEM */
2008
2009 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
2010 osd_req->r_callback = rbd_osd_req_callback;
2011 osd_req->r_priv = obj_request;
2012
Ilya Dryomov3c972c92014-01-27 17:40:20 +02002013 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
2014 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elder0eefd472013-04-19 15:34:50 -05002015
Alex Elder0eefd472013-04-19 15:34:50 -05002016 return osd_req;
2017}
2018
2019
Alex Elderbf0d5f502012-11-22 00:00:08 -06002020static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2021{
2022 ceph_osdc_put_request(osd_req);
2023}
2024
2025/* object_name is assumed to be a non-null pointer and NUL-terminated */
2026
2027static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2028 u64 offset, u64 length,
2029 enum obj_request_type type)
2030{
2031 struct rbd_obj_request *obj_request;
2032 size_t size;
2033 char *name;
2034
2035 rbd_assert(obj_request_type_valid(type));
2036
2037 size = strlen(object_name) + 1;
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002038 name = kmalloc(size, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002039 if (!name)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002040 return NULL;
2041
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002042 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002043 if (!obj_request) {
2044 kfree(name);
2045 return NULL;
2046 }
2047
Alex Elderbf0d5f502012-11-22 00:00:08 -06002048 obj_request->object_name = memcpy(name, object_name, size);
2049 obj_request->offset = offset;
2050 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06002051 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002052 obj_request->which = BAD_WHICH;
2053 obj_request->type = type;
2054 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06002055 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002056 kref_init(&obj_request->kref);
2057
Alex Elder37206ee2013-02-20 17:32:08 -06002058 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2059 offset, length, (int)type, obj_request);
2060
Alex Elderbf0d5f502012-11-22 00:00:08 -06002061 return obj_request;
2062}
2063
2064static void rbd_obj_request_destroy(struct kref *kref)
2065{
2066 struct rbd_obj_request *obj_request;
2067
2068 obj_request = container_of(kref, struct rbd_obj_request, kref);
2069
Alex Elder37206ee2013-02-20 17:32:08 -06002070 dout("%s: obj %p\n", __func__, obj_request);
2071
Alex Elderbf0d5f502012-11-22 00:00:08 -06002072 rbd_assert(obj_request->img_request == NULL);
2073 rbd_assert(obj_request->which == BAD_WHICH);
2074
2075 if (obj_request->osd_req)
2076 rbd_osd_req_destroy(obj_request->osd_req);
2077
2078 rbd_assert(obj_request_type_valid(obj_request->type));
2079 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06002080 case OBJ_REQUEST_NODATA:
2081 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06002082 case OBJ_REQUEST_BIO:
2083 if (obj_request->bio_list)
2084 bio_chain_put(obj_request->bio_list);
2085 break;
Alex Elder788e2df2013-01-17 12:25:27 -06002086 case OBJ_REQUEST_PAGES:
2087 if (obj_request->pages)
2088 ceph_release_page_vector(obj_request->pages,
2089 obj_request->page_count);
2090 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002091 }
2092
Alex Elderf907ad52013-05-01 12:43:03 -05002093 kfree(obj_request->object_name);
Alex Elder868311b2013-05-01 12:43:03 -05002094 obj_request->object_name = NULL;
2095 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002096}
2097
Alex Elderfb65d2282013-05-08 22:50:04 -05002098/* It's OK to call this for a device with no parent */
2099
2100static void rbd_spec_put(struct rbd_spec *spec);
2101static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2102{
2103 rbd_dev_remove_parent(rbd_dev);
2104 rbd_spec_put(rbd_dev->parent_spec);
2105 rbd_dev->parent_spec = NULL;
2106 rbd_dev->parent_overlap = 0;
2107}
2108
Alex Elderbf0d5f502012-11-22 00:00:08 -06002109/*
Alex Eldera2acd002013-05-08 22:50:04 -05002110 * Parent image reference counting is used to determine when an
2111 * image's parent fields can be safely torn down--after there are no
2112 * more in-flight requests to the parent image. When the last
2113 * reference is dropped, cleaning them up is safe.
2114 */
2115static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2116{
2117 int counter;
2118
2119 if (!rbd_dev->parent_spec)
2120 return;
2121
2122 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2123 if (counter > 0)
2124 return;
2125
2126 /* Last reference; clean up parent data structures */
2127
2128 if (!counter)
2129 rbd_dev_unparent(rbd_dev);
2130 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04002131 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002132}
2133
2134/*
2135 * If an image has a non-zero parent overlap, get a reference to its
2136 * parent.
2137 *
2138 * Returns true if the rbd device has a parent with a non-zero
2139 * overlap and a reference for it was successfully taken, or
2140 * false otherwise.
2141 */
2142static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2143{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002144 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002145
2146 if (!rbd_dev->parent_spec)
2147 return false;
2148
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002149 down_read(&rbd_dev->header_rwsem);
2150 if (rbd_dev->parent_overlap)
2151 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2152 up_read(&rbd_dev->header_rwsem);
Alex Eldera2acd002013-05-08 22:50:04 -05002153
2154 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04002155 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002156
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002157 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002158}
2159
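/*
 * Illustrative sketch (not part of the driver): the pairing contract
 * for parent references.  Every successful rbd_dev_parent_get() must
 * be matched by one rbd_dev_parent_put(); the last put tears down the
 * parent fields once no requests to the parent are in flight.
 */
static __maybe_unused void parent_ref_example(struct rbd_device *rbd_dev)
{
	if (!rbd_dev_parent_get(rbd_dev))
		return;		/* no parent, or no overlap */

	/* ... safe to issue requests against rbd_dev->parent here ... */

	rbd_dev_parent_put(rbd_dev);
}
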
Alex Elderbf0d5f502012-11-22 00:00:08 -06002160/*
2161 * Caller is responsible for filling in the list of object requests
2162 * that comprises the image request, and the Linux request pointer
2163 * (if there is one).
2164 */
Alex Eldercc344fa2013-02-19 12:25:56 -06002165static struct rbd_img_request *rbd_img_request_create(
2166 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06002167 u64 offset, u64 length,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002168 enum obj_operation_type op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07002169 struct ceph_snap_context *snapc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002170{
2171 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002172
Ilya Dryomov7a716aa2014-08-05 11:25:54 +04002173 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002174 if (!img_request)
2175 return NULL;
2176
Alex Elderbf0d5f502012-11-22 00:00:08 -06002177 img_request->rq = NULL;
2178 img_request->rbd_dev = rbd_dev;
2179 img_request->offset = offset;
2180 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06002181 img_request->flags = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002182 if (op_type == OBJ_OP_DISCARD) {
2183 img_request_discard_set(img_request);
2184 img_request->snapc = snapc;
2185 } else if (op_type == OBJ_OP_WRITE) {
Alex Elder0c425242013-02-08 09:55:49 -06002186 img_request_write_set(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07002187 img_request->snapc = snapc;
Alex Elder0c425242013-02-08 09:55:49 -06002188 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002189 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06002190 }
Alex Eldera2acd002013-05-08 22:50:04 -05002191 if (rbd_dev_parent_get(rbd_dev))
Alex Elderd0b2e942013-01-24 16:13:36 -06002192 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002193 spin_lock_init(&img_request->completion_lock);
2194 img_request->next_completion = 0;
2195 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06002196 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002197 img_request->obj_request_count = 0;
2198 INIT_LIST_HEAD(&img_request->obj_requests);
2199 kref_init(&img_request->kref);
2200
Alex Elder37206ee2013-02-20 17:32:08 -06002201 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002202 obj_op_name(op_type), offset, length, img_request);
Alex Elder37206ee2013-02-20 17:32:08 -06002203
Alex Elderbf0d5f502012-11-22 00:00:08 -06002204 return img_request;
2205}
2206
2207static void rbd_img_request_destroy(struct kref *kref)
2208{
2209 struct rbd_img_request *img_request;
2210 struct rbd_obj_request *obj_request;
2211 struct rbd_obj_request *next_obj_request;
2212
2213 img_request = container_of(kref, struct rbd_img_request, kref);
2214
Alex Elder37206ee2013-02-20 17:32:08 -06002215 dout("%s: img %p\n", __func__, img_request);
2216
Alex Elderbf0d5f502012-11-22 00:00:08 -06002217 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2218 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06002219 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002220
Alex Eldera2acd002013-05-08 22:50:04 -05002221 if (img_request_layered_test(img_request)) {
2222 img_request_layered_clear(img_request);
2223 rbd_dev_parent_put(img_request->rbd_dev);
2224 }
2225
Josh Durginbef95452014-04-04 17:47:52 -07002226 if (img_request_write_test(img_request) ||
2227 img_request_discard_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05002228 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002229
Alex Elder1c2a9df2013-05-01 12:43:03 -05002230 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002231}
2232
Alex Eldere93f3152013-05-08 22:50:04 -05002233static struct rbd_img_request *rbd_parent_request_create(
2234 struct rbd_obj_request *obj_request,
2235 u64 img_offset, u64 length)
2236{
2237 struct rbd_img_request *parent_request;
2238 struct rbd_device *rbd_dev;
2239
2240 rbd_assert(obj_request->img_request);
2241 rbd_dev = obj_request->img_request->rbd_dev;
2242
Josh Durgin4e752f02014-04-08 11:12:11 -07002243 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002244 length, OBJ_OP_READ, NULL);
Alex Eldere93f3152013-05-08 22:50:04 -05002245 if (!parent_request)
2246 return NULL;
2247
2248 img_request_child_set(parent_request);
2249 rbd_obj_request_get(obj_request);
2250 parent_request->obj_request = obj_request;
2251
2252 return parent_request;
2253}
2254
2255static void rbd_parent_request_destroy(struct kref *kref)
2256{
2257 struct rbd_img_request *parent_request;
2258 struct rbd_obj_request *orig_request;
2259
2260 parent_request = container_of(kref, struct rbd_img_request, kref);
2261 orig_request = parent_request->obj_request;
2262
2263 parent_request->obj_request = NULL;
2264 rbd_obj_request_put(orig_request);
2265 img_request_child_clear(parent_request);
2266
2267 rbd_img_request_destroy(kref);
2268}
2269
Alex Elder12178572013-02-08 09:55:49 -06002270static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2271{
Alex Elder6365d332013-02-11 12:33:24 -06002272 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06002273 unsigned int xferred;
2274 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002275 bool more;
Alex Elder12178572013-02-08 09:55:49 -06002276
Alex Elder6365d332013-02-11 12:33:24 -06002277 rbd_assert(obj_request_img_data_test(obj_request));
2278 img_request = obj_request->img_request;
2279
Alex Elder12178572013-02-08 09:55:49 -06002280 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2281 xferred = (unsigned int)obj_request->xferred;
2282 result = obj_request->result;
2283 if (result) {
2284 struct rbd_device *rbd_dev = img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002285 enum obj_operation_type op_type;
2286
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002287 if (img_request_discard_test(img_request))
2288 op_type = OBJ_OP_DISCARD;
2289 else if (img_request_write_test(img_request))
2290 op_type = OBJ_OP_WRITE;
2291 else
2292 op_type = OBJ_OP_READ;
Alex Elder12178572013-02-08 09:55:49 -06002293
Ilya Dryomov9584d502014-07-11 12:11:20 +04002294 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002295 obj_op_name(op_type), obj_request->length,
2296 obj_request->img_offset, obj_request->offset);
Ilya Dryomov9584d502014-07-11 12:11:20 +04002297 rbd_warn(rbd_dev, " result %d xferred %x",
Alex Elder12178572013-02-08 09:55:49 -06002298 result, xferred);
2299 if (!img_request->result)
2300 img_request->result = result;
Ilya Dryomov082a75d2015-04-25 15:56:15 +03002301 /*
2302 * Need to end I/O on the entire obj_request worth of
2303 * bytes in case of error.
2304 */
2305 xferred = obj_request->length;
Alex Elder12178572013-02-08 09:55:49 -06002306 }
2307
Alex Elderf1a47392013-04-19 15:34:50 -05002308 /* Image object requests don't own their page array */
2309
2310 if (obj_request->type == OBJ_REQUEST_PAGES) {
2311 obj_request->pages = NULL;
2312 obj_request->page_count = 0;
2313 }
2314
Alex Elder8b3e1a52013-01-24 16:13:36 -06002315 if (img_request_child_test(img_request)) {
2316 rbd_assert(img_request->obj_request != NULL);
2317 more = obj_request->which < img_request->obj_request_count - 1;
2318 } else {
2319 rbd_assert(img_request->rq != NULL);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01002320
2321 more = blk_update_request(img_request->rq, result, xferred);
2322 if (!more)
2323 __blk_mq_end_request(img_request->rq, result);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002324 }
2325
2326 return more;
Alex Elder12178572013-02-08 09:55:49 -06002327}
2328
Alex Elder21692382013-04-05 01:27:12 -05002329static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2330{
2331 struct rbd_img_request *img_request;
2332 u32 which = obj_request->which;
2333 bool more = true;
2334
Alex Elder6365d332013-02-11 12:33:24 -06002335 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05002336 img_request = obj_request->img_request;
2337
2338 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2339 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05002340 rbd_assert(img_request->obj_request_count > 0);
2341 rbd_assert(which != BAD_WHICH);
2342 rbd_assert(which < img_request->obj_request_count);
Alex Elder21692382013-04-05 01:27:12 -05002343
2344 spin_lock_irq(&img_request->completion_lock);
2345 if (which != img_request->next_completion)
2346 goto out;
2347
2348 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05002349 rbd_assert(more);
2350 rbd_assert(which < img_request->obj_request_count);
2351
2352 if (!obj_request_done_test(obj_request))
2353 break;
Alex Elder12178572013-02-08 09:55:49 -06002354 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002355 which++;
2356 }
2357
2358 rbd_assert(more ^ (which == img_request->obj_request_count));
2359 img_request->next_completion = which;
2360out:
2361 spin_unlock_irq(&img_request->completion_lock);
Alex Elder0f2d5be2014-04-26 14:21:44 +04002362 rbd_img_request_put(img_request);
Alex Elder21692382013-04-05 01:27:12 -05002363
2364 if (!more)
2365 rbd_img_request_complete(img_request);
2366}
2367
Alex Elderf1a47392013-04-19 15:34:50 -05002368/*
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002369 * Add individual osd ops to the given ceph_osd_request and prepare
2370 * them for submission. num_ops is the current number of
 2371 * osd operations already added to the object request.
2372 */
2373static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2374 struct ceph_osd_request *osd_request,
2375 enum obj_operation_type op_type,
2376 unsigned int num_ops)
2377{
2378 struct rbd_img_request *img_request = obj_request->img_request;
2379 struct rbd_device *rbd_dev = img_request->rbd_dev;
2380 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2381 u64 offset = obj_request->offset;
2382 u64 length = obj_request->length;
2383 u64 img_end;
2384 u16 opcode;
2385
2386 if (op_type == OBJ_OP_DISCARD) {
Josh Durgind3246fb2014-04-07 16:49:21 -07002387 if (!offset && length == object_size &&
2388 (!img_request_layered_test(img_request) ||
2389 !obj_request_overlaps_parent(obj_request))) {
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002390 opcode = CEPH_OSD_OP_DELETE;
 2391 	} else if (offset + length == object_size) {
2392 opcode = CEPH_OSD_OP_TRUNCATE;
2393 } else {
2394 down_read(&rbd_dev->header_rwsem);
2395 img_end = rbd_dev->header.image_size;
2396 up_read(&rbd_dev->header_rwsem);
2397
2398 if (obj_request->img_offset + length == img_end)
2399 opcode = CEPH_OSD_OP_TRUNCATE;
2400 else
2401 opcode = CEPH_OSD_OP_ZERO;
2402 }
2403 } else if (op_type == OBJ_OP_WRITE) {
2404 opcode = CEPH_OSD_OP_WRITE;
2405 osd_req_op_alloc_hint_init(osd_request, num_ops,
2406 object_size, object_size);
2407 num_ops++;
2408 } else {
2409 opcode = CEPH_OSD_OP_READ;
2410 }
2411
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002412 if (opcode == CEPH_OSD_OP_DELETE)
Yan, Zheng144cba12015-04-27 11:09:54 +08002413 osd_req_op_init(osd_request, num_ops, opcode, 0);
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002414 else
2415 osd_req_op_extent_init(osd_request, num_ops, opcode,
2416 offset, length, 0, 0);
2417
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002418 if (obj_request->type == OBJ_REQUEST_BIO)
2419 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2420 obj_request->bio_list, length);
2421 else if (obj_request->type == OBJ_REQUEST_PAGES)
2422 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2423 obj_request->pages, length,
2424 offset & ~PAGE_MASK, false, false);
2425
2426 /* Discards are also writes */
2427 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2428 rbd_osd_req_format_write(obj_request);
2429 else
2430 rbd_osd_req_format_read(obj_request);
2431}
2432
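/*
 * Illustrative sketch (not part of the driver): the discard opcode
 * selection above, with the parent-overlap and image-end refinements
 * omitted for brevity.
 */
static __maybe_unused u16 discard_opcode_example(u64 offset, u64 length,
						 u64 object_size)
{
	if (!offset && length == object_size)
		return CEPH_OSD_OP_DELETE;	/* whole object */
	if (offset + length == object_size)
		return CEPH_OSD_OP_TRUNCATE;	/* tail of object */
	return CEPH_OSD_OP_ZERO;		/* interior range */
}
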
2433/*
Alex Elderf1a47392013-04-19 15:34:50 -05002434 * Split up an image request into one or more object requests, each
2435 * to a different object. The "type" parameter indicates whether
2436 * "data_desc" is the pointer to the head of a list of bio
2437 * structures, or the base of a page array. In either case this
2438 * function assumes data_desc describes memory sufficient to hold
2439 * all data described by the image request.
2440 */
2441static int rbd_img_request_fill(struct rbd_img_request *img_request,
2442 enum obj_request_type type,
2443 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002444{
2445 struct rbd_device *rbd_dev = img_request->rbd_dev;
2446 struct rbd_obj_request *obj_request = NULL;
2447 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002448 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002449 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002450 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002451 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002452 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002453 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002454
Alex Elderf1a47392013-04-19 15:34:50 -05002455 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2456 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002457
Alex Elder7da22d22013-01-24 16:13:36 -06002458 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002459 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002460 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002461 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002462
2463 if (type == OBJ_REQUEST_BIO) {
2464 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002465 rbd_assert(img_offset ==
2466 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002467 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002468 pages = data_desc;
2469 }
2470
Alex Elderbf0d5f502012-11-22 00:00:08 -06002471 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002472 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002473 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002474 u64 offset;
2475 u64 length;
2476
Alex Elder7da22d22013-01-24 16:13:36 -06002477 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002478 if (!object_name)
2479 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06002480 offset = rbd_segment_offset(rbd_dev, img_offset);
2481 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002482 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002483 offset, length, type);
Alex Elder78c2a442013-05-01 12:43:04 -05002484 /* object request has its own copy of the object name */
2485 rbd_segment_name_free(object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002486 if (!obj_request)
2487 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002488
Josh Durgin03507db2013-08-27 14:45:46 -07002489 /*
2490 * set obj_request->img_request before creating the
2491 * osd_request so that it gets the right snapc
2492 */
2493 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002494
Alex Elderf1a47392013-04-19 15:34:50 -05002495 if (type == OBJ_REQUEST_BIO) {
2496 unsigned int clone_size;
2497
2498 rbd_assert(length <= (u64)UINT_MAX);
2499 clone_size = (unsigned int)length;
2500 obj_request->bio_list =
2501 bio_chain_clone_range(&bio_list,
2502 &bio_offset,
2503 clone_size,
2504 GFP_ATOMIC);
2505 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002506 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002507 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002508 unsigned int page_count;
2509
2510 obj_request->pages = pages;
2511 page_count = (u32)calc_pages_for(offset, length);
2512 obj_request->page_count = page_count;
2513 if ((offset + length) & ~PAGE_MASK)
2514 page_count--; /* more on last page */
2515 pages += page_count;
2516 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002517
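		/*
		 * Writes need room for two ops in the osd request --
		 * the write proper plus, in this era of the driver,
		 * an allocation hint set up by
		 * rbd_img_obj_request_fill().  Reads and discards use
		 * a single op.
		 */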
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002518 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2519 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2520 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002521 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002522 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002523
Alex Elder2fa12322013-04-05 01:27:12 -05002524 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002525 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002526 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002527
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002528 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2529
2530 rbd_img_request_get(img_request);
2531
Alex Elder7da22d22013-01-24 16:13:36 -06002532 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002533 resid -= length;
2534 }
2535
2536 return 0;
2537
Alex Elderbf0d5f502012-11-22 00:00:08 -06002538out_unwind:
2539 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002540 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002541
2542 return -ENOMEM;
2543}
2544
Alex Elder3d7efd12013-04-19 15:34:50 -05002545static void
Ilya Dryomov27617132015-07-16 17:36:11 +03002546rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
Alex Elder0eefd472013-04-19 15:34:50 -05002547{
2548 struct rbd_img_request *img_request;
2549 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002550 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002551 u32 page_count;
2552
Ilya Dryomov27617132015-07-16 17:36:11 +03002553 dout("%s: obj %p\n", __func__, obj_request);
2554
Josh Durgind3246fb2014-04-07 16:49:21 -07002555 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2556 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002557 rbd_assert(obj_request_img_data_test(obj_request));
2558 img_request = obj_request->img_request;
2559 rbd_assert(img_request);
2560
2561 rbd_dev = img_request->rbd_dev;
2562 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002563
Alex Elderebda6402013-05-10 16:29:22 -05002564 pages = obj_request->copyup_pages;
2565 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002566 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002567 page_count = obj_request->copyup_page_count;
2568 rbd_assert(page_count);
2569 obj_request->copyup_page_count = 0;
2570 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002571
2572 /*
2573 * We want the transfer count to reflect the size of the
2574 * original write request. There is no such thing as a
2575 * successful short write, so if the request was successful
2576 * we can just set it to the originally-requested length.
2577 */
2578 if (!obj_request->result)
2579 obj_request->xferred = obj_request->length;
2580
Ilya Dryomov27617132015-07-16 17:36:11 +03002581 obj_request_done_set(obj_request);
Alex Elder0eefd472013-04-19 15:34:50 -05002582}
2583
2584static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002585rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2586{
2587 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002588 struct ceph_osd_request *osd_req;
2589 struct ceph_osd_client *osdc;
2590 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002591 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002592 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002593 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002594 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002595 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002596
2597 rbd_assert(img_request_child_test(img_request));
2598
2599 /* First get what we need from the image request */
2600
2601 pages = img_request->copyup_pages;
2602 rbd_assert(pages != NULL);
2603 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002604 page_count = img_request->copyup_page_count;
2605 rbd_assert(page_count);
2606 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002607
2608 orig_request = img_request->obj_request;
2609 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002610 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002611 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002612 parent_length = img_request->length;
2613 rbd_assert(parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002614 rbd_img_request_put(img_request);
2615
Alex Elder91c6feb2013-05-06 17:40:32 -05002616 rbd_assert(orig_request->img_request);
2617 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002618 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002619
Alex Elderbbea1c12013-05-06 17:40:33 -05002620 /*
2621 * If the overlap has become 0 (most likely because the
2622 * image has been flattened) we need to free the pages
2623 * and re-submit the original write request.
2624 */
2625 if (!rbd_dev->parent_overlap) {
2626 struct ceph_osd_client *osdc;
2627
2628 ceph_release_page_vector(pages, page_count);
2629 osdc = &rbd_dev->rbd_client->client->osdc;
2630 img_result = rbd_obj_request_submit(osdc, orig_request);
2631 if (!img_result)
2632 return;
2633 }
2634
2635 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002636 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002637
Alex Elder8785b1d2013-05-09 10:08:49 -05002638 /*
2639 * The original osd request is of no use to us any more.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002640 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002641 * request. Allocate the new copyup osd request for the
2642 * original request, and release the old one.
2643 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002644 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002645 osd_req = rbd_osd_req_create_copyup(orig_request);
2646 if (!osd_req)
2647 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002648 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002649 orig_request->osd_req = osd_req;
2650 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002651 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002652
Alex Elder0eefd472013-04-19 15:34:50 -05002653 /* Initialize the copyup op */
2654
2655 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002656 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002657 false, false);
2658
Josh Durgind3246fb2014-04-07 16:49:21 -07002659 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002660
Josh Durgind3246fb2014-04-07 16:49:21 -07002661 op_type = rbd_img_request_op_type(orig_request->img_request);
2662 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
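	/*
	 * The rebuilt request is now CALL rbd.copyup at op 0 followed
	 * by the original write (or discard) op(s), so the osd applies
	 * the copied-up parent data and the client's change as one
	 * compound operation.
	 */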
Alex Elder0eefd472013-04-19 15:34:50 -05002663
2664 /* All set, send it off. */
2665
Alex Elder0eefd472013-04-19 15:34:50 -05002666 osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderbbea1c12013-05-06 17:40:33 -05002667 img_result = rbd_obj_request_submit(osdc, orig_request);
2668 if (!img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002669 return;
2670out_err:
2671 /* Record the error code and complete the request */
2672
Alex Elderbbea1c12013-05-06 17:40:33 -05002673 orig_request->result = img_result;
Alex Elder0eefd472013-04-19 15:34:50 -05002674 orig_request->xferred = 0;
2675 obj_request_done_set(orig_request);
2676 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002677}
2678
2679/*
2680 * Read from the parent image the range of data that covers the
2681 * entire target of the given object request. This is used for
2682 * satisfying a layered image write request when the target of an
2683 * object request from the image request does not exist.
2684 *
2685 * A page array big enough to hold the returned data is allocated
2686 * and supplied to rbd_img_request_fill() as the "data descriptor."
2687 * When the read completes, this page array will be transferred to
2688 * the original object request for the copyup operation.
2689 *
2690 * If an error occurs, record it as the result of the original
2691 * object request and mark it done so it gets completed.
2692 */
2693static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2694{
2695 struct rbd_img_request *img_request = NULL;
2696 struct rbd_img_request *parent_request = NULL;
2697 struct rbd_device *rbd_dev;
2698 u64 img_offset;
2699 u64 length;
2700 struct page **pages = NULL;
2701 u32 page_count;
2702 int result;
2703
2704 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderb91f09f2013-05-10 16:29:22 -05002705 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder3d7efd12013-04-19 15:34:50 -05002706
2707 img_request = obj_request->img_request;
2708 rbd_assert(img_request != NULL);
2709 rbd_dev = img_request->rbd_dev;
2710 rbd_assert(rbd_dev->parent != NULL);
2711
2712 /*
2713 * Determine the byte range covered by the object in the
2714 * child image to which the original request was to be sent.
2715 */
2716 img_offset = obj_request->img_offset - obj_request->offset;
2717 length = (u64)1 << rbd_dev->header.obj_order;
2718
2719 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002720 * There is no defined parent data beyond the parent
2721 * overlap, so limit what we read at that boundary if
2722 * necessary.
2723 */
2724 if (img_offset + length > rbd_dev->parent_overlap) {
2725 rbd_assert(img_offset < rbd_dev->parent_overlap);
2726 length = rbd_dev->parent_overlap - img_offset;
2727 }
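	/*
	 * Worked example (assuming 4 MiB objects and a 14 MiB parent
	 * overlap): for a target object at img_offset 12 MiB, only
	 * the first 2 MiB are read from the parent; the tail of the
	 * object is never written by the copyup and so reads back as
	 * zeroes.
	 */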
2728
2729 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002730 * Allocate a page array big enough to receive the data read
2731 * from the parent.
2732 */
2733 page_count = (u32)calc_pages_for(0, length);
2734 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2735 if (IS_ERR(pages)) {
2736 result = PTR_ERR(pages);
2737 pages = NULL;
2738 goto out_err;
2739 }
2740
2741 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002742 parent_request = rbd_parent_request_create(obj_request,
2743 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002744 if (!parent_request)
2745 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002746
2747 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2748 if (result)
2749 goto out_err;
2750 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002751 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002752
2753 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2754 result = rbd_img_request_submit(parent_request);
2755 if (!result)
2756 return 0;
2757
2758 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002759 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002760 parent_request->obj_request = NULL;
2761 rbd_obj_request_put(obj_request);
2762out_err:
2763 if (pages)
2764 ceph_release_page_vector(pages, page_count);
2765 if (parent_request)
2766 rbd_img_request_put(parent_request);
2767 obj_request->result = result;
2768 obj_request->xferred = 0;
2769 obj_request_done_set(obj_request);
2770
2771 return result;
2772}
2773
Alex Elderc5b5ef62013-02-11 12:33:24 -06002774static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2775{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002776 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002777 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002778 int result;
2779
2780 rbd_assert(!obj_request_img_data_test(obj_request));
2781
2782 /*
2783 * All we need from the object request is the original
2784 * request and the result of the STAT op. Grab those, then
2785 * we're done with the request.
2786 */
2787 orig_request = obj_request->obj_request;
2788 obj_request->obj_request = NULL;
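	/*
	 * This put presumably balances the get taken in
	 * rbd_img_obj_exists_submit(); the image request is expected
	 * to still hold its own reference, which is why orig_request
	 * can continue to be used below.
	 */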
Alex Elder912c3172013-05-13 20:35:38 -05002789 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002790 rbd_assert(orig_request);
2791 rbd_assert(orig_request->img_request);
2792
2793 result = obj_request->result;
2794 obj_request->result = 0;
2795
2796 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2797 obj_request, orig_request, result,
2798 obj_request->xferred, obj_request->length);
2799 rbd_obj_request_put(obj_request);
2800
Alex Elder638f5ab2013-05-06 17:40:33 -05002801 /*
2802 * If the overlap has become 0 (most likely because the
2803 * image has been flattened) we need to re-submit the
2804 * original write request (there are no copyup pages here).
2805 */
2806 rbd_dev = orig_request->img_request->rbd_dev;
2807 if (!rbd_dev->parent_overlap) {
2808 struct ceph_osd_client *osdc;
2809
Alex Elder638f5ab2013-05-06 17:40:33 -05002810 osdc = &rbd_dev->rbd_client->client->osdc;
2811 result = rbd_obj_request_submit(osdc, orig_request);
2812 if (!result)
2813 return;
2814 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002815
2816 /*
2817 * Our only purpose here is to determine whether the object
2818 * exists, and we don't want to treat the non-existence as
2819 * an error. If something else comes back, transfer the
2820 * error to the original request and complete it now.
2821 */
2822 if (!result) {
2823 obj_request_existence_set(orig_request, true);
2824 } else if (result == -ENOENT) {
2825 obj_request_existence_set(orig_request, false);
2826 } else if (result) {
2827 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002828 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002829 }
2830
2831 /*
2832 * Resubmit the original request now that we have recorded
2833 * whether the target object exists.
2834 */
Alex Elderb454e362013-04-19 15:34:50 -05002835 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002836out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002837 if (orig_request->result)
2838 rbd_obj_request_complete(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002839}
2840
2841static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2842{
2843 struct rbd_obj_request *stat_request;
2844 struct rbd_device *rbd_dev;
2845 struct ceph_osd_client *osdc;
2846 struct page **pages = NULL;
2847 u32 page_count;
2848 size_t size;
2849 int ret;
2850
2851 /*
2852 * The response data for a STAT call consists of:
2853 * le64 length;
2854 * struct {
2855 * le32 tv_sec;
2856 * le32 tv_nsec;
2857 * } mtime;
2858 */
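	/*
	 * That is 8 + 4 + 4 = 16 bytes in total, so the page vector
	 * computed below is always a single page.
	 */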
2859 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2860 page_count = (u32)calc_pages_for(0, size);
2861 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2862 if (IS_ERR(pages))
2863 return PTR_ERR(pages);
2864
2865 ret = -ENOMEM;
2866 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2867 OBJ_REQUEST_PAGES);
2868 if (!stat_request)
2869 goto out;
2870
2871 rbd_obj_request_get(obj_request);
2872 stat_request->obj_request = obj_request;
2873 stat_request->pages = pages;
2874 stat_request->page_count = page_count;
2875
2876 rbd_assert(obj_request->img_request);
2877 rbd_dev = obj_request->img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002878 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02002879 stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002880 if (!stat_request->osd_req)
2881 goto out;
2882 stat_request->callback = rbd_img_obj_exists_callback;
2883
Yan, Zheng144cba12015-04-27 11:09:54 +08002884 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002885 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2886 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002887 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002888
2889 osdc = &rbd_dev->rbd_client->client->osdc;
2890 ret = rbd_obj_request_submit(osdc, stat_request);
2891out:
2892 if (ret)
2893 rbd_obj_request_put(obj_request);
2894
2895 return ret;
2896}
2897
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002898static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002899{
2900 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002901 struct rbd_device *rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002902
2903 rbd_assert(obj_request_img_data_test(obj_request));
2904
2905 img_request = obj_request->img_request;
2906 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002907 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002908
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002909 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002910 if (!img_request_write_test(img_request) &&
2911 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002912 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002913
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002914 /* Non-layered writes */
2915 if (!img_request_layered_test(img_request))
2916 return true;
2917
2918 /*
2919 * Layered writes outside of the parent overlap range don't
2920 * share any data with the parent.
2921 */
2922 if (!obj_request_overlaps_parent(obj_request))
2923 return true;
2924
2925 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002926 * Entire-object layered writes - we will overwrite whatever
2927 * parent data there is anyway.
2928 */
2929 if (!obj_request->offset &&
2930 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2931 return true;
2932
2933 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002934 * If the object is known to already exist, its parent data has
2935 * already been copied.
2936 */
2937 if (obj_request_known_test(obj_request) &&
2938 obj_request_exists_test(obj_request))
2939 return true;
2940
2941 return false;
2942}
2943
2944static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2945{
2946 if (img_obj_request_simple(obj_request)) {
Alex Elderb454e362013-04-19 15:34:50 -05002947 struct rbd_device *rbd_dev;
2948 struct ceph_osd_client *osdc;
2949
2950 rbd_dev = obj_request->img_request->rbd_dev;
2951 osdc = &rbd_dev->rbd_client->client->osdc;
2952
2953 return rbd_obj_request_submit(osdc, obj_request);
2954 }
2955
2956 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002957 * It's a layered write. The target object might exist but
2958 * we may not know that yet. If we know it doesn't exist,
2959 * start by reading the data for the full target object from
2960 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002961 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002962 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002963 return rbd_img_obj_parent_read_full(obj_request);
2964
2965 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002966
2967 return rbd_img_obj_exists_submit(obj_request);
2968}
2969
Alex Elderbf0d5f502012-11-22 00:00:08 -06002970static int rbd_img_request_submit(struct rbd_img_request *img_request)
2971{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002972 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002973 struct rbd_obj_request *next_obj_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002974
Alex Elder37206ee2013-02-20 17:32:08 -06002975 dout("%s: img %p\n", __func__, img_request);
Alex Elder46faeed2013-04-10 17:47:46 -05002976 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002977 int ret;
2978
Alex Elderb454e362013-04-19 15:34:50 -05002979 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002980 if (ret)
2981 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002982 }
2983
2984 return 0;
2985}
2986
Alex Elder8b3e1a52013-01-24 16:13:36 -06002987static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2988{
2989 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002990 struct rbd_device *rbd_dev;
2991 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05002992 u64 img_xferred;
2993 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002994
2995 rbd_assert(img_request_child_test(img_request));
2996
Alex Elder02c74fb2013-05-06 17:40:33 -05002997 /* First get what we need from the image request and release it */
2998
Alex Elder8b3e1a52013-01-24 16:13:36 -06002999 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05003000 img_xferred = img_request->xferred;
3001 img_result = img_request->result;
3002 rbd_img_request_put(img_request);
3003
3004 /*
3005 * If the overlap has become 0 (most likely because the
3006 * image has been flattened) we need to re-submit the
3007 * original request.
3008 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003009 rbd_assert(obj_request);
3010 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05003011 rbd_dev = obj_request->img_request->rbd_dev;
3012 if (!rbd_dev->parent_overlap) {
3013 struct ceph_osd_client *osdc;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003014
Alex Elder02c74fb2013-05-06 17:40:33 -05003015 osdc = &rbd_dev->rbd_client->client->osdc;
3016 img_result = rbd_obj_request_submit(osdc, obj_request);
3017 if (!img_result)
3018 return;
3019 }
3020
3021 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003022 if (obj_request->result)
3023 goto out;
3024
3025 /*
3026 * We need to zero anything beyond the parent overlap
3027 * boundary. Since rbd_img_obj_request_read_callback()
3028 * will zero anything beyond the end of a short read, an
3029 * easy way to do this is to pretend the data from the
3030 * parent came up short--ending at the overlap boundary.
3031 */
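	/*
	 * For example, with a 4 MiB parent overlap, a child read of
	 * 1 MiB at img_offset 3.5 MiB can get at most 0.5 MiB of
	 * parent data; xferred is trimmed accordingly below and the
	 * read callback zero-fills the remainder.
	 */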
3032 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3033 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003034 if (obj_end > rbd_dev->parent_overlap) {
3035 u64 xferred = 0;
3036
3037 if (obj_request->img_offset < rbd_dev->parent_overlap)
3038 xferred = rbd_dev->parent_overlap -
3039 obj_request->img_offset;
3040
Alex Elder02c74fb2013-05-06 17:40:33 -05003041 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003042 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05003043 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003044 }
3045out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06003046 rbd_img_obj_request_read_callback(obj_request);
3047 rbd_obj_request_complete(obj_request);
3048}
3049
3050static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3051{
Alex Elder8b3e1a52013-01-24 16:13:36 -06003052 struct rbd_img_request *img_request;
3053 int result;
3054
3055 rbd_assert(obj_request_img_data_test(obj_request));
3056 rbd_assert(obj_request->img_request != NULL);
3057 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003058 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003059
Alex Elder8b3e1a52013-01-24 16:13:36 -06003060 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003061 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003062 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003063 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003064 result = -ENOMEM;
3065 if (!img_request)
3066 goto out_err;
3067
Alex Elder5b2ab722013-05-06 17:40:33 -05003068 if (obj_request->type == OBJ_REQUEST_BIO)
3069 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3070 obj_request->bio_list);
3071 else
3072 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3073 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003074 if (result)
3075 goto out_err;
3076
3077 img_request->callback = rbd_img_parent_read_callback;
3078 result = rbd_img_request_submit(img_request);
3079 if (result)
3080 goto out_err;
3081
3082 return;
3083out_err:
3084 if (img_request)
3085 rbd_img_request_put(img_request);
3086 obj_request->result = result;
3087 obj_request->xferred = 0;
3088 obj_request_done_set(obj_request);
3089}
3090
Josh Durgin20e0af62013-08-29 17:36:03 -07003091static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
Alex Elderb8d70032012-11-30 17:53:04 -06003092{
3093 struct rbd_obj_request *obj_request;
Alex Elder21692382013-04-05 01:27:12 -05003094 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06003095 int ret;
3096
3097 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3098 OBJ_REQUEST_NODATA);
3099 if (!obj_request)
3100 return -ENOMEM;
3101
3102 ret = -ENOMEM;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003103 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003104 obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003105 if (!obj_request->osd_req)
3106 goto out;
3107
Alex Elderc99d2d42013-04-05 01:27:11 -05003108 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003109 notify_id, 0, 0);
Alex Elder9d4df012013-04-19 15:34:50 -05003110 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003111
Alex Elderb8d70032012-11-30 17:53:04 -06003112 ret = rbd_obj_request_submit(osdc, obj_request);
Alex Eldercf81b602013-01-17 12:18:46 -06003113 if (ret)
Josh Durgin20e0af62013-08-29 17:36:03 -07003114 goto out;
3115 ret = rbd_obj_request_wait(obj_request);
3116out:
3117 rbd_obj_request_put(obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003118
3119 return ret;
3120}
3121
3122static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3123{
3124 struct rbd_device *rbd_dev = (struct rbd_device *)data;
Alex Eldere627db02013-05-06 07:40:30 -05003125 int ret;
Alex Elderb8d70032012-11-30 17:53:04 -06003126
3127 if (!rbd_dev)
3128 return;
3129
Alex Elder37206ee2013-02-20 17:32:08 -06003130 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003131 rbd_dev->header_name, (unsigned long long)notify_id,
3132 (unsigned int)opcode);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003133
3134 /*
3135 * Until adequate refresh error handling is in place, there is
3136 * not much we can do here, except warn.
3137 *
3138 * See http://tracker.ceph.com/issues/5040
3139 */
Alex Eldere627db02013-05-06 07:40:30 -05003140 ret = rbd_dev_refresh(rbd_dev);
3141 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003142 rbd_warn(rbd_dev, "refresh failed: %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003143
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003144 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3145 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003146 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003147}
3148
Alex Elder9969ebc2013-01-18 12:31:10 -06003149/*
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003150 * Send a (un)watch request and wait for the ack. Return a request
 3151 * with a ref held on success, or an ERR_PTR on error.
3152 */
3153static struct rbd_obj_request *rbd_obj_watch_request_helper(
3154 struct rbd_device *rbd_dev,
3155 bool watch)
3156{
3157 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003158 struct ceph_options *opts = osdc->client->options;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003159 struct rbd_obj_request *obj_request;
3160 int ret;
3161
3162 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3163 OBJ_REQUEST_NODATA);
3164 if (!obj_request)
3165 return ERR_PTR(-ENOMEM);
3166
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003167 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003168 obj_request);
3169 if (!obj_request->osd_req) {
3170 ret = -ENOMEM;
3171 goto out;
3172 }
3173
3174 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3175 rbd_dev->watch_event->cookie, 0, watch);
3176 rbd_osd_req_format_write(obj_request);
3177
3178 if (watch)
3179 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3180
3181 ret = rbd_obj_request_submit(osdc, obj_request);
3182 if (ret)
3183 goto out;
3184
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003185 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003186 if (ret)
3187 goto out;
3188
3189 ret = obj_request->result;
3190 if (ret) {
3191 if (watch)
3192 rbd_obj_request_end(obj_request);
3193 goto out;
3194 }
3195
3196 return obj_request;
3197
3198out:
3199 rbd_obj_request_put(obj_request);
3200 return ERR_PTR(ret);
3201}
3202
3203/*
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003204 * Initiate a watch request, synchronously.
Alex Elder9969ebc2013-01-18 12:31:10 -06003205 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003206static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003207{
3208 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3209 struct rbd_obj_request *obj_request;
Alex Elder9969ebc2013-01-18 12:31:10 -06003210 int ret;
3211
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003212 rbd_assert(!rbd_dev->watch_event);
3213 rbd_assert(!rbd_dev->watch_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06003214
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003215 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3216 &rbd_dev->watch_event);
3217 if (ret < 0)
3218 return ret;
Alex Elder9969ebc2013-01-18 12:31:10 -06003219
Ilya Dryomov76756a52014-06-20 18:29:20 +04003220 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3221 if (IS_ERR(obj_request)) {
3222 ceph_osdc_cancel_event(rbd_dev->watch_event);
3223 rbd_dev->watch_event = NULL;
3224 return PTR_ERR(obj_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003225 }
Alex Elder9969ebc2013-01-18 12:31:10 -06003226
Alex Elder8eb87562013-01-25 17:08:55 -06003227 /*
3228 * A watch request is set to linger, so the underlying osd
3229 * request won't go away until we unregister it. We retain
3230 * a pointer to the object request during that time (in
Ilya Dryomov76756a52014-06-20 18:29:20 +04003231 * rbd_dev->watch_request), so we'll keep a reference to it.
3232 * We'll drop that reference after we've unregistered it in
3233 * rbd_dev_header_unwatch_sync().
Alex Elder8eb87562013-01-25 17:08:55 -06003234 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003235 rbd_dev->watch_request = obj_request;
Alex Elder8eb87562013-01-25 17:08:55 -06003236
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003237 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003238}
3239
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003240/*
3241 * Tear down a watch request, synchronously.
3242 */
Ilya Dryomov76756a52014-06-20 18:29:20 +04003243static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003244{
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003245 struct rbd_obj_request *obj_request;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003246
3247 rbd_assert(rbd_dev->watch_event);
3248 rbd_assert(rbd_dev->watch_request);
3249
Ilya Dryomov76756a52014-06-20 18:29:20 +04003250 rbd_obj_request_end(rbd_dev->watch_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003251 rbd_obj_request_put(rbd_dev->watch_request);
3252 rbd_dev->watch_request = NULL;
3253
Ilya Dryomov76756a52014-06-20 18:29:20 +04003254 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3255 if (!IS_ERR(obj_request))
3256 rbd_obj_request_put(obj_request);
3257 else
3258 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3259 PTR_ERR(obj_request));
3260
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003261 ceph_osdc_cancel_event(rbd_dev->watch_event);
3262 rbd_dev->watch_event = NULL;
Ilya Dryomovfca27062013-12-16 18:02:40 +02003263}
3264
Alex Elder36be9a72013-01-19 00:30:28 -06003265/*
Alex Elderf40eb342013-04-25 15:09:42 -05003266 * Synchronous osd object method call. Returns the number of bytes
 3267 * returned in the inbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003268 */
3269static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3270 const char *object_name,
3271 const char *class_name,
3272 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003273 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003274 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003275 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003276 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003277{
Alex Elder21692382013-04-05 01:27:12 -05003278 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06003279 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06003280 struct page **pages;
3281 u32 page_count;
3282 int ret;
3283
3284 /*
Alex Elder6010a452013-04-05 01:27:11 -05003285 * Method calls are ultimately read operations. The result
 3286 * should be placed into the inbound buffer provided. They
3287 * also supply outbound data--parameters for the object
3288 * method. Currently if this is present it will be a
3289 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003290 */
Alex Elder57385b52013-04-21 12:14:45 -05003291 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06003292 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3293 if (IS_ERR(pages))
3294 return PTR_ERR(pages);
3295
3296 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05003297 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06003298 OBJ_REQUEST_PAGES);
3299 if (!obj_request)
3300 goto out;
3301
3302 obj_request->pages = pages;
3303 obj_request->page_count = page_count;
3304
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003305 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003306 obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06003307 if (!obj_request->osd_req)
3308 goto out;
3309
Alex Elderc99d2d42013-04-05 01:27:11 -05003310 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05003311 class_name, method_name);
3312 if (outbound_size) {
3313 struct ceph_pagelist *pagelist;
3314
3315 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3316 if (!pagelist)
3317 goto out;
3318
3319 ceph_pagelist_init(pagelist);
3320 ceph_pagelist_append(pagelist, outbound, outbound_size);
3321 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3322 pagelist);
3323 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05003324 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3325 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05003326 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003327 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003328
Alex Elder36be9a72013-01-19 00:30:28 -06003329 ret = rbd_obj_request_submit(osdc, obj_request);
3330 if (ret)
3331 goto out;
3332 ret = rbd_obj_request_wait(obj_request);
3333 if (ret)
3334 goto out;
3335
3336 ret = obj_request->result;
3337 if (ret < 0)
3338 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05003339
3340 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3341 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003342 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06003343out:
3344 if (obj_request)
3345 rbd_obj_request_put(obj_request);
3346 else
3347 ceph_release_page_vector(pages, page_count);
3348
3349 return ret;
3350}
3351
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003352static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003353{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003354 struct request *rq = blk_mq_rq_from_pdu(work);
3355 struct rbd_device *rbd_dev = rq->q->queuedata;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003356 struct rbd_img_request *img_request;
Josh Durgin4e752f02014-04-08 11:12:11 -07003357 struct ceph_snap_context *snapc = NULL;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003358 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3359 u64 length = blk_rq_bytes(rq);
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003360 enum obj_operation_type op_type;
Josh Durgin4e752f02014-04-08 11:12:11 -07003361 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003362 int result;
3363
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003364 if (rq->cmd_type != REQ_TYPE_FS) {
3365 dout("%s: non-fs request type %d\n", __func__,
3366 (int) rq->cmd_type);
3367 result = -EIO;
3368 goto err;
3369 }
3370
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003371 if (rq->cmd_flags & REQ_DISCARD)
3372 op_type = OBJ_OP_DISCARD;
3373 else if (rq->cmd_flags & REQ_WRITE)
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003374 op_type = OBJ_OP_WRITE;
3375 else
3376 op_type = OBJ_OP_READ;
3377
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003378 /* Ignore/skip any zero-length requests */
3379
3380 if (!length) {
3381 dout("%s: zero-length request\n", __func__);
3382 result = 0;
3383 goto err_rq;
3384 }
3385
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003386 /* Only reads are allowed to a read-only device */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003387
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003388 if (op_type != OBJ_OP_READ) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003389 if (rbd_dev->mapping.read_only) {
3390 result = -EROFS;
3391 goto err_rq;
3392 }
3393 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3394 }
3395
3396 /*
3397 * Quit early if the mapped snapshot no longer exists. It's
3398 * still possible the snapshot will have disappeared by the
3399 * time our request arrives at the osd, but there's no sense in
3400 * sending it if we already know.
3401 */
3402 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3403 dout("request for non-existent snapshot");
3404 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3405 result = -ENXIO;
3406 goto err_rq;
3407 }
3408
3409 if (offset && length > U64_MAX - offset + 1) {
3410 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3411 length);
3412 result = -EINVAL;
3413 goto err_rq; /* Shouldn't happen */
3414 }
3415
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003416 blk_mq_start_request(rq);
3417
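	/*
	 * Sample the mapping size and, for writes and discards, the
	 * snapshot context under header_rwsem so that this request
	 * sees a consistent view even if a header refresh runs
	 * concurrently.
	 */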
Josh Durgin4e752f02014-04-08 11:12:11 -07003418 down_read(&rbd_dev->header_rwsem);
3419 mapping_size = rbd_dev->mapping.size;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003420 if (op_type != OBJ_OP_READ) {
Josh Durgin4e752f02014-04-08 11:12:11 -07003421 snapc = rbd_dev->header.snapc;
3422 ceph_get_snap_context(snapc);
3423 }
3424 up_read(&rbd_dev->header_rwsem);
3425
3426 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003427 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07003428 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003429 result = -EIO;
3430 goto err_rq;
3431 }
3432
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003433 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07003434 snapc);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003435 if (!img_request) {
3436 result = -ENOMEM;
3437 goto err_rq;
3438 }
3439 img_request->rq = rq;
3440
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003441 if (op_type == OBJ_OP_DISCARD)
3442 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3443 NULL);
3444 else
3445 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3446 rq->bio);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003447 if (result)
3448 goto err_img_request;
3449
3450 result = rbd_img_request_submit(img_request);
3451 if (result)
3452 goto err_img_request;
3453
3454 return;
3455
3456err_img_request:
3457 rbd_img_request_put(img_request);
3458err_rq:
3459 if (result)
3460 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003461 obj_op_name(op_type), length, offset, result);
SF Markus Elfringe96a6502014-11-02 15:20:59 +01003462 ceph_put_snap_context(snapc);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003463err:
3464 blk_mq_end_request(rq, result);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003465}
3466
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003467static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3468 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003469{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003470 struct request *rq = bd->rq;
3471 struct work_struct *work = blk_mq_rq_to_pdu(rq);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003472
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003473 queue_work(rbd_wq, work);
3474 return BLK_MQ_RQ_QUEUE_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06003475}
3476
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003477/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003478 * A queue callback. Makes sure that we don't create a bio that spans across
 3479 * multiple osd objects. One exception would be with single-page bios,
Alex Elderf7760da2012-10-20 22:17:27 -05003480 * which we handle later at bio_chain_clone_range()
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003481 */
3482static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3483 struct bio_vec *bvec)
3484{
3485 struct rbd_device *rbd_dev = q->queuedata;
Alex Eldere5cfeed22012-10-20 22:17:27 -05003486 sector_t sector_offset;
3487 sector_t sectors_per_obj;
3488 sector_t obj_sector_offset;
3489 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003490
Alex Eldere5cfeed22012-10-20 22:17:27 -05003491 /*
3492 * Find how far into its rbd object the partition-relative
3493 * bio start sector is to offset relative to the enclosing
3494 * device.
3495 */
3496 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3497 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3498 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
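	/*
	 * Worked example (assuming obj_order 22, i.e. 4 MiB objects):
	 * sectors_per_obj is 8192, so a bio starting at device sector
	 * 12288 sits 4096 sectors into its object, leaving 4096
	 * sectors (2 MiB) before the object boundary.
	 */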
Alex Elder593a9e72012-02-07 12:03:37 -06003499
Alex Eldere5cfeed22012-10-20 22:17:27 -05003500 /*
3501 * Compute the number of bytes from that offset to the end
3502 * of the object. Account for what's already used by the bio.
3503 */
3504 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3505 if (ret > bmd->bi_size)
3506 ret -= bmd->bi_size;
3507 else
3508 ret = 0;
3509
3510 /*
3511 * Don't send back more than was asked for. And if the bio
3512 * was empty, let the whole thing through because: "Note
3513 * that a block device *must* allow a single page to be
3514 * added to an empty bio."
3515 */
3516 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3517 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3518 ret = (int) bvec->bv_len;
3519
3520 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003521}
3522
3523static void rbd_free_disk(struct rbd_device *rbd_dev)
3524{
3525 struct gendisk *disk = rbd_dev->disk;
3526
3527 if (!disk)
3528 return;
3529
Alex Eldera0cab922013-04-25 23:15:08 -05003530 rbd_dev->disk = NULL;
3531 if (disk->flags & GENHD_FL_UP) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003532 del_gendisk(disk);
Alex Eldera0cab922013-04-25 23:15:08 -05003533 if (disk->queue)
3534 blk_cleanup_queue(disk->queue);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003535 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05003536 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003537 put_disk(disk);
3538}
3539
Alex Elder788e2df2013-01-17 12:25:27 -06003540static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3541 const char *object_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003542 u64 offset, u64 length, void *buf)
3544{
Alex Elder21692382013-04-05 01:27:12 -05003545 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder788e2df2013-01-17 12:25:27 -06003546 struct rbd_obj_request *obj_request;
Alex Elder788e2df2013-01-17 12:25:27 -06003547 struct page **pages = NULL;
3548 u32 page_count;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003549 size_t size;
Alex Elder788e2df2013-01-17 12:25:27 -06003550 int ret;
3551
3552 page_count = (u32) calc_pages_for(offset, length);
3553 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3554 if (IS_ERR(pages))
Jan Karaa8d42052014-10-22 09:17:24 +02003555 return PTR_ERR(pages);
Alex Elder788e2df2013-01-17 12:25:27 -06003556
3557 ret = -ENOMEM;
3558 obj_request = rbd_obj_request_create(object_name, offset, length,
Alex Elder36be9a72013-01-19 00:30:28 -06003559 OBJ_REQUEST_PAGES);
Alex Elder788e2df2013-01-17 12:25:27 -06003560 if (!obj_request)
3561 goto out;
3562
3563 obj_request->pages = pages;
3564 obj_request->page_count = page_count;
3565
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003566 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003567 obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06003568 if (!obj_request->osd_req)
3569 goto out;
3570
Alex Elderc99d2d42013-04-05 01:27:11 -05003571 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3572 offset, length, 0, 0);
Alex Elder406e2c92013-04-15 14:50:36 -05003573 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
Alex Eldera4ce40a2013-04-05 01:27:12 -05003574 obj_request->pages,
Alex Elder44cd1882013-04-05 01:27:12 -05003575 obj_request->length,
3576 obj_request->offset & ~PAGE_MASK,
3577 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003578 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003579
Alex Elder788e2df2013-01-17 12:25:27 -06003580 ret = rbd_obj_request_submit(osdc, obj_request);
3581 if (ret)
3582 goto out;
3583 ret = rbd_obj_request_wait(obj_request);
3584 if (ret)
3585 goto out;
3586
3587 ret = obj_request->result;
3588 if (ret < 0)
3589 goto out;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003590
3591 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3592 size = (size_t) obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003593 ceph_copy_from_page_vector(pages, buf, 0, size);
Alex Elder7097f8d2013-04-30 00:44:33 -05003594 rbd_assert(size <= (size_t)INT_MAX);
3595 ret = (int)size;
Alex Elder788e2df2013-01-17 12:25:27 -06003596out:
3597 if (obj_request)
3598 rbd_obj_request_put(obj_request);
3599 else
3600 ceph_release_page_vector(pages, page_count);
3601
3602 return ret;
3603}
3604
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003605/*
Alex Elder662518b2013-05-06 09:51:29 -05003606 * Read the complete header for the given rbd device. On successful
3607 * return, the rbd_dev->header field will contain up-to-date
3608 * information about the image.
Alex Elder4156d992012-08-02 11:29:46 -05003609 */
Alex Elder99a41eb2013-05-06 09:51:30 -05003610static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d992012-08-02 11:29:46 -05003611{
3612 struct rbd_image_header_ondisk *ondisk = NULL;
3613 u32 snap_count = 0;
3614 u64 names_size = 0;
3615 u32 want_count;
3616 int ret;
3617
3618 /*
3619 * The complete header will include an array of its 64-bit
3620 * snapshot ids, followed by the names of those snapshots as
3621 * a contiguous block of NUL-terminated strings. Note that
3622 * the number of snapshots could change by the time we read
3623 * it in, in which case we re-read it.
3624 */
3625 do {
3626 size_t size;
3627
3628 kfree(ondisk);
3629
3630 size = sizeof (*ondisk);
3631 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3632 size += names_size;
3633 ondisk = kmalloc(size, GFP_KERNEL);
3634 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05003635 return -ENOMEM;
Alex Elder4156d992012-08-02 11:29:46 -05003636
Alex Elder788e2df2013-01-17 12:25:27 -06003637 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003638 0, size, ondisk);
Alex Elder4156d992012-08-02 11:29:46 -05003639 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05003640 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05003641 if ((size_t)ret < size) {
Alex Elder4156d992012-08-02 11:29:46 -05003642 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003643 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3644 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05003645 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003646 }
3647 if (!rbd_dev_ondisk_valid(ondisk)) {
3648 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003649 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05003650 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003651 }
3652
3653 names_size = le64_to_cpu(ondisk->snap_names_len);
3654 want_count = snap_count;
3655 snap_count = le32_to_cpu(ondisk->snap_count);
3656 } while (snap_count != want_count);
3657
Alex Elder662518b2013-05-06 09:51:29 -05003658 ret = rbd_header_from_disk(rbd_dev, ondisk);
3659out:
Alex Elder4156d992012-08-02 11:29:46 -05003660 kfree(ondisk);
3661
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003662 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003663}
3664
Alex Elder15228ed2013-05-01 12:43:03 -05003665/*
3666 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3667 * has disappeared from the (just updated) snapshot context.
3668 */
3669static void rbd_exists_validate(struct rbd_device *rbd_dev)
3670{
3671 u64 snap_id;
3672
3673 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3674 return;
3675
3676 snap_id = rbd_dev->spec->snap_id;
3677 if (snap_id == CEPH_NOSNAP)
3678 return;
3679
3680 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3681 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3682}
3683
Josh Durgin98752012013-08-29 17:26:31 -07003684static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3685{
3686 sector_t size;
3687 bool removing;
3688
3689 /*
3690 * Don't hold the lock while doing disk operations,
3691 * or lock ordering will conflict with the bdev mutex via:
3692 * rbd_add() -> blkdev_get() -> rbd_open()
3693 */
3694 spin_lock_irq(&rbd_dev->lock);
3695 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3696 spin_unlock_irq(&rbd_dev->lock);
3697 /*
3698 * If the device is being removed, rbd_dev->disk has
3699 * been destroyed, so don't try to update its size
3700 */
3701 if (!removing) {
3702 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3703 dout("setting size to %llu sectors", (unsigned long long)size);
3704 set_capacity(rbd_dev->disk, size);
3705 revalidate_disk(rbd_dev->disk);
3706 }
3707}
3708
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003709static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05003710{
Alex Eldere627db02013-05-06 07:40:30 -05003711 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05003712 int ret;
3713
Alex Eldercfbf6372013-05-31 17:40:45 -05003714 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05003715 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04003716
3717 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003718 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003719 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05003720
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003721 /*
3722 * If there is a parent, see if it has disappeared due to the
3723 * mapped image getting flattened.
3724 */
3725 if (rbd_dev->parent) {
3726 ret = rbd_dev_v2_parent_info(rbd_dev);
3727 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003728 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003729 }
3730
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003731 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003732 rbd_dev->mapping.size = rbd_dev->header.image_size;
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003733 } else {
3734 /* validate mapped snapshot's EXISTS flag */
3735 rbd_exists_validate(rbd_dev);
3736 }
Alex Elder15228ed2013-05-01 12:43:03 -05003737
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003738out:
Alex Eldercfbf6372013-05-31 17:40:45 -05003739 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003740 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07003741 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05003742
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003743 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05003744}
3745
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003746static int rbd_init_request(void *data, struct request *rq,
3747 unsigned int hctx_idx, unsigned int request_idx,
3748 unsigned int numa_node)
3749{
3750 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3751
3752 INIT_WORK(work, rbd_queue_workfn);
3753 return 0;
3754}
3755
3756static struct blk_mq_ops rbd_mq_ops = {
3757 .queue_rq = rbd_queue_rq,
3758 .map_queue = blk_mq_map_queue,
3759 .init_request = rbd_init_request,
3760};
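/*
 * Each blk-mq request carries a work_struct in its pdu (sized via
 * tag_set.cmd_size in rbd_init_disk() below).  It is initialized once
 * per request above and merely queued from rbd_queue_rq(), so the
 * actual mapping of a request runs in workqueue context via
 * rbd_queue_workfn().
 */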
3761
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003762static int rbd_init_disk(struct rbd_device *rbd_dev)
3763{
3764 struct gendisk *disk;
3765 struct request_queue *q;
Alex Elder593a9e72012-02-07 12:03:37 -06003766 u64 segment_size;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003767 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003768
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003769 /* create gendisk info */
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003770 disk = alloc_disk(single_major ?
3771 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3772 RBD_MINORS_PER_MAJOR);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003773 if (!disk)
Alex Elder1fcdb8a2012-08-29 17:11:06 -05003774 return -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003775
Alex Elderf0f8cef2012-01-29 13:57:44 -06003776 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
Alex Elderde71a292012-07-03 16:01:19 -05003777 rbd_dev->dev_id);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003778 disk->major = rbd_dev->major;
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003779 disk->first_minor = rbd_dev->minor;
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003780 if (single_major)
3781 disk->flags |= GENHD_FL_EXT_DEVT;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003782 disk->fops = &rbd_bd_ops;
3783 disk->private_data = rbd_dev;
3784
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003785 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3786 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003787 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003788 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003789 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003790 rbd_dev->tag_set.nr_hw_queues = 1;
3791 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3792
3793 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3794 if (err)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003795 goto out_disk;
Josh Durgin029bcbd2011-07-22 11:35:23 -07003796
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003797 q = blk_mq_init_queue(&rbd_dev->tag_set);
3798 if (IS_ERR(q)) {
3799 err = PTR_ERR(q);
3800 goto out_tag_set;
3801 }
3802
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03003803 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3804 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06003805
Josh Durgin029bcbd2011-07-22 11:35:23 -07003806 /* set io sizes to object size */
Alex Elder593a9e72012-02-07 12:03:37 -06003807 segment_size = rbd_obj_bytes(&rbd_dev->header);
3808 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
Ilya Dryomovd3834fe2015-06-12 19:19:02 +03003809 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
Alex Elder593a9e72012-02-07 12:03:37 -06003810 blk_queue_max_segment_size(q, segment_size);
3811 blk_queue_io_min(q, segment_size);
3812 blk_queue_io_opt(q, segment_size);
Josh Durgin029bcbd2011-07-22 11:35:23 -07003813
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003814	/* enable discard support */
3815 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3816 q->limits.discard_granularity = segment_size;
3817 q->limits.discard_alignment = segment_size;
Josh Durginb76f8232014-04-07 16:52:03 -07003818 q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
3819 q->limits.discard_zeroes_data = 1;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003820
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003821 blk_queue_merge_bvec(q, rbd_merge_bvec);
3822 disk->queue = q;
3823
3824 q->queuedata = rbd_dev;
3825
3826 rbd_dev->disk = disk;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003827
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003828 return 0;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003829out_tag_set:
3830 blk_mq_free_tag_set(&rbd_dev->tag_set);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003831out_disk:
3832 put_disk(disk);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003833 return err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003834}
3835
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003836/*
3837 sysfs
3838*/
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003839
Alex Elder593a9e72012-02-07 12:03:37 -06003840static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3841{
3842 return container_of(dev, struct rbd_device, dev);
3843}
3844
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003845static ssize_t rbd_size_show(struct device *dev,
3846 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003847{
Alex Elder593a9e72012-02-07 12:03:37 -06003848 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003849
Alex Elderfc71d832013-04-26 15:44:36 -05003850 return sprintf(buf, "%llu\n",
3851 (unsigned long long)rbd_dev->mapping.size);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003852}
3853
Alex Elder34b13182012-07-13 20:35:12 -05003854/*
3855 * Note this shows the features for whatever's mapped, which is not
3856 * necessarily the base image.
3857 */
3858static ssize_t rbd_features_show(struct device *dev,
3859 struct device_attribute *attr, char *buf)
3860{
3861 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3862
3863 return sprintf(buf, "0x%016llx\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003864 (unsigned long long)rbd_dev->mapping.features);
Alex Elder34b13182012-07-13 20:35:12 -05003865}
3866
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003867static ssize_t rbd_major_show(struct device *dev,
3868 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003869{
Alex Elder593a9e72012-02-07 12:03:37 -06003870 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003871
Alex Elderfc71d832013-04-26 15:44:36 -05003872 if (rbd_dev->major)
3873 return sprintf(buf, "%d\n", rbd_dev->major);
3874
3875 return sprintf(buf, "(none)\n");
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003876}
Alex Elderfc71d832013-04-26 15:44:36 -05003877
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003878static ssize_t rbd_minor_show(struct device *dev,
3879 struct device_attribute *attr, char *buf)
3880{
3881 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3882
3883 return sprintf(buf, "%d\n", rbd_dev->minor);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003884}
3885
3886static ssize_t rbd_client_id_show(struct device *dev,
3887 struct device_attribute *attr, char *buf)
3888{
Alex Elder593a9e72012-02-07 12:03:37 -06003889 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003890
Alex Elder1dbb4392012-01-24 10:08:37 -06003891 return sprintf(buf, "client%lld\n",
3892 ceph_client_id(rbd_dev->rbd_client->client));
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003893}
3894
3895static ssize_t rbd_pool_show(struct device *dev,
3896 struct device_attribute *attr, char *buf)
3897{
Alex Elder593a9e72012-02-07 12:03:37 -06003898 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003899
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003900 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003901}
3902
Alex Elder9bb2f332012-07-12 10:46:35 -05003903static ssize_t rbd_pool_id_show(struct device *dev,
3904 struct device_attribute *attr, char *buf)
3905{
3906 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3907
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003908 return sprintf(buf, "%llu\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003909 (unsigned long long) rbd_dev->spec->pool_id);
Alex Elder9bb2f332012-07-12 10:46:35 -05003910}
3911
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003912static ssize_t rbd_name_show(struct device *dev,
3913 struct device_attribute *attr, char *buf)
3914{
Alex Elder593a9e72012-02-07 12:03:37 -06003915 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003916
Alex Eldera92ffdf2012-10-30 19:40:33 -05003917 if (rbd_dev->spec->image_name)
3918 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3919
3920 return sprintf(buf, "(unknown)\n");
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003921}
3922
Alex Elder589d30e2012-07-10 20:30:11 -05003923static ssize_t rbd_image_id_show(struct device *dev,
3924 struct device_attribute *attr, char *buf)
3925{
3926 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3927
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003928 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05003929}
3930
Alex Elder34b13182012-07-13 20:35:12 -05003931/*
3932 * Shows the name of the currently-mapped snapshot (or
3933 * RBD_SNAP_HEAD_NAME for the base image).
3934 */
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003935static ssize_t rbd_snap_show(struct device *dev,
3936 struct device_attribute *attr,
3937 char *buf)
3938{
Alex Elder593a9e72012-02-07 12:03:37 -06003939 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003940
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003941 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003942}
3943
Alex Elder86b00e02012-10-25 23:34:42 -05003944/*
Ilya Dryomovff961282014-07-22 21:53:07 +04003945 * For a v2 image, shows the chain of parent images, separated by empty
3946 * lines. For v1 images or if there is no parent, shows "(no parent
3947 * image)".
Alex Elder86b00e02012-10-25 23:34:42 -05003948 */
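/*
 * Illustrative example of the output format produced below for a
 * single parent (field values are made up):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 10076b8b4567
 *	image_name parent-image
 *	snap_id 4
 *	snap_name snap1
 *	overlap 10737418240
 */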
3949static ssize_t rbd_parent_show(struct device *dev,
Ilya Dryomovff961282014-07-22 21:53:07 +04003950 struct device_attribute *attr,
3951 char *buf)
Alex Elder86b00e02012-10-25 23:34:42 -05003952{
3953 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Ilya Dryomovff961282014-07-22 21:53:07 +04003954 ssize_t count = 0;
Alex Elder86b00e02012-10-25 23:34:42 -05003955
Ilya Dryomovff961282014-07-22 21:53:07 +04003956 if (!rbd_dev->parent)
Alex Elder86b00e02012-10-25 23:34:42 -05003957 return sprintf(buf, "(no parent image)\n");
3958
Ilya Dryomovff961282014-07-22 21:53:07 +04003959 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3960 struct rbd_spec *spec = rbd_dev->parent_spec;
Alex Elder86b00e02012-10-25 23:34:42 -05003961
Ilya Dryomovff961282014-07-22 21:53:07 +04003962 count += sprintf(&buf[count], "%s"
3963 "pool_id %llu\npool_name %s\n"
3964 "image_id %s\nimage_name %s\n"
3965 "snap_id %llu\nsnap_name %s\n"
3966 "overlap %llu\n",
3967 !count ? "" : "\n", /* first? */
3968 spec->pool_id, spec->pool_name,
3969 spec->image_id, spec->image_name ?: "(unknown)",
3970 spec->snap_id, spec->snap_name,
3971 rbd_dev->parent_overlap);
3972 }
Alex Elder86b00e02012-10-25 23:34:42 -05003973
Ilya Dryomovff961282014-07-22 21:53:07 +04003974 return count;
Alex Elder86b00e02012-10-25 23:34:42 -05003975}
3976
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003977static ssize_t rbd_image_refresh(struct device *dev,
3978 struct device_attribute *attr,
3979 const char *buf,
3980 size_t size)
3981{
Alex Elder593a9e72012-02-07 12:03:37 -06003982 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05003983 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003984
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003985 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05003986 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003987 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05003988
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003989 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003990}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003991
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003992static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
Alex Elder34b13182012-07-13 20:35:12 -05003993static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003994static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003995static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003996static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3997static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
Alex Elder9bb2f332012-07-12 10:46:35 -05003998static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003999static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
Alex Elder589d30e2012-07-10 20:30:11 -05004000static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004001static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
4002static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
Alex Elder86b00e02012-10-25 23:34:42 -05004003static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004004
4005static struct attribute *rbd_attrs[] = {
4006 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05004007 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004008 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02004009 &dev_attr_minor.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004010 &dev_attr_client_id.attr,
4011 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05004012 &dev_attr_pool_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004013 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05004014 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004015 &dev_attr_current_snap.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05004016 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004017 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004018 NULL
4019};
4020
4021static struct attribute_group rbd_attr_group = {
4022 .attrs = rbd_attrs,
4023};
4024
4025static const struct attribute_group *rbd_attr_groups[] = {
4026 &rbd_attr_group,
4027 NULL
4028};
4029
4030static void rbd_sysfs_dev_release(struct device *dev)
4031{
4032}
4033
4034static struct device_type rbd_device_type = {
4035 .name = "rbd",
4036 .groups = rbd_attr_groups,
4037 .release = rbd_sysfs_dev_release,
4038};
4039
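/*
 * An rbd_spec identifies an image (pool, image and snapshot) and is
 * reference counted: rbd_spec_get()/rbd_spec_put() take and drop
 * references, and rbd_spec_free() releases the spec along with all of
 * its dynamically allocated name strings once the last reference is
 * gone.
 */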
Alex Elder8b8fb992012-10-26 17:25:24 -05004040static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4041{
4042 kref_get(&spec->kref);
4043
4044 return spec;
4045}
4046
4047static void rbd_spec_free(struct kref *kref);
4048static void rbd_spec_put(struct rbd_spec *spec)
4049{
4050 if (spec)
4051 kref_put(&spec->kref, rbd_spec_free);
4052}
4053
4054static struct rbd_spec *rbd_spec_alloc(void)
4055{
4056 struct rbd_spec *spec;
4057
4058 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4059 if (!spec)
4060 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04004061
4062 spec->pool_id = CEPH_NOPOOL;
4063 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05004064 kref_init(&spec->kref);
4065
Alex Elder8b8fb992012-10-26 17:25:24 -05004066 return spec;
4067}
4068
4069static void rbd_spec_free(struct kref *kref)
4070{
4071 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4072
4073 kfree(spec->pool_name);
4074 kfree(spec->image_id);
4075 kfree(spec->image_name);
4076 kfree(spec->snap_name);
4077 kfree(spec);
4078}
4079
Alex Eldercc344fa2013-02-19 12:25:56 -06004080static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
Ilya Dryomovd1475432015-06-22 13:24:48 +03004081 struct rbd_spec *spec,
4082 struct rbd_options *opts)
Alex Elderc53d5892012-10-25 23:34:42 -05004083{
4084 struct rbd_device *rbd_dev;
4085
4086 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4087 if (!rbd_dev)
4088 return NULL;
4089
4090 spin_lock_init(&rbd_dev->lock);
Alex Elder6d292902013-01-14 12:43:31 -06004091 rbd_dev->flags = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05004092 atomic_set(&rbd_dev->parent_ref, 0);
Alex Elderc53d5892012-10-25 23:34:42 -05004093 INIT_LIST_HEAD(&rbd_dev->node);
Alex Elderc53d5892012-10-25 23:34:42 -05004094 init_rwsem(&rbd_dev->header_rwsem);
4095
Alex Elderc53d5892012-10-25 23:34:42 -05004096 rbd_dev->rbd_client = rbdc;
Ilya Dryomovd1475432015-06-22 13:24:48 +03004097 rbd_dev->spec = spec;
4098 rbd_dev->opts = opts;
Alex Elderc53d5892012-10-25 23:34:42 -05004099
Alex Elder0903e872012-11-14 12:25:19 -06004100 /* Initialize the layout used for all rbd requests */
4101
4102 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4103 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4104 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4105 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4106
Alex Elderc53d5892012-10-25 23:34:42 -05004107 return rbd_dev;
4108}
4109
4110static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4111{
Alex Elderc53d5892012-10-25 23:34:42 -05004112 rbd_put_client(rbd_dev->rbd_client);
4113 rbd_spec_put(rbd_dev->spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03004114 kfree(rbd_dev->opts);
Alex Elderc53d5892012-10-25 23:34:42 -05004115 kfree(rbd_dev);
4116}
4117
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004118/*
Alex Elder9d475de2012-07-03 16:01:19 -05004119 * Get the size and object order for an image snapshot, or if
4120 * snap_id is CEPH_NOSNAP, get this information for the base
4121 * image.
4122 */
4123static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4124 u8 *order, u64 *snap_size)
4125{
4126 __le64 snapid = cpu_to_le64(snap_id);
4127 int ret;
4128 struct {
4129 u8 order;
4130 __le64 size;
4131 } __attribute__ ((packed)) size_buf = { 0 };
4132
Alex Elder36be9a72013-01-19 00:30:28 -06004133 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder9d475de2012-07-03 16:01:19 -05004134 "rbd", "get_size",
Alex Elder41579762013-04-21 12:14:45 -05004135 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004136 &size_buf, sizeof (size_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004137 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder9d475de2012-07-03 16:01:19 -05004138 if (ret < 0)
4139 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004140 if (ret < sizeof (size_buf))
4141 return -ERANGE;
Alex Elder9d475de2012-07-03 16:01:19 -05004142
Josh Durginc3545572013-08-28 17:08:10 -07004143 if (order) {
Alex Elderc86f86e2013-04-25 15:09:41 -05004144 *order = size_buf.order;
Josh Durginc3545572013-08-28 17:08:10 -07004145 dout(" order %u", (unsigned int)*order);
4146 }
Alex Elder9d475de2012-07-03 16:01:19 -05004147 *snap_size = le64_to_cpu(size_buf.size);
4148
Josh Durginc3545572013-08-28 17:08:10 -07004149 dout(" snap_id 0x%016llx snap_size = %llu\n",
4150 (unsigned long long)snap_id,
Alex Elder57385b52013-04-21 12:14:45 -05004151 (unsigned long long)*snap_size);
Alex Elder9d475de2012-07-03 16:01:19 -05004152
4153 return 0;
4154}
4155
4156static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4157{
4158 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4159 &rbd_dev->header.obj_order,
4160 &rbd_dev->header.image_size);
4161}
4162
Alex Elder1e130192012-07-03 16:01:19 -05004163static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4164{
4165 void *reply_buf;
4166 int ret;
4167 void *p;
4168
4169 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4170 if (!reply_buf)
4171 return -ENOMEM;
4172
Alex Elder36be9a72013-01-19 00:30:28 -06004173 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder41579762013-04-21 12:14:45 -05004174 "rbd", "get_object_prefix", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004175 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004176 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder1e130192012-07-03 16:01:19 -05004177 if (ret < 0)
4178 goto out;
4179
4180 p = reply_buf;
4181 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
Alex Elder57385b52013-04-21 12:14:45 -05004182 p + ret, NULL, GFP_NOIO);
4183 ret = 0;
Alex Elder1e130192012-07-03 16:01:19 -05004184
4185 if (IS_ERR(rbd_dev->header.object_prefix)) {
4186 ret = PTR_ERR(rbd_dev->header.object_prefix);
4187 rbd_dev->header.object_prefix = NULL;
4188 } else {
4189 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4190 }
Alex Elder1e130192012-07-03 16:01:19 -05004191out:
4192 kfree(reply_buf);
4193
4194 return ret;
4195}
4196
Alex Elderb1b54022012-07-03 16:01:19 -05004197static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4198 u64 *snap_features)
4199{
4200 __le64 snapid = cpu_to_le64(snap_id);
4201 struct {
4202 __le64 features;
4203 __le64 incompat;
Alex Elder41579762013-04-21 12:14:45 -05004204 } __attribute__ ((packed)) features_buf = { 0 };
Alex Elderd8891402012-10-09 13:50:17 -07004205 u64 incompat;
Alex Elderb1b54022012-07-03 16:01:19 -05004206 int ret;
4207
Alex Elder36be9a72013-01-19 00:30:28 -06004208 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elderb1b54022012-07-03 16:01:19 -05004209 "rbd", "get_features",
Alex Elder41579762013-04-21 12:14:45 -05004210 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004211 &features_buf, sizeof (features_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004212 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderb1b54022012-07-03 16:01:19 -05004213 if (ret < 0)
4214 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004215 if (ret < sizeof (features_buf))
4216 return -ERANGE;
Alex Elderd8891402012-10-09 13:50:17 -07004217
4218 incompat = le64_to_cpu(features_buf.incompat);
Alex Elder5cbf6f122013-04-11 09:29:48 -05004219 if (incompat & ~RBD_FEATURES_SUPPORTED)
Alex Elderb8f5c6e2012-11-01 08:39:26 -05004220 return -ENXIO;
Alex Elderd8891402012-10-09 13:50:17 -07004221
Alex Elderb1b54022012-07-03 16:01:19 -05004222 *snap_features = le64_to_cpu(features_buf.features);
4223
4224 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
Alex Elder57385b52013-04-21 12:14:45 -05004225 (unsigned long long)snap_id,
4226 (unsigned long long)*snap_features,
4227 (unsigned long long)le64_to_cpu(features_buf.incompat));
Alex Elderb1b54022012-07-03 16:01:19 -05004228
4229 return 0;
4230}
4231
4232static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4233{
4234 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4235 &rbd_dev->header.features);
4236}
4237
Alex Elder86b00e02012-10-25 23:34:42 -05004238static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4239{
4240 struct rbd_spec *parent_spec;
4241 size_t size;
4242 void *reply_buf = NULL;
4243 __le64 snapid;
4244 void *p;
4245 void *end;
Alex Elder642a2532013-05-06 17:40:33 -05004246 u64 pool_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004247 char *image_id;
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004248 u64 snap_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004249 u64 overlap;
Alex Elder86b00e02012-10-25 23:34:42 -05004250 int ret;
4251
4252 parent_spec = rbd_spec_alloc();
4253 if (!parent_spec)
4254 return -ENOMEM;
4255
4256 size = sizeof (__le64) + /* pool_id */
4257 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4258 sizeof (__le64) + /* snap_id */
4259 sizeof (__le64); /* overlap */
4260 reply_buf = kmalloc(size, GFP_KERNEL);
4261 if (!reply_buf) {
4262 ret = -ENOMEM;
4263 goto out_err;
4264 }
4265
Ilya Dryomov4d9b67c2014-07-24 10:42:13 +04004266 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
Alex Elder36be9a72013-01-19 00:30:28 -06004267 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder86b00e02012-10-25 23:34:42 -05004268 "rbd", "get_parent",
Alex Elder41579762013-04-21 12:14:45 -05004269 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004270 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004271 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder86b00e02012-10-25 23:34:42 -05004272 if (ret < 0)
4273 goto out_err;
4274
Alex Elder86b00e02012-10-25 23:34:42 -05004275 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004276 end = reply_buf + ret;
4277 ret = -ERANGE;
Alex Elder642a2532013-05-06 17:40:33 -05004278 ceph_decode_64_safe(&p, end, pool_id, out_err);
Alex Elder392a9da2013-05-06 17:40:33 -05004279 if (pool_id == CEPH_NOPOOL) {
4280 /*
4281 * Either the parent never existed, or we have
4282 * a record of it but the image got flattened so it no
4283 * longer has a parent. When the parent of a
4284 * layered image disappears we immediately set the
4285 * overlap to 0. The effect of this is that all new
4286 * requests will be treated as if the image had no
4287 * parent.
4288 */
4289 if (rbd_dev->parent_overlap) {
4290 rbd_dev->parent_overlap = 0;
Alex Elder392a9da2013-05-06 17:40:33 -05004291 rbd_dev_parent_put(rbd_dev);
4292 pr_info("%s: clone image has been flattened\n",
4293 rbd_dev->disk->disk_name);
4294 }
4295
Alex Elder86b00e02012-10-25 23:34:42 -05004296 goto out; /* No parent? No problem. */
Alex Elder392a9da2013-05-06 17:40:33 -05004297 }
Alex Elder86b00e02012-10-25 23:34:42 -05004298
Alex Elder0903e872012-11-14 12:25:19 -06004299 /* The ceph file layout needs to fit pool id in 32 bits */
4300
4301 ret = -EIO;
Alex Elder642a2532013-05-06 17:40:33 -05004302 if (pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04004303 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
Alex Elder642a2532013-05-06 17:40:33 -05004304 (unsigned long long)pool_id, U32_MAX);
Alex Elder57385b52013-04-21 12:14:45 -05004305 goto out_err;
Alex Elderc0cd10db2013-04-26 09:43:47 -05004306 }
Alex Elder0903e872012-11-14 12:25:19 -06004307
Alex Elder979ed482012-11-01 08:39:26 -05004308 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elder86b00e02012-10-25 23:34:42 -05004309 if (IS_ERR(image_id)) {
4310 ret = PTR_ERR(image_id);
4311 goto out_err;
4312 }
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004313 ceph_decode_64_safe(&p, end, snap_id, out_err);
Alex Elder86b00e02012-10-25 23:34:42 -05004314 ceph_decode_64_safe(&p, end, overlap, out_err);
4315
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004316 /*
4317 * The parent won't change (except when the clone is
4318 * flattened, already handled that). So we only need to
4319 * record the parent spec we have not already done so.
4320 */
4321 if (!rbd_dev->parent_spec) {
4322 parent_spec->pool_id = pool_id;
4323 parent_spec->image_id = image_id;
4324 parent_spec->snap_id = snap_id;
Alex Elder70cf49c2013-05-06 17:40:33 -05004325 rbd_dev->parent_spec = parent_spec;
4326 parent_spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovfbba11b2014-06-27 21:46:33 +04004327 } else {
4328 kfree(image_id);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004329 }
4330
4331 /*
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004332 * We always update the parent overlap. If it's zero we issue
4333 * a warning, as we will proceed as if there was no parent.
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004334 */
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004335 if (!overlap) {
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004336 if (parent_spec) {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004337 /* refresh, careful to warn just once */
4338 if (rbd_dev->parent_overlap)
4339 rbd_warn(rbd_dev,
4340 "clone now standalone (overlap became 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004341 } else {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004342 /* initial probe */
4343 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004344 }
Alex Elder70cf49c2013-05-06 17:40:33 -05004345 }
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004346 rbd_dev->parent_overlap = overlap;
4347
Alex Elder86b00e02012-10-25 23:34:42 -05004348out:
4349 ret = 0;
4350out_err:
4351 kfree(reply_buf);
4352 rbd_spec_put(parent_spec);
4353
4354 return ret;
4355}
4356
Alex Eldercc070d52013-04-21 12:14:45 -05004357static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4358{
4359 struct {
4360 __le64 stripe_unit;
4361 __le64 stripe_count;
4362 } __attribute__ ((packed)) striping_info_buf = { 0 };
4363 size_t size = sizeof (striping_info_buf);
4364 void *p;
4365 u64 obj_size;
4366 u64 stripe_unit;
4367 u64 stripe_count;
4368 int ret;
4369
4370 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4371 "rbd", "get_stripe_unit_count", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004372 (char *)&striping_info_buf, size);
Alex Eldercc070d52013-04-21 12:14:45 -05004373 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4374 if (ret < 0)
4375 return ret;
4376 if (ret < size)
4377 return -ERANGE;
4378
4379 /*
4380 * We don't actually support the "fancy striping" feature
4381 * (STRIPINGV2) yet, but if the striping sizes are the
4382 * defaults the behavior is the same as before. So find
4383 * out, and only fail if the image has non-default values.
4384 */
4385 ret = -EINVAL;
4386 obj_size = (u64)1 << rbd_dev->header.obj_order;
4387 p = &striping_info_buf;
4388 stripe_unit = ceph_decode_64(&p);
4389 if (stripe_unit != obj_size) {
4390 rbd_warn(rbd_dev, "unsupported stripe unit "
4391 "(got %llu want %llu)",
4392 stripe_unit, obj_size);
4393 return -EINVAL;
4394 }
4395 stripe_count = ceph_decode_64(&p);
4396 if (stripe_count != 1) {
4397 rbd_warn(rbd_dev, "unsupported stripe count "
4398 "(got %llu want 1)", stripe_count);
4399 return -EINVAL;
4400 }
Alex Elder500d0c02013-04-26 09:43:47 -05004401 rbd_dev->header.stripe_unit = stripe_unit;
4402 rbd_dev->header.stripe_count = stripe_count;
Alex Eldercc070d52013-04-21 12:14:45 -05004403
4404 return 0;
4405}
4406
Alex Elder9e15b772012-10-30 19:40:33 -05004407static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4408{
4409 size_t image_id_size;
4410 char *image_id;
4411 void *p;
4412 void *end;
4413 size_t size;
4414 void *reply_buf = NULL;
4415 size_t len = 0;
4416 char *image_name = NULL;
4417 int ret;
4418
4419 rbd_assert(!rbd_dev->spec->image_name);
4420
Alex Elder69e7a022012-11-01 08:39:26 -05004421 len = strlen(rbd_dev->spec->image_id);
4422 image_id_size = sizeof (__le32) + len;
Alex Elder9e15b772012-10-30 19:40:33 -05004423 image_id = kmalloc(image_id_size, GFP_KERNEL);
4424 if (!image_id)
4425 return NULL;
4426
4427 p = image_id;
Alex Elder41579762013-04-21 12:14:45 -05004428 end = image_id + image_id_size;
Alex Elder57385b52013-04-21 12:14:45 -05004429 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
Alex Elder9e15b772012-10-30 19:40:33 -05004430
4431 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4432 reply_buf = kmalloc(size, GFP_KERNEL);
4433 if (!reply_buf)
4434 goto out;
4435
Alex Elder36be9a72013-01-19 00:30:28 -06004436 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
Alex Elder9e15b772012-10-30 19:40:33 -05004437 "rbd", "dir_get_name",
4438 image_id, image_id_size,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004439 reply_buf, size);
Alex Elder9e15b772012-10-30 19:40:33 -05004440 if (ret < 0)
4441 goto out;
4442 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004443 end = reply_buf + ret;
4444
Alex Elder9e15b772012-10-30 19:40:33 -05004445 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4446 if (IS_ERR(image_name))
4447 image_name = NULL;
4448 else
4449 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4450out:
4451 kfree(reply_buf);
4452 kfree(image_id);
4453
4454 return image_name;
4455}
4456
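/*
 * In a format 1 header the snapshot names are stored back to back as
 * NUL-terminated strings, in the same order as the ids in the snap
 * context, so the lookup below walks names and ids in parallel.
 */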
Alex Elder2ad3d712013-04-30 00:44:33 -05004457static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4458{
4459 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4460 const char *snap_name;
4461 u32 which = 0;
4462
4463 /* Skip over names until we find the one we are looking for */
4464
4465 snap_name = rbd_dev->header.snap_names;
4466 while (which < snapc->num_snaps) {
4467 if (!strcmp(name, snap_name))
4468 return snapc->snaps[which];
4469 snap_name += strlen(snap_name) + 1;
4470 which++;
4471 }
4472 return CEPH_NOSNAP;
4473}
4474
4475static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4476{
4477 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4478 u32 which;
4479 bool found = false;
4480 u64 snap_id;
4481
4482 for (which = 0; !found && which < snapc->num_snaps; which++) {
4483 const char *snap_name;
4484
4485 snap_id = snapc->snaps[which];
4486 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
Josh Durginefadc982013-08-29 19:16:42 -07004487 if (IS_ERR(snap_name)) {
4488 /* ignore no-longer existing snapshots */
4489 if (PTR_ERR(snap_name) == -ENOENT)
4490 continue;
4491 else
4492 break;
4493 }
Alex Elder2ad3d712013-04-30 00:44:33 -05004494 found = !strcmp(name, snap_name);
4495 kfree(snap_name);
4496 }
4497 return found ? snap_id : CEPH_NOSNAP;
4498}
4499
4500/*
4501 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4502 * no snapshot by that name is found, or if an error occurs.
4503 */
4504static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4505{
4506 if (rbd_dev->image_format == 1)
4507 return rbd_v1_snap_id_by_name(rbd_dev, name);
4508
4509 return rbd_v2_snap_id_by_name(rbd_dev, name);
4510}
4511
Alex Elder9e15b772012-10-30 19:40:33 -05004512/*
Ilya Dryomov04077592014-07-23 17:11:20 +04004513 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05004514 */
Ilya Dryomov04077592014-07-23 17:11:20 +04004515static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4516{
4517 struct rbd_spec *spec = rbd_dev->spec;
4518
4519 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4520 rbd_assert(spec->image_id && spec->image_name);
4521 rbd_assert(spec->snap_name);
4522
4523 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4524 u64 snap_id;
4525
4526 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4527 if (snap_id == CEPH_NOSNAP)
4528 return -ENOENT;
4529
4530 spec->snap_id = snap_id;
4531 } else {
4532 spec->snap_id = CEPH_NOSNAP;
4533 }
4534
4535 return 0;
4536}
4537
4538/*
4539 * A parent image will have all ids but none of the names.
4540 *
4541 * All names in an rbd spec are dynamically allocated. It's OK if we
4542 * can't figure out the name for an image id.
4543 */
4544static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05004545{
Alex Elder2e9f7f12013-04-26 09:43:48 -05004546 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4547 struct rbd_spec *spec = rbd_dev->spec;
4548 const char *pool_name;
4549 const char *image_name;
4550 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004551 int ret;
4552
Ilya Dryomov04077592014-07-23 17:11:20 +04004553 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4554 rbd_assert(spec->image_id);
4555 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05004556
Alex Elder2e9f7f12013-04-26 09:43:48 -05004557 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05004558
Alex Elder2e9f7f12013-04-26 09:43:48 -05004559 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4560 if (!pool_name) {
4561 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05004562 return -EIO;
4563 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05004564 pool_name = kstrdup(pool_name, GFP_KERNEL);
4565 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05004566 return -ENOMEM;
4567
4568 /* Fetch the image name; tolerate failure here */
4569
Alex Elder2e9f7f12013-04-26 09:43:48 -05004570 image_name = rbd_dev_image_name(rbd_dev);
4571 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05004572 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05004573
Ilya Dryomov04077592014-07-23 17:11:20 +04004574 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05004575
Alex Elder2e9f7f12013-04-26 09:43:48 -05004576 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07004577 if (IS_ERR(snap_name)) {
4578 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004579 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05004580 }
4581
4582 spec->pool_name = pool_name;
4583 spec->image_name = image_name;
4584 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004585
4586 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04004587
Alex Elder9e15b772012-10-30 19:40:33 -05004588out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05004589 kfree(image_name);
4590 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004591 return ret;
4592}
4593
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004594static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05004595{
4596 size_t size;
4597 int ret;
4598 void *reply_buf;
4599 void *p;
4600 void *end;
4601 u64 seq;
4602 u32 snap_count;
4603 struct ceph_snap_context *snapc;
4604 u32 i;
4605
4606 /*
4607 * We'll need room for the seq value (maximum snapshot id),
4608 * snapshot count, and array of that many snapshot ids.
4609 * For now we have a fixed upper limit on the number we're
4610 * prepared to receive.
4611 */
4612 size = sizeof (__le64) + sizeof (__le32) +
4613 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4614 reply_buf = kzalloc(size, GFP_KERNEL);
4615 if (!reply_buf)
4616 return -ENOMEM;
4617
Alex Elder36be9a72013-01-19 00:30:28 -06004618 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elder41579762013-04-21 12:14:45 -05004619 "rbd", "get_snapcontext", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004620 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004621 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05004622 if (ret < 0)
4623 goto out;
4624
Alex Elder35d489f2012-07-03 16:01:19 -05004625 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004626 end = reply_buf + ret;
4627 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05004628 ceph_decode_64_safe(&p, end, seq, out);
4629 ceph_decode_32_safe(&p, end, snap_count, out);
4630
4631 /*
4632 * Make sure the reported number of snapshot ids wouldn't go
4633 * beyond the end of our buffer. But before checking that,
4634 * make sure the computed size of the snapshot context we
4635 * allocate is representable in a size_t.
4636 */
4637 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4638 / sizeof (u64)) {
4639 ret = -EINVAL;
4640 goto out;
4641 }
4642 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4643 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05004644 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05004645
Alex Elder812164f82013-04-30 00:44:32 -05004646 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05004647 if (!snapc) {
4648 ret = -ENOMEM;
4649 goto out;
4650 }
Alex Elder35d489f2012-07-03 16:01:19 -05004651 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05004652 for (i = 0; i < snap_count; i++)
4653 snapc->snaps[i] = ceph_decode_64(&p);
4654
Alex Elder49ece552013-05-06 08:37:00 -05004655 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05004656 rbd_dev->header.snapc = snapc;
4657
4658 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05004659 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05004660out:
4661 kfree(reply_buf);
4662
Alex Elder57385b52013-04-21 12:14:45 -05004663 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05004664}
4665
Alex Elder54cac612013-04-30 00:44:33 -05004666static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4667 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004668{
4669 size_t size;
4670 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05004671 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004672 int ret;
4673 void *p;
4674 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004675 char *snap_name;
4676
4677 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4678 reply_buf = kmalloc(size, GFP_KERNEL);
4679 if (!reply_buf)
4680 return ERR_PTR(-ENOMEM);
4681
Alex Elder54cac612013-04-30 00:44:33 -05004682 snapid = cpu_to_le64(snap_id);
Alex Elder36be9a72013-01-19 00:30:28 -06004683 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004684 "rbd", "get_snapshot_name",
Alex Elder54cac612013-04-30 00:44:33 -05004685 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004686 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004687 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05004688 if (ret < 0) {
4689 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004690 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05004691 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004692
4693 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004694 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05004695 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05004696 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004697 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004698
Alex Elderf40eb342013-04-25 15:09:42 -05004699 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05004700 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004701out:
4702 kfree(reply_buf);
4703
Alex Elderf40eb342013-04-25 15:09:42 -05004704 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004705}
4706
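/*
 * A NULL object prefix means this is the initial probe of the image:
 * the immutable parts of the header are fetched only then (via
 * rbd_dev_v2_header_onetime()), while the size and snapshot context
 * are re-read on every refresh.
 */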
Alex Elder2df3fac2013-05-06 09:51:30 -05004707static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05004708{
Alex Elder2df3fac2013-05-06 09:51:30 -05004709 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05004710 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05004711
Josh Durgin1617e402013-06-12 14:43:10 -07004712 ret = rbd_dev_v2_image_size(rbd_dev);
4713 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004714 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07004715
Alex Elder2df3fac2013-05-06 09:51:30 -05004716 if (first_time) {
4717 ret = rbd_dev_v2_header_onetime(rbd_dev);
4718 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004719 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05004720 }
4721
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004722 ret = rbd_dev_v2_snap_context(rbd_dev);
Alex Elder117973f2012-08-31 17:29:55 -05004723 dout("rbd_dev_v2_snap_context returned %d\n", ret);
Alex Elder117973f2012-08-31 17:29:55 -05004724
4725 return ret;
4726}
4727
Ilya Dryomova720ae02014-07-23 17:11:19 +04004728static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4729{
4730 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4731
4732 if (rbd_dev->image_format == 1)
4733 return rbd_dev_v1_header_info(rbd_dev);
4734
4735 return rbd_dev_v2_header_info(rbd_dev);
4736}
4737
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004738static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4739{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004740 struct device *dev;
Alex Eldercd789ab2012-08-30 00:16:38 -05004741 int ret;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004742
Alex Eldercd789ab2012-08-30 00:16:38 -05004743 dev = &rbd_dev->dev;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004744 dev->bus = &rbd_bus_type;
4745 dev->type = &rbd_device_type;
4746 dev->parent = &rbd_root_dev;
Alex Elder200a6a82013-04-28 23:32:34 -05004747 dev->release = rbd_dev_device_release;
Alex Elderde71a292012-07-03 16:01:19 -05004748 dev_set_name(dev, "%d", rbd_dev->dev_id);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004749 ret = device_register(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004750
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004751 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004752}
4753
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004754static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4755{
4756 device_unregister(&rbd_dev->dev);
4757}
4758
Alex Elder1ddbe942012-01-29 13:57:44 -06004759/*
Alex Elder499afd52012-02-02 08:13:29 -06004760 * Get a unique rbd identifier for the given new rbd_dev, and add
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004761 * the rbd_dev to the global list.
Alex Elder1ddbe942012-01-29 13:57:44 -06004762 */
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004763static int rbd_dev_id_get(struct rbd_device *rbd_dev)
Alex Elderb7f23c32012-01-29 13:57:43 -06004764{
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004765 int new_dev_id;
4766
Ilya Dryomov9b60e702013-12-13 15:28:57 +02004767 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4768 0, minor_to_rbd_dev_id(1 << MINORBITS),
4769 GFP_KERNEL);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004770 if (new_dev_id < 0)
4771 return new_dev_id;
4772
4773 rbd_dev->dev_id = new_dev_id;
Alex Elder499afd52012-02-02 08:13:29 -06004774
4775 spin_lock(&rbd_dev_list_lock);
4776 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4777 spin_unlock(&rbd_dev_list_lock);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004778
Ilya Dryomov70eebd22013-12-13 15:28:56 +02004779 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004780
4781 return 0;
Alex Elder1ddbe942012-01-29 13:57:44 -06004782}
Alex Elderb7f23c32012-01-29 13:57:43 -06004783
Alex Elder1ddbe942012-01-29 13:57:44 -06004784/*
Alex Elder499afd52012-02-02 08:13:29 -06004785 * Remove an rbd_dev from the global list, and record that its
4786 * identifier is no longer in use.
Alex Elder1ddbe942012-01-29 13:57:44 -06004787 */
Alex Eldere2839302012-08-29 17:11:06 -05004788static void rbd_dev_id_put(struct rbd_device *rbd_dev)
Alex Elder1ddbe942012-01-29 13:57:44 -06004789{
Alex Elder499afd52012-02-02 08:13:29 -06004790 spin_lock(&rbd_dev_list_lock);
4791 list_del_init(&rbd_dev->node);
4792 spin_unlock(&rbd_dev_list_lock);
Alex Elderb7f23c32012-01-29 13:57:43 -06004793
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004794 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4795
4796 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
Alex Elderb7f23c32012-01-29 13:57:43 -06004797}
4798
Alex Eldera725f65e2012-02-02 08:13:30 -06004799/*
Alex Eldere28fff262012-02-02 08:13:30 -06004800 * Skips over white space at *buf, and updates *buf to point to the
4801 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06004802 * the token (string of non-white space characters) found. Note
4803 * that *buf must be terminated with '\0'.
Alex Eldere28fff262012-02-02 08:13:30 -06004804 */
4805static inline size_t next_token(const char **buf)
4806{
4807 /*
4808 * These are the characters that produce nonzero for
4809 * isspace() in the "C" and "POSIX" locales.
4810 */
4811 const char *spaces = " \f\n\r\t\v";
4812
4813 *buf += strspn(*buf, spaces); /* Find start of token */
4814
4815 return strcspn(*buf, spaces); /* Return token length */
4816}
4817
4818/*
Alex Elderea3352f2012-07-09 21:04:23 -05004819 * Finds the next token in *buf, dynamically allocates a buffer big
4820 * enough to hold a copy of it, and copies the token into the new
4821 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4822 * that a duplicate buffer is created even for a zero-length token.
4823 *
4824 * Returns a pointer to the newly-allocated duplicate, or a null
4825 * pointer if memory for the duplicate was not available. If
4826 * the lenp argument is a non-null pointer, the length of the token
4827 * (not including the '\0') is returned in *lenp.
4828 *
4829 * If successful, the *buf pointer will be updated to point beyond
4830 * the end of the found token.
4831 *
4832 * Note: uses GFP_KERNEL for allocation.
4833 */
4834static inline char *dup_token(const char **buf, size_t *lenp)
4835{
4836 char *dup;
4837 size_t len;
4838
4839 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05004840 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05004841 if (!dup)
4842 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05004843 *(dup + len) = '\0';
4844 *buf += len;
4845
4846 if (lenp)
4847 *lenp = len;
4848
4849 return dup;
4850}
4851
4852/*
Alex Elder859c31d2012-10-25 23:34:42 -05004853 * Parse the options provided for an "rbd add" (i.e., rbd image
4854 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4855 * and the data written is passed here via a NUL-terminated buffer.
4856 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05004857 *
Alex Elder859c31d2012-10-25 23:34:42 -05004858 * The information extracted from these options is recorded in
4859 * the other parameters which return dynamically-allocated
4860 * structures:
4861 * ceph_opts
4862 * The address of a pointer that will refer to a ceph options
4863 * structure. Caller must release the returned pointer using
4864 * ceph_destroy_options() when it is no longer needed.
4865 * rbd_opts
4866 * Address of an rbd options pointer. Fully initialized by
4867 * this function; caller must release with kfree().
4868 * spec
4869 * Address of an rbd image specification pointer. Fully
4870 * initialized by this function based on parsed options.
4871 * Caller must release with rbd_spec_put().
4872 *
4873 * The options passed take this form:
4874 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4875 * where:
4876 * <mon_addrs>
4877 * A comma-separated list of one or more monitor addresses.
4878 * A monitor address is an ip address, optionally followed
4879 * by a port number (separated by a colon).
4880 * I.e.: ip1[:port1][,ip2[:port2]...]
4881 * <options>
4882 * A comma-separated list of ceph and/or rbd options.
4883 * <pool_name>
4884 * The name of the rados pool containing the rbd image.
4885 * <image_name>
4886 * The name of the image in that pool to map.
4887 *  <snap_name>
4888 *	An optional snapshot name.  If provided, the mapping will
4889 *	present data from the image at the time that snapshot was
4890 *	created.  The image head is used if no snapshot name is
4891 * provided. Snapshot mappings are always read-only.
Alex Eldera725f65e2012-02-02 08:13:30 -06004892 */
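/*
 * For example, a write of the following string to /sys/bus/rbd/add
 * (monitor address, credentials and names are illustrative only)
 * would map the head of image "foo" in pool "rbd":
 *
 *	1.2.3.4:6789 name=admin,secret=<key> rbd foo -
 */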
Alex Elder859c31d2012-10-25 23:34:42 -05004893static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05004894 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05004895 struct rbd_options **opts,
4896 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06004897{
Alex Elderd22f76e2012-07-12 10:46:35 -05004898 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05004899 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05004900 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05004901 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05004902 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05004903 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004904 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05004905 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05004906 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06004907
4908 /* The first four tokens are required */
4909
Alex Elder7ef32142012-02-02 08:13:30 -06004910 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05004911 if (!len) {
4912 rbd_warn(NULL, "no monitor address(es) provided");
4913 return -EINVAL;
4914 }
Alex Elder0ddebc02012-10-25 23:34:41 -05004915 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05004916 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06004917 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06004918
Alex Elderdc79b112012-10-25 23:34:41 -05004919 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05004920 options = dup_token(&buf, NULL);
4921 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05004922 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004923 if (!*options) {
4924 rbd_warn(NULL, "no options provided");
4925 goto out_err;
4926 }
Alex Eldera725f65e2012-02-02 08:13:30 -06004927
Alex Elder859c31d2012-10-25 23:34:42 -05004928 spec = rbd_spec_alloc();
4929 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05004930 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05004931
4932 spec->pool_name = dup_token(&buf, NULL);
4933 if (!spec->pool_name)
4934 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004935 if (!*spec->pool_name) {
4936 rbd_warn(NULL, "no pool name provided");
4937 goto out_err;
4938 }
Alex Eldere28fff262012-02-02 08:13:30 -06004939
Alex Elder69e7a022012-11-01 08:39:26 -05004940 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05004941 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004942 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004943 if (!*spec->image_name) {
4944 rbd_warn(NULL, "no image name provided");
4945 goto out_err;
4946 }
Alex Eldere28fff262012-02-02 08:13:30 -06004947
Alex Elderf28e5652012-10-25 23:34:41 -05004948 /*
4949 * Snapshot name is optional; default is to use "-"
4950 * (indicating the head/no snapshot).
4951 */
Alex Elder3feeb8942012-08-31 17:29:52 -05004952 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05004953 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05004954 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4955 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05004956 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05004957 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05004958 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05004959 }
Alex Elderecb4dc22013-04-26 09:43:47 -05004960 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4961 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004962 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05004963 *(snap_name + len) = '\0';
4964 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05004965
Alex Elder0ddebc02012-10-25 23:34:41 -05004966 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06004967
Alex Elder4e9afeb2012-10-25 23:34:41 -05004968 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4969 if (!rbd_opts)
4970 goto out_mem;
4971
4972 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004973 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05004974
Alex Elder859c31d2012-10-25 23:34:42 -05004975 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05004976 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05004977 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05004978 if (IS_ERR(copts)) {
4979 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05004980 goto out_err;
4981 }
Alex Elder859c31d2012-10-25 23:34:42 -05004982 kfree(options);
4983
4984 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004985 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05004986 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05004987
Alex Elderdc79b112012-10-25 23:34:41 -05004988 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05004989out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05004990 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05004991out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05004992 kfree(rbd_opts);
4993 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05004994 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05004995
Alex Elderdc79b112012-10-25 23:34:41 -05004996 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06004997}
4998
Alex Elder589d30e2012-07-10 20:30:11 -05004999/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005000 * Return pool id (>= 0) or a negative error code.
5001 */
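/*
 * The lookup is retried once: if the pool is not in the locally
 * cached osdmap, ask the monitors for the newest map epoch, wait for
 * a map at least that new, and only then fail with -ENOENT if the
 * pool is still unknown.
 */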
5002static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
5003{
Ilya Dryomova319bf52015-05-15 12:02:17 +03005004 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005005 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005006 int tries = 0;
5007 int ret;
5008
5009again:
5010 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5011 if (ret == -ENOENT && tries++ < 1) {
5012 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
5013 &newest_epoch);
5014 if (ret < 0)
5015 return ret;
5016
5017 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5018 ceph_monc_request_next_osdmap(&rbdc->client->monc);
5019 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03005020 newest_epoch,
5021 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005022 goto again;
5023 } else {
5024 /* the osdmap we have is new enough */
5025 return -ENOENT;
5026 }
5027 }
5028
5029 return ret;
5030}
5031
5032/*
Alex Elder589d30e2012-07-10 20:30:11 -05005033 * An rbd format 2 image has a unique identifier, distinct from the
5034 * name given to it by the user. Internally, that identifier is
5035 * what's used to specify the names of objects related to the image.
5036 *
5037 * A special "rbd id" object is used to map an rbd image name to its
5038 * id. If that object doesn't exist, then there is no v2 rbd image
5039 * with the supplied name.
5040 *
5041 * This function will record the given rbd_dev's image_id field if
5042 * it can be determined, and in that case will return 0. If any
5043 * errors occur a negative errno will be returned and the rbd_dev's
5044 * image_id field will be unchanged (and should be NULL).
5045 */
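/*
 * Note: a missing id object is not an error -- it simply marks the
 * image as format 1, in which case an empty image_id string is
 * recorded instead.
 */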
5046static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5047{
5048 int ret;
5049 size_t size;
5050 char *object_name;
5051 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05005052 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05005053
Alex Elder589d30e2012-07-10 20:30:11 -05005054 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05005055 * When probing a parent image, the image id is already
5056 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05005057 * need to fetch the image id again in this case. We
5058 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05005059 */
Alex Elderc0fba362013-04-25 23:15:08 -05005060 if (rbd_dev->spec->image_id) {
5061 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5062
Alex Elder2c0d0a12012-10-30 19:40:33 -05005063 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05005064 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05005065
5066 /*
Alex Elder589d30e2012-07-10 20:30:11 -05005067 * First, see if the format 2 image id file exists, and if
5068 * so, get the image's persistent id from it.
5069 */
Alex Elder69e7a022012-11-01 08:39:26 -05005070 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005071 object_name = kmalloc(size, GFP_NOIO);
5072 if (!object_name)
5073 return -ENOMEM;
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005074 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005075 dout("rbd id object name is %s\n", object_name);
5076
5077 /* Response will be an encoded string, which includes a length */
5078
5079 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5080 response = kzalloc(size, GFP_NOIO);
5081 if (!response) {
5082 ret = -ENOMEM;
5083 goto out;
5084 }
5085
Alex Elderc0fba362013-04-25 23:15:08 -05005086 /* If it doesn't exist we'll assume it's a format 1 image */
5087
Alex Elder36be9a72013-01-19 00:30:28 -06005088 ret = rbd_obj_method_sync(rbd_dev, object_name,
Alex Elder41579762013-04-21 12:14:45 -05005089 "rbd", "get_id", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05005090 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06005091 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05005092 if (ret == -ENOENT) {
5093 image_id = kstrdup("", GFP_KERNEL);
5094 ret = image_id ? 0 : -ENOMEM;
5095 if (!ret)
5096 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04005097 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05005098 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05005099
Alex Elderc0fba362013-04-25 23:15:08 -05005100 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05005101 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08005102 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05005103 if (!ret)
5104 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05005105 }
5106
5107 if (!ret) {
5108 rbd_dev->spec->image_id = image_id;
5109 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05005110 }
5111out:
5112 kfree(response);
5113 kfree(object_name);
5114
5115 return ret;
5116}
5117
/*
 * Undo whatever state changes are made by a v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

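/*
 * Fetch the parts of a format 2 image header that are fixed for the
 * life of the image: the object prefix, the feature bits and, if the
 * STRIPINGV2 feature is present, the striping parameters.
 */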
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

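/*
 * If the probed image is a clone, create and probe an rbd_device for
 * its parent as well.  rbd_dev_image_probe() calls back into this
 * function, so the entire ancestry chain is set up recursively.
 */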
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec, NULL);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}

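/*
 * Set up the Linux block device for a probed image: allocate a device
 * id and name, register (or reuse, in single-major mode) a block
 * major, create the disk and finally announce it via add_disk().
 */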
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}

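/*
 * Record the name of the image's header object.  Format 1 images
 * append RBD_SUFFIX to the image name; format 2 images prefix the
 * image id with RBD_HEADER_PREFIX.
 */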
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}

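/* Undo rbd_dev_image_probe(), then destroy the rbd_dev itself. */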
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (mapping)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (mapping && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

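/*
 * Handle a "command" written to /sys/bus/rbd/add (or add_single_major):
 * monitor addresses, options, pool name, image name and an optional
 * snapshot name.  Illustrative example only -- see
 * Documentation/ABI/testing/sysfs-bus-rbd for the authoritative format:
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" \
 *         > /sys/bus/rbd/add
 */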
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

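/*
 * Release callback for the rbd device; the driver core invokes it when
 * the last reference to the device is dropped.  Undoes the work done
 * by rbd_dev_device_setup().
 */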
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

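/*
 * Tear down the parent chain from the bottom up: repeatedly walk down
 * to the deepest ancestor (the one with no parent of its own), release
 * it and detach it from its child, until no parents remain.
 */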
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

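/*
 * Handle a device id written to /sys/bus/rbd/remove (or
 * remove_single_major), e.g.:
 *
 *   $ echo 0 > /sys/bus/rbd/remove
 *
 * Removal is refused with -EBUSY while the device is still open.
 */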
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * Flush remaining watch callbacks - these must be complete
	 * before the osd_client is shut down.
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

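/*
 * Create the slab caches used for image requests, object requests and
 * segment (object) names.
 */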
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

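/*
 * Module init: set up the slab caches, the rbd workqueue, an optional
 * single block major and the sysfs interface, unwinding in reverse
 * order on error.
 */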
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");