
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

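/*
 * Illustrative behaviour of the two helpers above (a worked example,
 * not used by the code): if the counter holds 0,
 * atomic_inc_return_safe() leaves it at 0 and returns 0, so a count
 * that has dropped to zero cannot be revived; if it holds 5 it
 * becomes 6 and 6 is returned; if it already holds INT_MAX it is
 * left unchanged and -EINVAL is returned.  atomic_dec_return_safe()
 * mirrors this on the way down, restoring the counter and returning
 * -EINVAL rather than letting it go negative.
 */
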
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

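/*
 * For example, with a 4-byte int MAX_INT_FORMAT_WIDTH works out to
 * (5 * 4) / 2 + 1 = 11 characters, enough for "-2147483648"; together
 * with the three characters of RBD_DRV_NAME and a terminating NUL
 * this fits comfortably within DEV_NAME_LEN (32).
 */
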
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64 pool_id;
	const char *pool_name;

	const char *image_id;
	const char *image_name;

	u64 snap_id;
	const char *snap_name;

	struct kref kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client *client;
	struct kref kref;
	struct list_head node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char *object_name;
	u64 offset;		/* object start byte */
	u64 length;		/* bytes from offset */
	unsigned long flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request *obj_request;	/* STAT op */
		struct {
			struct rbd_img_request *img_request;
			u64 img_offset;
			/* links for img_request->obj_requests list */
			struct list_head links;
		};
	};
	u32 which;		/* posn image request list */

	enum obj_request_type type;
	union {
		struct bio *bio_list;
		struct {
			struct page **pages;
			u32 page_count;
		};
	};
	struct page **copyup_pages;
	u32 copyup_page_count;

	struct ceph_osd_request *osd_req;

	u64 xferred;		/* bytes transferred */
	int result;

	rbd_obj_callback_t callback;
	struct completion completion;

	struct kref kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device *rbd_dev;
	u64 offset;	/* starting image byte offset */
	u64 length;	/* byte count from offset */
	unsigned long flags;
	union {
		u64 snap_id;			/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request *rq;		/* block request */
		struct rbd_obj_request *obj_request;	/* obj req initiator */
	};
	struct page **copyup_pages;
	u32 copyup_page_count;
	spinlock_t completion_lock;	/* protects next_completion */
	u32 next_completion;
	rbd_img_callback_t callback;
	u64 xferred;	/* aggregate bytes transferred */
	int result;	/* first nonzero obj_request result */

	u32 obj_request_count;
	struct list_head obj_requests;	/* rbd_obj_request structs */

	struct kref kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64 size;
	u64 features;
	bool read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int dev_id;		/* blkdev unique id */

	int major;		/* blkdev assigned major */
	int minor;
	struct gendisk *disk;	/* blkdev's gendisk and rq */

	u32 image_format;	/* Either 1 or 2 */
	struct rbd_client *rbd_client;

	char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t lock;	/* queue, flags, open_count */

	struct rbd_image_header header;
	unsigned long flags;	/* possibly lock protected */
	struct rbd_spec *spec;
	struct rbd_options *opts;

	struct ceph_object_id header_oid;

	struct ceph_file_layout layout;

	struct ceph_osd_event *watch_event;
	struct rbd_obj_request *watch_request;

	struct rbd_spec *parent_spec;
	u64 parent_overlap;
	atomic_t parent_ref;
	struct rbd_device *parent;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* protects updating the header */
	struct rw_semaphore header_rwsem;

	struct rbd_mapping mapping;

	struct list_head node;

	/* sysfs related */
	struct device dev;
	unsigned long open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *rbd_img_request_cache;
static struct kmem_cache *rbd_obj_request_cache;
static struct kmem_cache *rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

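/*
 * For example, with RBD_SINGLE_MAJOR_PART_SHIFT of 4, device id 2
 * maps to minor 2 << 4 = 32, and minors 32..47 (the device plus up to
 * 15 partitions) map back to device id 2.  These helpers back the
 * single-major mode selected by the single_major module parameter
 * above.
 */
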
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name = "rbd",
	.release = rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots don't allow writes */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int queue_depth;
	bool read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

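/*
 * A worked example of the parsing above, assuming options arrive one
 * comma-separated token at a time (as the libceph option parser hands
 * them over): "queue_depth=128" matches Opt_queue_depth and sets
 * rbd_opts->queue_depth to 128, "ro" matches the alternate spelling
 * of Opt_read_only, and an unrecognized token falls through to the
 * default case and is rejected with -EINVAL.
 */
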
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

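/*
 * For example, if the snapshot context holds the ids { 12, 9, 4 }
 * (newest first), looking up id 9 returns index 1, while looking up
 * id 7, which is not present, returns BAD_SNAP_INDEX.
 */
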
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

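/*
 * A worked example of the segment arithmetic above, assuming the
 * common 4 MiB object size (obj_order of 22): an image offset of
 * 5 MiB falls in segment 1, rbd_segment_offset() returns 1 MiB, and a
 * 4 MiB request starting there is clipped by rbd_segment_length() to
 * 3 MiB so it never crosses the object boundary; the remaining 1 MiB
 * would be issued against segment 2 as a separate object request.
 */
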
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

Alex Elderf7760da2012-10-20 22:17:27 -05001327/*
1328 * Clone a portion of a bio chain, starting at the given byte offset
1329 * into the first bio in the source chain and continuing for the
1330 * number of bytes indicated. The result is another bio chain of
1331 * exactly the given length, or a null pointer on error.
1332 *
1333 * The bio_src and offset parameters are both in-out. On entry they
1334 * refer to the first source bio and the offset into that bio where
1335 * the start of data to be cloned is located.
1336 *
1337 * On return, bio_src is updated to refer to the bio in the source
1338 * chain that contains first un-cloned byte, and *offset will
1339 * contain the offset of that byte within that bio.
1340 */
1341static struct bio *bio_chain_clone_range(struct bio **bio_src,
1342 unsigned int *offset,
1343 unsigned int len,
1344 gfp_t gfpmask)
1345{
1346 struct bio *bi = *bio_src;
1347 unsigned int off = *offset;
1348 struct bio *chain = NULL;
1349 struct bio **end;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001350
Alex Elderf7760da2012-10-20 22:17:27 -05001351 /* Build up a chain of clone bios up to the limit */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001352
Kent Overstreet4f024f32013-10-11 15:44:27 -07001353 if (!bi || off >= bi->bi_iter.bi_size || !len)
Alex Elderf7760da2012-10-20 22:17:27 -05001354 return NULL; /* Nothing to clone */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001355
Alex Elderf7760da2012-10-20 22:17:27 -05001356 end = &chain;
1357 while (len) {
1358 unsigned int bi_size;
1359 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001360
Alex Elderf5400b72012-11-01 10:17:15 -05001361 if (!bi) {
1362 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
Alex Elderf7760da2012-10-20 22:17:27 -05001363 goto out_err; /* EINVAL; ran out of bio's */
Alex Elderf5400b72012-11-01 10:17:15 -05001364 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001365 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
Alex Elderf7760da2012-10-20 22:17:27 -05001366 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1367 if (!bio)
1368 goto out_err; /* ENOMEM */
1369
1370 *end = bio;
1371 end = &bio->bi_next;
1372
1373 off += bi_size;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001374 if (off == bi->bi_iter.bi_size) {
Alex Elderf7760da2012-10-20 22:17:27 -05001375 bi = bi->bi_next;
1376 off = 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001377 }
Alex Elderf7760da2012-10-20 22:17:27 -05001378 len -= bi_size;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001379 }
Alex Elderf7760da2012-10-20 22:17:27 -05001380 *bio_src = bi;
1381 *offset = off;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001382
Alex Elderf7760da2012-10-20 22:17:27 -05001383 return chain;
1384out_err:
1385 bio_chain_put(chain);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001386
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001387 return NULL;
1388}
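/*
 * Usage sketch (illustrative, mirroring the per-segment loop in
 * rbd_img_request_fill() below): the caller keeps a cursor into the
 * source bio chain and carves off one object-sized clone per pass:
 *
 *	unsigned int bio_offset = 0;
 *	struct bio *bio_list = <head of the request's bio chain>;
 *
 *	while (resid) {
 *		length = rbd_segment_length(rbd_dev, img_offset, resid);
 *		clone = bio_chain_clone_range(&bio_list, &bio_offset,
 *					      length, GFP_NOIO);
 *		bio_list and bio_offset now refer to the first un-cloned
 *		byte, ready for the next iteration.
 *	}
 */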
1389
Alex Elder926f9b32013-02-11 12:33:24 -06001390/*
1391 * The default/initial value for all object request flags is 0. For
1392 * each flag, once its value is set to 1 it is never reset to 0
1393 * again.
1394 */
Alex Elder6365d332013-02-11 12:33:24 -06001395static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1396{
1397 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
Alex Elder6365d332013-02-11 12:33:24 -06001398 struct rbd_device *rbd_dev;
1399
Alex Elder57acbaa2013-02-11 12:33:24 -06001400 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001401 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
Alex Elder6365d332013-02-11 12:33:24 -06001402 obj_request);
1403 }
1404}
1405
1406static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1407{
1408 smp_mb();
1409 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1410}
1411
Alex Elder57acbaa2013-02-11 12:33:24 -06001412static void obj_request_done_set(struct rbd_obj_request *obj_request)
1413{
1414 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1415 struct rbd_device *rbd_dev = NULL;
1416
1417 if (obj_request_img_data_test(obj_request))
1418 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001419 rbd_warn(rbd_dev, "obj_request %p already marked done",
Alex Elder57acbaa2013-02-11 12:33:24 -06001420 obj_request);
1421 }
1422}
1423
1424static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1425{
1426 smp_mb();
1427 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1428}
1429
Alex Elder5679c592013-02-11 12:33:24 -06001430/*
1431 * This sets the KNOWN flag after (possibly) setting the EXISTS
1432 * flag. The latter is set based on the "exists" value provided.
1433 *
1434 * Note that for our purposes once an object exists it never goes
1435 * away again. It's possible that the responses from two existence
1436 * checks are separated by the creation of the target object, and
1437 * the first ("doesn't exist") response arrives *after* the second
1438 * ("does exist"). In that case we ignore the second one.
1439 */
1440static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1441 bool exists)
1442{
1443 if (exists)
1444 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1445 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1446 smp_mb();
1447}
1448
1449static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1450{
1451 smp_mb();
1452 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1453}
1454
1455static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1456{
1457 smp_mb();
1458 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1459}
1460
Ilya Dryomov96385562014-06-10 13:53:29 +04001461static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1462{
1463 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1464
1465 return obj_request->img_offset <
1466 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1467}
1468
Alex Elderbf0d5f502012-11-22 00:00:08 -06001469static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1470{
Alex Elder37206ee2013-02-20 17:32:08 -06001471 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1472 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001473 kref_get(&obj_request->kref);
1474}
1475
1476static void rbd_obj_request_destroy(struct kref *kref);
1477static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1478{
1479 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001480 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1481 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001482 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1483}
1484
Alex Elder0f2d5be2014-04-26 14:21:44 +04001485static void rbd_img_request_get(struct rbd_img_request *img_request)
1486{
1487 dout("%s: img %p (was %d)\n", __func__, img_request,
1488 atomic_read(&img_request->kref.refcount));
1489 kref_get(&img_request->kref);
1490}
1491
Alex Eldere93f3152013-05-08 22:50:04 -05001492static bool img_request_child_test(struct rbd_img_request *img_request);
1493static void rbd_parent_request_destroy(struct kref *kref);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001494static void rbd_img_request_destroy(struct kref *kref);
1495static void rbd_img_request_put(struct rbd_img_request *img_request)
1496{
1497 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001498 dout("%s: img %p (was %d)\n", __func__, img_request,
1499 atomic_read(&img_request->kref.refcount));
Alex Eldere93f3152013-05-08 22:50:04 -05001500 if (img_request_child_test(img_request))
1501 kref_put(&img_request->kref, rbd_parent_request_destroy);
1502 else
1503 kref_put(&img_request->kref, rbd_img_request_destroy);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001504}
1505
1506static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1507 struct rbd_obj_request *obj_request)
1508{
Alex Elder25dcf952013-01-25 17:08:55 -06001509 rbd_assert(obj_request->img_request == NULL);
1510
Alex Elderb155e862013-04-15 14:50:37 -05001511 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001512 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001513 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001514 rbd_assert(!obj_request_img_data_test(obj_request));
1515 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001516 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001517 img_request->obj_request_count++;
1518 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001519 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1520 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001521}
1522
1523static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1524 struct rbd_obj_request *obj_request)
1525{
1526 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001527
Alex Elder37206ee2013-02-20 17:32:08 -06001528 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1529 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001530 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001531 rbd_assert(img_request->obj_request_count > 0);
1532 img_request->obj_request_count--;
1533 rbd_assert(obj_request->which == img_request->obj_request_count);
1534 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001535 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001536 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001537 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001538 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001539 rbd_obj_request_put(obj_request);
1540}
1541
1542static bool obj_request_type_valid(enum obj_request_type type)
1543{
1544 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001545 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001546 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001547 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001548 return true;
1549 default:
1550 return false;
1551 }
1552}
1553
Alex Elderbf0d5f502012-11-22 00:00:08 -06001554static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1555 struct rbd_obj_request *obj_request)
1556{
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001557 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001558 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1559}
1560
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001561static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1562{
1563 dout("%s %p\n", __func__, obj_request);
1564 ceph_osdc_cancel_request(obj_request->osd_req);
1565}
1566
1567/*
1568 * Wait for an object request to complete. If interrupted, cancel the
1569 * underlying osd request.
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001570 *
1571 * @timeout: in jiffies, 0 means "wait forever"
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001572 */
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001573static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1574 unsigned long timeout)
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001575{
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001576 long ret;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001577
1578 dout("%s %p\n", __func__, obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001579 ret = wait_for_completion_interruptible_timeout(
1580 &obj_request->completion,
1581 ceph_timeout_jiffies(timeout));
1582 if (ret <= 0) {
1583 if (ret == 0)
1584 ret = -ETIMEDOUT;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001585 rbd_obj_request_end(obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001586 } else {
1587 ret = 0;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001588 }
1589
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001590 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1591 return ret;
1592}
1593
1594static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1595{
1596 return __rbd_obj_request_wait(obj_request, 0);
1597}
1598
1599static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1600 unsigned long timeout)
1601{
1602 return __rbd_obj_request_wait(obj_request, timeout);
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001603}
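/*
 * Illustrative pairing (assumed typical synchronous caller, not copied
 * verbatim from this file): submit the object request, wait for it,
 * then check the per-request result separately, since a zero return
 * from the wait only means the request completed:
 *
 *	ret = rbd_obj_request_submit(osdc, obj_request);
 *	if (ret)
 *		goto out;
 *	ret = rbd_obj_request_wait(obj_request);
 *	if (ret)
 *		goto out;
 *	ret = obj_request->result;
 */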
1604
Alex Elderbf0d5f502012-11-22 00:00:08 -06001605static void rbd_img_request_complete(struct rbd_img_request *img_request)
1606{
Alex Elder55f27e02013-04-10 12:34:25 -05001607
Alex Elder37206ee2013-02-20 17:32:08 -06001608 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001609
1610 /*
1611 * If no error occurred, compute the aggregate transfer
1612 * count for the image request. We could instead use
1613 * atomic64_cmpxchg() to update it as each object request
1614 * completes; not clear offhand which way is better.
1615 */
1616 if (!img_request->result) {
1617 struct rbd_obj_request *obj_request;
1618 u64 xferred = 0;
1619
1620 for_each_obj_request(img_request, obj_request)
1621 xferred += obj_request->xferred;
1622 img_request->xferred = xferred;
1623 }
1624
Alex Elderbf0d5f502012-11-22 00:00:08 -06001625 if (img_request->callback)
1626 img_request->callback(img_request);
1627 else
1628 rbd_img_request_put(img_request);
1629}
1630
Alex Elder0c425242013-02-08 09:55:49 -06001631/*
1632 * The default/initial value for all image request flags is 0. Each
1633 * is conditionally set to 1 at image request initialization time
1634 * and currently never changes thereafter.
1635 */
1636static void img_request_write_set(struct rbd_img_request *img_request)
1637{
1638 set_bit(IMG_REQ_WRITE, &img_request->flags);
1639 smp_mb();
1640}
1641
1642static bool img_request_write_test(struct rbd_img_request *img_request)
1643{
1644 smp_mb();
1645 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1646}
1647
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001648/*
1649 * Set the discard flag when the img_request is a discard request
1650 */
1651static void img_request_discard_set(struct rbd_img_request *img_request)
1652{
1653 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1654 smp_mb();
1655}
1656
1657static bool img_request_discard_test(struct rbd_img_request *img_request)
1658{
1659 smp_mb();
1660 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1661}
1662
Alex Elder9849e982013-01-24 16:13:36 -06001663static void img_request_child_set(struct rbd_img_request *img_request)
1664{
1665 set_bit(IMG_REQ_CHILD, &img_request->flags);
1666 smp_mb();
1667}
1668
Alex Eldere93f3152013-05-08 22:50:04 -05001669static void img_request_child_clear(struct rbd_img_request *img_request)
1670{
1671 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1672 smp_mb();
1673}
1674
Alex Elder9849e982013-01-24 16:13:36 -06001675static bool img_request_child_test(struct rbd_img_request *img_request)
1676{
1677 smp_mb();
1678 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1679}
1680
Alex Elderd0b2e942013-01-24 16:13:36 -06001681static void img_request_layered_set(struct rbd_img_request *img_request)
1682{
1683 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1684 smp_mb();
1685}
1686
Alex Eldera2acd002013-05-08 22:50:04 -05001687static void img_request_layered_clear(struct rbd_img_request *img_request)
1688{
1689 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1690 smp_mb();
1691}
1692
Alex Elderd0b2e942013-01-24 16:13:36 -06001693static bool img_request_layered_test(struct rbd_img_request *img_request)
1694{
1695 smp_mb();
1696 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1697}
1698
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001699static enum obj_operation_type
1700rbd_img_request_op_type(struct rbd_img_request *img_request)
1701{
1702 if (img_request_write_test(img_request))
1703 return OBJ_OP_WRITE;
1704 else if (img_request_discard_test(img_request))
1705 return OBJ_OP_DISCARD;
1706 else
1707 return OBJ_OP_READ;
1708}
1709
Alex Elder6e2a4502013-03-27 09:16:30 -05001710static void
1711rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1712{
Alex Elderb9434c52013-04-19 15:34:50 -05001713 u64 xferred = obj_request->xferred;
1714 u64 length = obj_request->length;
1715
Alex Elder6e2a4502013-03-27 09:16:30 -05001716 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1717 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001718 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001719 /*
Josh Durgin17c1cc12013-08-26 17:55:38 -07001720 * ENOENT means a hole in the image. We zero-fill the entire
1721 * length of the request. A short read also implies zero-fill
1722 * to the end of the request. An error requires the whole
1723 * length of the request to be reported finished with an error
1724 * to the block layer. In each case we update the xferred
1725 * count to indicate the whole request was satisfied.
Alex Elder6e2a4502013-03-27 09:16:30 -05001726 */
Alex Elderb9434c52013-04-19 15:34:50 -05001727 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001728 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001729 if (obj_request->type == OBJ_REQUEST_BIO)
1730 zero_bio_chain(obj_request->bio_list, 0);
1731 else
1732 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001733 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001734 } else if (xferred < length && !obj_request->result) {
1735 if (obj_request->type == OBJ_REQUEST_BIO)
1736 zero_bio_chain(obj_request->bio_list, xferred);
1737 else
1738 zero_pages(obj_request->pages, xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001739 }
Josh Durgin17c1cc12013-08-26 17:55:38 -07001740 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001741 obj_request_done_set(obj_request);
1742}
1743
Alex Elderbf0d5f502012-11-22 00:00:08 -06001744static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1745{
Alex Elder37206ee2013-02-20 17:32:08 -06001746 dout("%s: obj %p cb %p\n", __func__, obj_request,
1747 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001748 if (obj_request->callback)
1749 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001750 else
1751 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001752}
1753
Alex Elderc47f9372013-02-26 14:23:07 -06001754static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
Alex Elder39bf2c52013-02-26 14:23:07 -06001755{
1756 dout("%s: obj %p\n", __func__, obj_request);
1757 obj_request_done_set(obj_request);
1758}
1759
Alex Elderc47f9372013-02-26 14:23:07 -06001760static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001761{
Alex Elder57acbaa2013-02-11 12:33:24 -06001762 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001763 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001764 bool layered = false;
1765
1766 if (obj_request_img_data_test(obj_request)) {
1767 img_request = obj_request->img_request;
1768 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001769 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001770 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001771
1772 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1773 obj_request, img_request, obj_request->result,
1774 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001775 if (layered && obj_request->result == -ENOENT &&
1776 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001777 rbd_img_parent_read(obj_request);
1778 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001779 rbd_img_obj_request_read_callback(obj_request);
1780 else
1781 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001782}
1783
Alex Elderc47f9372013-02-26 14:23:07 -06001784static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001785{
Sage Weil1b83bef2013-02-25 16:11:12 -08001786 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1787 obj_request->result, obj_request->length);
1788 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001789 * There is no such thing as a successful short write. Set
1790 * it to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001791 */
1792 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001793 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001794}
1795
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001796static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1797{
1798 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1799 obj_request->result, obj_request->length);
1800 /*
1801 * There is no such thing as a successful short discard. Set
1802 * it to our originally-requested length.
1803 */
1804 obj_request->xferred = obj_request->length;
Josh Durgind0265de2014-04-07 16:54:10 -07001805 /* discarding a non-existent object is not a problem */
1806 if (obj_request->result == -ENOENT)
1807 obj_request->result = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001808 obj_request_done_set(obj_request);
1809}
1810
Alex Elderfbfab532013-02-08 09:55:48 -06001811/*
1812 * For a simple stat call there's nothing to do. We'll do more if
1813 * this is part of a write sequence for a layered image.
1814 */
Alex Elderc47f9372013-02-26 14:23:07 -06001815static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001816{
Alex Elder37206ee2013-02-20 17:32:08 -06001817 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001818 obj_request_done_set(obj_request);
1819}
1820
Ilya Dryomov27617132015-07-16 17:36:11 +03001821static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1822{
1823 dout("%s: obj %p\n", __func__, obj_request);
1824
1825 if (obj_request_img_data_test(obj_request))
1826 rbd_osd_copyup_callback(obj_request);
1827 else
1828 obj_request_done_set(obj_request);
1829}
1830
Ilya Dryomov85e084f2016-04-28 16:07:24 +02001831static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001832{
1833 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001834 u16 opcode;
1835
Ilya Dryomov85e084f2016-04-28 16:07:24 +02001836 dout("%s: osd_req %p\n", __func__, osd_req);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001837 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001838 if (obj_request_img_data_test(obj_request)) {
1839 rbd_assert(obj_request->img_request);
1840 rbd_assert(obj_request->which != BAD_WHICH);
1841 } else {
1842 rbd_assert(obj_request->which == BAD_WHICH);
1843 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001844
Sage Weil1b83bef2013-02-25 16:11:12 -08001845 if (osd_req->r_result < 0)
1846 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001847
Alex Elderc47f9372013-02-26 14:23:07 -06001848 /*
1849 * We support a 64-bit length, but ultimately it has to be
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01001850 * passed to the block layer, which just supports a 32-bit
1851 * length field.
Alex Elderc47f9372013-02-26 14:23:07 -06001852 */
Yan, Zheng7665d852016-01-07 16:48:57 +08001853 obj_request->xferred = osd_req->r_ops[0].outdata_len;
Alex Elder8b3e1a52013-01-24 16:13:36 -06001854 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001855
Alex Elder79528732013-04-03 21:32:51 -05001856 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001857 switch (opcode) {
1858 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001859 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001860 break;
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001861 case CEPH_OSD_OP_SETALLOCHINT:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001862 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1863 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001864 /* fall through */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001865 case CEPH_OSD_OP_WRITE:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001866 case CEPH_OSD_OP_WRITEFULL:
Alex Elderc47f9372013-02-26 14:23:07 -06001867 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001868 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001869 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001870 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001871 break;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001872 case CEPH_OSD_OP_DELETE:
1873 case CEPH_OSD_OP_TRUNCATE:
1874 case CEPH_OSD_OP_ZERO:
1875 rbd_osd_discard_callback(obj_request);
1876 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001877 case CEPH_OSD_OP_CALL:
Ilya Dryomov27617132015-07-16 17:36:11 +03001878 rbd_osd_call_callback(obj_request);
1879 break;
Alex Elderb8d70032012-11-30 17:53:04 -06001880 case CEPH_OSD_OP_NOTIFY_ACK:
Alex Elder9969ebc2013-01-18 12:31:10 -06001881 case CEPH_OSD_OP_WATCH:
Alex Elderc47f9372013-02-26 14:23:07 -06001882 rbd_osd_trivial_callback(obj_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06001883 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001884 default:
Ilya Dryomov9584d502014-07-11 12:11:20 +04001885 rbd_warn(NULL, "%s: unsupported op %hu",
Alex Elderbf0d5f502012-11-22 00:00:08 -06001886 obj_request->object_name, (unsigned short) opcode);
1887 break;
1888 }
1889
Alex Elder07741302013-02-05 23:41:50 -06001890 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001891 rbd_obj_request_complete(obj_request);
1892}
1893
Alex Elder9d4df012013-04-19 15:34:50 -05001894static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001895{
1896 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001897 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder430c28c2013-04-03 21:32:51 -05001898
Ilya Dryomovbb873b52016-05-26 00:29:52 +02001899 if (img_request)
1900 osd_req->r_snapid = img_request->snap_id;
Alex Elder9d4df012013-04-19 15:34:50 -05001901}
1902
1903static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1904{
Alex Elder9d4df012013-04-19 15:34:50 -05001905 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001906
Ilya Dryomovbb873b52016-05-26 00:29:52 +02001907 osd_req->r_mtime = CURRENT_TIME;
1908 osd_req->r_data_offset = obj_request->offset;
Alex Elder430c28c2013-04-03 21:32:51 -05001909}
1910
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001911/*
1912 * Create an osd request. A read request has one osd op (read).
1913 * A write request has either one (watch) or two (hint+write) osd ops.
1914 * (All rbd data writes are prefixed with an allocation hint op, but
1915 * technically osd watch is a write request, hence this distinction.)
1916 */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001917static struct ceph_osd_request *rbd_osd_req_create(
1918 struct rbd_device *rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001919 enum obj_operation_type op_type,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001920 unsigned int num_ops,
Alex Elder430c28c2013-04-03 21:32:51 -05001921 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001922{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001923 struct ceph_snap_context *snapc = NULL;
1924 struct ceph_osd_client *osdc;
1925 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001926
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001927 if (obj_request_img_data_test(obj_request) &&
1928 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
Alex Elder6365d332013-02-11 12:33:24 -06001929 struct rbd_img_request *img_request = obj_request->img_request;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001930 if (op_type == OBJ_OP_WRITE) {
1931 rbd_assert(img_request_write_test(img_request));
1932 } else {
1933 rbd_assert(img_request_discard_test(img_request));
1934 }
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001935 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001936 }
1937
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001938 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001939
1940 /* Allocate and initialize the request, for the num_ops ops */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001941
1942 osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001943 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
David Disseldorp2224d872016-04-05 11:13:39 +02001944 GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001945 if (!osd_req)
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001946 goto fail;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001947
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001948 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001949 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001950 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001951 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001952
1953 osd_req->r_callback = rbd_osd_req_callback;
1954 osd_req->r_priv = obj_request;
1955
Ilya Dryomov3c972c92014-01-27 17:40:20 +02001956 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
Ilya Dryomovd30291b2016-04-29 19:54:20 +02001957 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
1958 obj_request->object_name))
1959 goto fail;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001960
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001961 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
1962 goto fail;
1963
Alex Elderbf0d5f502012-11-22 00:00:08 -06001964 return osd_req;
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02001965
1966fail:
1967 ceph_osdc_put_request(osd_req);
1968 return NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001969}
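/*
 * Example calls (illustrative; the data path in rbd_img_request_fill()
 * below draws the same distinction): a read needs a single osd op,
 * while a data write reserves room for the allocation hint plus the
 * write itself:
 *
 *	osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, obj_request);
 *	osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 2, obj_request);
 */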
1970
Alex Elder0eefd472013-04-19 15:34:50 -05001971/*
Josh Durgind3246fb2014-04-07 16:49:21 -07001972 * Create a copyup osd request based on the information in the object
1973 * request supplied. A copyup request has two or three osd ops, a
1974 * copyup method call, potentially a hint op, and a write or truncate
1975 * or zero op.
Alex Elder0eefd472013-04-19 15:34:50 -05001976 */
1977static struct ceph_osd_request *
1978rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1979{
1980 struct rbd_img_request *img_request;
1981 struct ceph_snap_context *snapc;
1982 struct rbd_device *rbd_dev;
1983 struct ceph_osd_client *osdc;
1984 struct ceph_osd_request *osd_req;
Josh Durgind3246fb2014-04-07 16:49:21 -07001985 int num_osd_ops = 3;
Alex Elder0eefd472013-04-19 15:34:50 -05001986
1987 rbd_assert(obj_request_img_data_test(obj_request));
1988 img_request = obj_request->img_request;
1989 rbd_assert(img_request);
Josh Durgind3246fb2014-04-07 16:49:21 -07001990 rbd_assert(img_request_write_test(img_request) ||
1991 img_request_discard_test(img_request));
Alex Elder0eefd472013-04-19 15:34:50 -05001992
Josh Durgind3246fb2014-04-07 16:49:21 -07001993 if (img_request_discard_test(img_request))
1994 num_osd_ops = 2;
1995
1996 /* Allocate and initialize the request, for all the ops */
Alex Elder0eefd472013-04-19 15:34:50 -05001997
1998 snapc = img_request->snapc;
1999 rbd_dev = img_request->rbd_dev;
2000 osdc = &rbd_dev->rbd_client->client->osdc;
Josh Durgind3246fb2014-04-07 16:49:21 -07002001 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
David Disseldorp2224d872016-04-05 11:13:39 +02002002 false, GFP_NOIO);
Alex Elder0eefd472013-04-19 15:34:50 -05002003 if (!osd_req)
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02002004 goto fail;
Alex Elder0eefd472013-04-19 15:34:50 -05002005
2006 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
2007 osd_req->r_callback = rbd_osd_req_callback;
2008 osd_req->r_priv = obj_request;
2009
Ilya Dryomov3c972c92014-01-27 17:40:20 +02002010 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
Ilya Dryomovd30291b2016-04-29 19:54:20 +02002011 if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
2012 obj_request->object_name))
2013 goto fail;
Alex Elder0eefd472013-04-19 15:34:50 -05002014
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02002015 if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
2016 goto fail;
2017
Alex Elder0eefd472013-04-19 15:34:50 -05002018 return osd_req;
Ilya Dryomov13d1ad12016-04-27 14:15:51 +02002019
2020fail:
2021 ceph_osdc_put_request(osd_req);
2022 return NULL;
Alex Elder0eefd472013-04-19 15:34:50 -05002023}
2024
2025
Alex Elderbf0d5f502012-11-22 00:00:08 -06002026static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2027{
2028 ceph_osdc_put_request(osd_req);
2029}
2030
2031/* object_name is assumed to be a non-null pointer and NUL-terminated */
2032
2033static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2034 u64 offset, u64 length,
2035 enum obj_request_type type)
2036{
2037 struct rbd_obj_request *obj_request;
2038 size_t size;
2039 char *name;
2040
2041 rbd_assert(obj_request_type_valid(type));
2042
2043 size = strlen(object_name) + 1;
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002044 name = kmalloc(size, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002045 if (!name)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002046 return NULL;
2047
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002048 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002049 if (!obj_request) {
2050 kfree(name);
2051 return NULL;
2052 }
2053
Alex Elderbf0d5f502012-11-22 00:00:08 -06002054 obj_request->object_name = memcpy(name, object_name, size);
2055 obj_request->offset = offset;
2056 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06002057 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002058 obj_request->which = BAD_WHICH;
2059 obj_request->type = type;
2060 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06002061 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002062 kref_init(&obj_request->kref);
2063
Alex Elder37206ee2013-02-20 17:32:08 -06002064 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2065 offset, length, (int)type, obj_request);
2066
Alex Elderbf0d5f502012-11-22 00:00:08 -06002067 return obj_request;
2068}
2069
2070static void rbd_obj_request_destroy(struct kref *kref)
2071{
2072 struct rbd_obj_request *obj_request;
2073
2074 obj_request = container_of(kref, struct rbd_obj_request, kref);
2075
Alex Elder37206ee2013-02-20 17:32:08 -06002076 dout("%s: obj %p\n", __func__, obj_request);
2077
Alex Elderbf0d5f502012-11-22 00:00:08 -06002078 rbd_assert(obj_request->img_request == NULL);
2079 rbd_assert(obj_request->which == BAD_WHICH);
2080
2081 if (obj_request->osd_req)
2082 rbd_osd_req_destroy(obj_request->osd_req);
2083
2084 rbd_assert(obj_request_type_valid(obj_request->type));
2085 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06002086 case OBJ_REQUEST_NODATA:
2087 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06002088 case OBJ_REQUEST_BIO:
2089 if (obj_request->bio_list)
2090 bio_chain_put(obj_request->bio_list);
2091 break;
Alex Elder788e2df2013-01-17 12:25:27 -06002092 case OBJ_REQUEST_PAGES:
2093 if (obj_request->pages)
2094 ceph_release_page_vector(obj_request->pages,
2095 obj_request->page_count);
2096 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002097 }
2098
Alex Elderf907ad52013-05-01 12:43:03 -05002099 kfree(obj_request->object_name);
Alex Elder868311b2013-05-01 12:43:03 -05002100 obj_request->object_name = NULL;
2101 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002102}
2103
Alex Elderfb65d2282013-05-08 22:50:04 -05002104/* It's OK to call this for a device with no parent */
2105
2106static void rbd_spec_put(struct rbd_spec *spec);
2107static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2108{
2109 rbd_dev_remove_parent(rbd_dev);
2110 rbd_spec_put(rbd_dev->parent_spec);
2111 rbd_dev->parent_spec = NULL;
2112 rbd_dev->parent_overlap = 0;
2113}
2114
Alex Elderbf0d5f502012-11-22 00:00:08 -06002115/*
Alex Eldera2acd002013-05-08 22:50:04 -05002116 * Parent image reference counting is used to determine when an
2117 * image's parent fields can be safely torn down--after there are no
2118 * more in-flight requests to the parent image. When the last
2119 * reference is dropped, cleaning them up is safe.
2120 */
2121static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2122{
2123 int counter;
2124
2125 if (!rbd_dev->parent_spec)
2126 return;
2127
2128 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2129 if (counter > 0)
2130 return;
2131
2132 /* Last reference; clean up parent data structures */
2133
2134 if (!counter)
2135 rbd_dev_unparent(rbd_dev);
2136 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04002137 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002138}
2139
2140/*
2141 * If an image has a non-zero parent overlap, get a reference to its
2142 * parent.
2143 *
2144 * Returns true if the rbd device has a parent with a non-zero
2145 * overlap and a reference for it was successfully taken, or
2146 * false otherwise.
2147 */
2148static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2149{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002150 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002151
2152 if (!rbd_dev->parent_spec)
2153 return false;
2154
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002155 down_read(&rbd_dev->header_rwsem);
2156 if (rbd_dev->parent_overlap)
2157 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2158 up_read(&rbd_dev->header_rwsem);
Alex Eldera2acd002013-05-08 22:50:04 -05002159
2160 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04002161 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002162
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002163 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002164}
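/*
 * Pairing note (summary of the callers visible below, no new
 * behaviour): rbd_img_request_create() takes the parent reference and
 * only then marks the image request layered; rbd_img_request_destroy()
 * drops it again, letting rbd_dev_parent_put() tear down the parent
 * spec once the count reaches zero:
 *
 *	if (rbd_dev_parent_get(rbd_dev))
 *		img_request_layered_set(img_request);
 *	...
 *	if (img_request_layered_test(img_request)) {
 *		img_request_layered_clear(img_request);
 *		rbd_dev_parent_put(img_request->rbd_dev);
 *	}
 */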
2165
Alex Elderbf0d5f502012-11-22 00:00:08 -06002166/*
2167 * Caller is responsible for filling in the list of object requests
2168 * that comprises the image request, and the Linux request pointer
2169 * (if there is one).
2170 */
Alex Eldercc344fa2013-02-19 12:25:56 -06002171static struct rbd_img_request *rbd_img_request_create(
2172 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06002173 u64 offset, u64 length,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002174 enum obj_operation_type op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07002175 struct ceph_snap_context *snapc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002176{
2177 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002178
Ilya Dryomov7a716aa2014-08-05 11:25:54 +04002179 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002180 if (!img_request)
2181 return NULL;
2182
Alex Elderbf0d5f502012-11-22 00:00:08 -06002183 img_request->rq = NULL;
2184 img_request->rbd_dev = rbd_dev;
2185 img_request->offset = offset;
2186 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06002187 img_request->flags = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002188 if (op_type == OBJ_OP_DISCARD) {
2189 img_request_discard_set(img_request);
2190 img_request->snapc = snapc;
2191 } else if (op_type == OBJ_OP_WRITE) {
Alex Elder0c425242013-02-08 09:55:49 -06002192 img_request_write_set(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07002193 img_request->snapc = snapc;
Alex Elder0c425242013-02-08 09:55:49 -06002194 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002195 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06002196 }
Alex Eldera2acd002013-05-08 22:50:04 -05002197 if (rbd_dev_parent_get(rbd_dev))
Alex Elderd0b2e942013-01-24 16:13:36 -06002198 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002199 spin_lock_init(&img_request->completion_lock);
2200 img_request->next_completion = 0;
2201 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06002202 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002203 img_request->obj_request_count = 0;
2204 INIT_LIST_HEAD(&img_request->obj_requests);
2205 kref_init(&img_request->kref);
2206
Alex Elder37206ee2013-02-20 17:32:08 -06002207 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002208 obj_op_name(op_type), offset, length, img_request);
Alex Elder37206ee2013-02-20 17:32:08 -06002209
Alex Elderbf0d5f502012-11-22 00:00:08 -06002210 return img_request;
2211}
2212
2213static void rbd_img_request_destroy(struct kref *kref)
2214{
2215 struct rbd_img_request *img_request;
2216 struct rbd_obj_request *obj_request;
2217 struct rbd_obj_request *next_obj_request;
2218
2219 img_request = container_of(kref, struct rbd_img_request, kref);
2220
Alex Elder37206ee2013-02-20 17:32:08 -06002221 dout("%s: img %p\n", __func__, img_request);
2222
Alex Elderbf0d5f502012-11-22 00:00:08 -06002223 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2224 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06002225 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002226
Alex Eldera2acd002013-05-08 22:50:04 -05002227 if (img_request_layered_test(img_request)) {
2228 img_request_layered_clear(img_request);
2229 rbd_dev_parent_put(img_request->rbd_dev);
2230 }
2231
Josh Durginbef95452014-04-04 17:47:52 -07002232 if (img_request_write_test(img_request) ||
2233 img_request_discard_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05002234 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002235
Alex Elder1c2a9df2013-05-01 12:43:03 -05002236 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002237}
2238
Alex Eldere93f3152013-05-08 22:50:04 -05002239static struct rbd_img_request *rbd_parent_request_create(
2240 struct rbd_obj_request *obj_request,
2241 u64 img_offset, u64 length)
2242{
2243 struct rbd_img_request *parent_request;
2244 struct rbd_device *rbd_dev;
2245
2246 rbd_assert(obj_request->img_request);
2247 rbd_dev = obj_request->img_request->rbd_dev;
2248
Josh Durgin4e752f02014-04-08 11:12:11 -07002249 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002250 length, OBJ_OP_READ, NULL);
Alex Eldere93f3152013-05-08 22:50:04 -05002251 if (!parent_request)
2252 return NULL;
2253
2254 img_request_child_set(parent_request);
2255 rbd_obj_request_get(obj_request);
2256 parent_request->obj_request = obj_request;
2257
2258 return parent_request;
2259}
2260
2261static void rbd_parent_request_destroy(struct kref *kref)
2262{
2263 struct rbd_img_request *parent_request;
2264 struct rbd_obj_request *orig_request;
2265
2266 parent_request = container_of(kref, struct rbd_img_request, kref);
2267 orig_request = parent_request->obj_request;
2268
2269 parent_request->obj_request = NULL;
2270 rbd_obj_request_put(orig_request);
2271 img_request_child_clear(parent_request);
2272
2273 rbd_img_request_destroy(kref);
2274}
2275
Alex Elder12178572013-02-08 09:55:49 -06002276static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2277{
Alex Elder6365d332013-02-11 12:33:24 -06002278 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06002279 unsigned int xferred;
2280 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002281 bool more;
Alex Elder12178572013-02-08 09:55:49 -06002282
Alex Elder6365d332013-02-11 12:33:24 -06002283 rbd_assert(obj_request_img_data_test(obj_request));
2284 img_request = obj_request->img_request;
2285
Alex Elder12178572013-02-08 09:55:49 -06002286 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2287 xferred = (unsigned int)obj_request->xferred;
2288 result = obj_request->result;
2289 if (result) {
2290 struct rbd_device *rbd_dev = img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002291 enum obj_operation_type op_type;
2292
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002293 if (img_request_discard_test(img_request))
2294 op_type = OBJ_OP_DISCARD;
2295 else if (img_request_write_test(img_request))
2296 op_type = OBJ_OP_WRITE;
2297 else
2298 op_type = OBJ_OP_READ;
Alex Elder12178572013-02-08 09:55:49 -06002299
Ilya Dryomov9584d502014-07-11 12:11:20 +04002300 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002301 obj_op_name(op_type), obj_request->length,
2302 obj_request->img_offset, obj_request->offset);
Ilya Dryomov9584d502014-07-11 12:11:20 +04002303 rbd_warn(rbd_dev, " result %d xferred %x",
Alex Elder12178572013-02-08 09:55:49 -06002304 result, xferred);
2305 if (!img_request->result)
2306 img_request->result = result;
Ilya Dryomov082a75d2015-04-25 15:56:15 +03002307 /*
2308 * Need to end I/O on the entire obj_request worth of
2309 * bytes in case of error.
2310 */
2311 xferred = obj_request->length;
Alex Elder12178572013-02-08 09:55:49 -06002312 }
2313
Alex Elderf1a47392013-04-19 15:34:50 -05002314 /* Image object requests don't own their page array */
2315
2316 if (obj_request->type == OBJ_REQUEST_PAGES) {
2317 obj_request->pages = NULL;
2318 obj_request->page_count = 0;
2319 }
2320
Alex Elder8b3e1a52013-01-24 16:13:36 -06002321 if (img_request_child_test(img_request)) {
2322 rbd_assert(img_request->obj_request != NULL);
2323 more = obj_request->which < img_request->obj_request_count - 1;
2324 } else {
2325 rbd_assert(img_request->rq != NULL);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01002326
2327 more = blk_update_request(img_request->rq, result, xferred);
2328 if (!more)
2329 __blk_mq_end_request(img_request->rq, result);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002330 }
2331
2332 return more;
Alex Elder12178572013-02-08 09:55:49 -06002333}
2334
Alex Elder21692382013-04-05 01:27:12 -05002335static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2336{
2337 struct rbd_img_request *img_request;
2338 u32 which = obj_request->which;
2339 bool more = true;
2340
Alex Elder6365d332013-02-11 12:33:24 -06002341 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05002342 img_request = obj_request->img_request;
2343
2344 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2345 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05002346 rbd_assert(img_request->obj_request_count > 0);
2347 rbd_assert(which != BAD_WHICH);
2348 rbd_assert(which < img_request->obj_request_count);
Alex Elder21692382013-04-05 01:27:12 -05002349
2350 spin_lock_irq(&img_request->completion_lock);
2351 if (which != img_request->next_completion)
2352 goto out;
2353
2354 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05002355 rbd_assert(more);
2356 rbd_assert(which < img_request->obj_request_count);
2357
2358 if (!obj_request_done_test(obj_request))
2359 break;
Alex Elder12178572013-02-08 09:55:49 -06002360 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002361 which++;
2362 }
2363
2364 rbd_assert(more ^ (which == img_request->obj_request_count));
2365 img_request->next_completion = which;
2366out:
2367 spin_unlock_irq(&img_request->completion_lock);
Alex Elder0f2d5be2014-04-26 14:21:44 +04002368 rbd_img_request_put(img_request);
Alex Elder21692382013-04-05 01:27:12 -05002369
2370 if (!more)
2371 rbd_img_request_complete(img_request);
2372}
2373
Alex Elderf1a47392013-04-19 15:34:50 -05002374/*
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002375 * Add individual osd ops to the given ceph_osd_request and prepare
2376 * them for submission. num_ops is the current number of
2377 * osd operations already added to the object request.
2378 */
2379static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2380 struct ceph_osd_request *osd_request,
2381 enum obj_operation_type op_type,
2382 unsigned int num_ops)
2383{
2384 struct rbd_img_request *img_request = obj_request->img_request;
2385 struct rbd_device *rbd_dev = img_request->rbd_dev;
2386 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2387 u64 offset = obj_request->offset;
2388 u64 length = obj_request->length;
2389 u64 img_end;
2390 u16 opcode;
2391
2392 if (op_type == OBJ_OP_DISCARD) {
Josh Durgind3246fb2014-04-07 16:49:21 -07002393 if (!offset && length == object_size &&
2394 (!img_request_layered_test(img_request) ||
2395 !obj_request_overlaps_parent(obj_request))) {
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002396 opcode = CEPH_OSD_OP_DELETE;
2397 } else if ((offset + length == object_size)) {
2398 opcode = CEPH_OSD_OP_TRUNCATE;
2399 } else {
2400 down_read(&rbd_dev->header_rwsem);
2401 img_end = rbd_dev->header.image_size;
2402 up_read(&rbd_dev->header_rwsem);
2403
2404 if (obj_request->img_offset + length == img_end)
2405 opcode = CEPH_OSD_OP_TRUNCATE;
2406 else
2407 opcode = CEPH_OSD_OP_ZERO;
2408 }
2409 } else if (op_type == OBJ_OP_WRITE) {
Ilya Dryomove30b7572015-10-07 17:27:17 +02002410 if (!offset && length == object_size)
2411 opcode = CEPH_OSD_OP_WRITEFULL;
2412 else
2413 opcode = CEPH_OSD_OP_WRITE;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002414 osd_req_op_alloc_hint_init(osd_request, num_ops,
2415 object_size, object_size);
2416 num_ops++;
2417 } else {
2418 opcode = CEPH_OSD_OP_READ;
2419 }
2420
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002421 if (opcode == CEPH_OSD_OP_DELETE)
Yan, Zheng144cba12015-04-27 11:09:54 +08002422 osd_req_op_init(osd_request, num_ops, opcode, 0);
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002423 else
2424 osd_req_op_extent_init(osd_request, num_ops, opcode,
2425 offset, length, 0, 0);
2426
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002427 if (obj_request->type == OBJ_REQUEST_BIO)
2428 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2429 obj_request->bio_list, length);
2430 else if (obj_request->type == OBJ_REQUEST_PAGES)
2431 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2432 obj_request->pages, length,
2433 offset & ~PAGE_MASK, false, false);
2434
2435 /* Discards are also writes */
2436 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2437 rbd_osd_req_format_write(obj_request);
2438 else
2439 rbd_osd_req_format_read(obj_request);
2440}
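/*
 * Opcode selection above, restated as examples (illustrative):
 *  - a discard of a whole object with no parent data beneath it
 *    becomes CEPH_OSD_OP_DELETE
 *  - a discard that runs up to the end of the object (or of the
 *    image) becomes CEPH_OSD_OP_TRUNCATE
 *  - any other discard becomes CEPH_OSD_OP_ZERO
 *  - a write covering a whole object becomes CEPH_OSD_OP_WRITEFULL,
 *    otherwise CEPH_OSD_OP_WRITE, each preceded by an allocation hint
 *  - everything else is a CEPH_OSD_OP_READ
 */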
2441
2442/*
Alex Elderf1a47392013-04-19 15:34:50 -05002443 * Split up an image request into one or more object requests, each
2444 * to a different object. The "type" parameter indicates whether
2445 * "data_desc" is the pointer to the head of a list of bio
2446 * structures, or the base of a page array. In either case this
2447 * function assumes data_desc describes memory sufficient to hold
2448 * all data described by the image request.
2449 */
2450static int rbd_img_request_fill(struct rbd_img_request *img_request,
2451 enum obj_request_type type,
2452 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002453{
2454 struct rbd_device *rbd_dev = img_request->rbd_dev;
2455 struct rbd_obj_request *obj_request = NULL;
2456 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002457 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002458 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002459 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002460 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002461 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002462 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002463
Alex Elderf1a47392013-04-19 15:34:50 -05002464 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2465 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002466
Alex Elder7da22d22013-01-24 16:13:36 -06002467 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002468 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002469 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002470 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002471
2472 if (type == OBJ_REQUEST_BIO) {
2473 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002474 rbd_assert(img_offset ==
2475 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002476 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002477 pages = data_desc;
2478 }
2479
Alex Elderbf0d5f502012-11-22 00:00:08 -06002480 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002481 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002482 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002483 u64 offset;
2484 u64 length;
2485
Alex Elder7da22d22013-01-24 16:13:36 -06002486 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002487 if (!object_name)
2488 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06002489 offset = rbd_segment_offset(rbd_dev, img_offset);
2490 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002491 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002492 offset, length, type);
Alex Elder78c2a442013-05-01 12:43:04 -05002493 /* object request has its own copy of the object name */
2494 rbd_segment_name_free(object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002495 if (!obj_request)
2496 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002497
Josh Durgin03507db2013-08-27 14:45:46 -07002498 /*
2499 * set obj_request->img_request before creating the
2500 * osd_request so that it gets the right snapc
2501 */
2502 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002503
Alex Elderf1a47392013-04-19 15:34:50 -05002504 if (type == OBJ_REQUEST_BIO) {
2505 unsigned int clone_size;
2506
2507 rbd_assert(length <= (u64)UINT_MAX);
2508 clone_size = (unsigned int)length;
2509 obj_request->bio_list =
2510 bio_chain_clone_range(&bio_list,
2511 &bio_offset,
2512 clone_size,
David Disseldorp2224d872016-04-05 11:13:39 +02002513 GFP_NOIO);
Alex Elderf1a47392013-04-19 15:34:50 -05002514 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002515 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002516 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002517 unsigned int page_count;
2518
2519 obj_request->pages = pages;
2520 page_count = (u32)calc_pages_for(offset, length);
2521 obj_request->page_count = page_count;
2522 if ((offset + length) & ~PAGE_MASK)
2523 page_count--; /* more on last page */
2524 pages += page_count;
2525 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002526
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002527 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2528 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2529 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002530 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002531 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002532
Alex Elder2fa12322013-04-05 01:27:12 -05002533 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002534 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002535 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002536
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002537 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2538
2539 rbd_img_request_get(img_request);
2540
Alex Elder7da22d22013-01-24 16:13:36 -06002541 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002542 resid -= length;
2543 }
2544
2545 return 0;
2546
Alex Elderbf0d5f502012-11-22 00:00:08 -06002547out_unwind:
2548 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002549 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002550
2551 return -ENOMEM;
2552}
2553
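/*
 * Handle completion of a copyup request: release the page vector
 * that held the data read from the parent and mark the object
 * request done.  On success the transfer count is set to the
 * originally-requested write length.
 */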
Alex Elder3d7efd12013-04-19 15:34:50 -05002554static void
Ilya Dryomov27617132015-07-16 17:36:11 +03002555rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
Alex Elder0eefd472013-04-19 15:34:50 -05002556{
2557 struct rbd_img_request *img_request;
2558 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002559 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002560 u32 page_count;
2561
Ilya Dryomov27617132015-07-16 17:36:11 +03002562 dout("%s: obj %p\n", __func__, obj_request);
2563
Josh Durgind3246fb2014-04-07 16:49:21 -07002564 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2565 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002566 rbd_assert(obj_request_img_data_test(obj_request));
2567 img_request = obj_request->img_request;
2568 rbd_assert(img_request);
2569
2570 rbd_dev = img_request->rbd_dev;
2571 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002572
Alex Elderebda6402013-05-10 16:29:22 -05002573 pages = obj_request->copyup_pages;
2574 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002575 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002576 page_count = obj_request->copyup_page_count;
2577 rbd_assert(page_count);
2578 obj_request->copyup_page_count = 0;
2579 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002580
2581 /*
2582 * We want the transfer count to reflect the size of the
2583 * original write request. There is no such thing as a
2584 * successful short write, so if the request was successful
2585 * we can just set it to the originally-requested length.
2586 */
2587 if (!obj_request->result)
2588 obj_request->xferred = obj_request->length;
2589
Ilya Dryomov27617132015-07-16 17:36:11 +03002590 obj_request_done_set(obj_request);
Alex Elder0eefd472013-04-19 15:34:50 -05002591}
2592
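/*
 * Callback for the parent-image read issued by
 * rbd_img_obj_parent_read_full().  Hands the pages read from the
 * parent to the original object request, builds a new OSD request
 * carrying the copyup op followed by the original op(s), and
 * submits it.  If the parent overlap has since dropped to zero, the
 * original request is simply re-submitted.
 */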
2593static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002594rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2595{
2596 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002597 struct ceph_osd_request *osd_req;
2598 struct ceph_osd_client *osdc;
2599 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002600 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002601 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002602 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002603 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002604 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002605
2606 rbd_assert(img_request_child_test(img_request));
2607
2608 /* First get what we need from the image request */
2609
2610 pages = img_request->copyup_pages;
2611 rbd_assert(pages != NULL);
2612 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002613 page_count = img_request->copyup_page_count;
2614 rbd_assert(page_count);
2615 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002616
2617 orig_request = img_request->obj_request;
2618 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002619 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002620 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002621 parent_length = img_request->length;
2622 rbd_assert(parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002623 rbd_img_request_put(img_request);
2624
Alex Elder91c6feb2013-05-06 17:40:32 -05002625 rbd_assert(orig_request->img_request);
2626 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002627 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002628
Alex Elderbbea1c12013-05-06 17:40:33 -05002629 /*
2630 * If the overlap has become 0 (most likely because the
2631 * image has been flattened) we need to free the pages
2632 * and re-submit the original write request.
2633 */
2634 if (!rbd_dev->parent_overlap) {
2635 struct ceph_osd_client *osdc;
2636
2637 ceph_release_page_vector(pages, page_count);
2638 osdc = &rbd_dev->rbd_client->client->osdc;
2639 img_result = rbd_obj_request_submit(osdc, orig_request);
2640 if (!img_result)
2641 return;
2642 }
2643
2644 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002645 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002646
Alex Elder8785b1d2013-05-09 10:08:49 -05002647 /*
 2648	 * The original osd request is of no use to us any more.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002649 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002650 * request. Allocate the new copyup osd request for the
2651 * original request, and release the old one.
2652 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002653 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002654 osd_req = rbd_osd_req_create_copyup(orig_request);
2655 if (!osd_req)
2656 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002657 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002658 orig_request->osd_req = osd_req;
2659 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002660 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002661
Alex Elder0eefd472013-04-19 15:34:50 -05002662 /* Initialize the copyup op */
2663
2664 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002665 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002666 false, false);
2667
Josh Durgind3246fb2014-04-07 16:49:21 -07002668 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002669
Josh Durgind3246fb2014-04-07 16:49:21 -07002670 op_type = rbd_img_request_op_type(orig_request->img_request);
2671 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
Alex Elder0eefd472013-04-19 15:34:50 -05002672
2673 /* All set, send it off. */
2674
Alex Elder0eefd472013-04-19 15:34:50 -05002675 osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderbbea1c12013-05-06 17:40:33 -05002676 img_result = rbd_obj_request_submit(osdc, orig_request);
2677 if (!img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002678 return;
2679out_err:
2680 /* Record the error code and complete the request */
2681
Alex Elderbbea1c12013-05-06 17:40:33 -05002682 orig_request->result = img_result;
Alex Elder0eefd472013-04-19 15:34:50 -05002683 orig_request->xferred = 0;
2684 obj_request_done_set(orig_request);
2685 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002686}
2687
2688/*
2689 * Read from the parent image the range of data that covers the
2690 * entire target of the given object request. This is used for
2691 * satisfying a layered image write request when the target of an
2692 * object request from the image request does not exist.
2693 *
2694 * A page array big enough to hold the returned data is allocated
2695 * and supplied to rbd_img_request_fill() as the "data descriptor."
2696 * When the read completes, this page array will be transferred to
2697 * the original object request for the copyup operation.
2698 *
2699 * If an error occurs, record it as the result of the original
2700 * object request and mark it done so it gets completed.
2701 */
2702static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2703{
2704 struct rbd_img_request *img_request = NULL;
2705 struct rbd_img_request *parent_request = NULL;
2706 struct rbd_device *rbd_dev;
2707 u64 img_offset;
2708 u64 length;
2709 struct page **pages = NULL;
2710 u32 page_count;
2711 int result;
2712
2713 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderb91f09f2013-05-10 16:29:22 -05002714 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder3d7efd12013-04-19 15:34:50 -05002715
2716 img_request = obj_request->img_request;
2717 rbd_assert(img_request != NULL);
2718 rbd_dev = img_request->rbd_dev;
2719 rbd_assert(rbd_dev->parent != NULL);
2720
2721 /*
2722 * Determine the byte range covered by the object in the
2723 * child image to which the original request was to be sent.
2724 */
2725 img_offset = obj_request->img_offset - obj_request->offset;
2726 length = (u64)1 << rbd_dev->header.obj_order;
2727
2728 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002729 * There is no defined parent data beyond the parent
2730 * overlap, so limit what we read at that boundary if
2731 * necessary.
2732 */
2733 if (img_offset + length > rbd_dev->parent_overlap) {
2734 rbd_assert(img_offset < rbd_dev->parent_overlap);
2735 length = rbd_dev->parent_overlap - img_offset;
2736 }
2737
2738 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002739 * Allocate a page array big enough to receive the data read
2740 * from the parent.
2741 */
2742 page_count = (u32)calc_pages_for(0, length);
2743 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2744 if (IS_ERR(pages)) {
2745 result = PTR_ERR(pages);
2746 pages = NULL;
2747 goto out_err;
2748 }
2749
2750 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002751 parent_request = rbd_parent_request_create(obj_request,
2752 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002753 if (!parent_request)
2754 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002755
2756 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2757 if (result)
2758 goto out_err;
2759 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002760 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002761
2762 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2763 result = rbd_img_request_submit(parent_request);
2764 if (!result)
2765 return 0;
2766
2767 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002768 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002769 parent_request->obj_request = NULL;
2770 rbd_obj_request_put(obj_request);
2771out_err:
2772 if (pages)
2773 ceph_release_page_vector(pages, page_count);
2774 if (parent_request)
2775 rbd_img_request_put(parent_request);
2776 obj_request->result = result;
2777 obj_request->xferred = 0;
2778 obj_request_done_set(obj_request);
2779
2780 return result;
2781}
2782
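/*
 * Callback for the STAT request issued by rbd_img_obj_exists_submit().
 * Records whether the target object exists and re-submits the
 * original object request, completing it instead if an unexpected
 * error came back.
 */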
Alex Elderc5b5ef62013-02-11 12:33:24 -06002783static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2784{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002785 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002786 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002787 int result;
2788
2789 rbd_assert(!obj_request_img_data_test(obj_request));
2790
2791 /*
2792 * All we need from the object request is the original
2793 * request and the result of the STAT op. Grab those, then
2794 * we're done with the request.
2795 */
2796 orig_request = obj_request->obj_request;
2797 obj_request->obj_request = NULL;
Alex Elder912c3172013-05-13 20:35:38 -05002798 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002799 rbd_assert(orig_request);
2800 rbd_assert(orig_request->img_request);
2801
2802 result = obj_request->result;
2803 obj_request->result = 0;
2804
2805 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2806 obj_request, orig_request, result,
2807 obj_request->xferred, obj_request->length);
2808 rbd_obj_request_put(obj_request);
2809
Alex Elder638f5ab2013-05-06 17:40:33 -05002810 /*
2811 * If the overlap has become 0 (most likely because the
 2812	 * image has been flattened) we just need to re-submit the
 2813	 * original write request.
2814 */
2815 rbd_dev = orig_request->img_request->rbd_dev;
2816 if (!rbd_dev->parent_overlap) {
2817 struct ceph_osd_client *osdc;
2818
Alex Elder638f5ab2013-05-06 17:40:33 -05002819 osdc = &rbd_dev->rbd_client->client->osdc;
2820 result = rbd_obj_request_submit(osdc, orig_request);
2821 if (!result)
2822 return;
2823 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002824
2825 /*
2826 * Our only purpose here is to determine whether the object
2827 * exists, and we don't want to treat the non-existence as
2828 * an error. If something else comes back, transfer the
2829 * error to the original request and complete it now.
2830 */
2831 if (!result) {
2832 obj_request_existence_set(orig_request, true);
2833 } else if (result == -ENOENT) {
2834 obj_request_existence_set(orig_request, false);
2835 } else if (result) {
2836 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002837 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002838 }
2839
2840 /*
2841 * Resubmit the original request now that we have recorded
2842 * whether the target object exists.
2843 */
Alex Elderb454e362013-04-19 15:34:50 -05002844 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002845out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002846 if (orig_request->result)
2847 rbd_obj_request_complete(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002848}
2849
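/*
 * Issue a STAT request to find out whether the target object of a
 * layered write already exists.  The result is handled by
 * rbd_img_obj_exists_callback().
 */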
2850static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2851{
2852 struct rbd_obj_request *stat_request;
2853 struct rbd_device *rbd_dev;
2854 struct ceph_osd_client *osdc;
2855 struct page **pages = NULL;
2856 u32 page_count;
2857 size_t size;
2858 int ret;
2859
2860 /*
2861 * The response data for a STAT call consists of:
2862 * le64 length;
2863 * struct {
2864 * le32 tv_sec;
2865 * le32 tv_nsec;
2866 * } mtime;
2867 */
2868 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2869 page_count = (u32)calc_pages_for(0, size);
2870 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2871 if (IS_ERR(pages))
2872 return PTR_ERR(pages);
2873
2874 ret = -ENOMEM;
2875 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2876 OBJ_REQUEST_PAGES);
2877 if (!stat_request)
2878 goto out;
2879
2880 rbd_obj_request_get(obj_request);
2881 stat_request->obj_request = obj_request;
2882 stat_request->pages = pages;
2883 stat_request->page_count = page_count;
2884
2885 rbd_assert(obj_request->img_request);
2886 rbd_dev = obj_request->img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002887 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02002888 stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002889 if (!stat_request->osd_req)
2890 goto out;
2891 stat_request->callback = rbd_img_obj_exists_callback;
2892
Yan, Zheng144cba12015-04-27 11:09:54 +08002893 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002894 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2895 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002896 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002897
2898 osdc = &rbd_dev->rbd_client->client->osdc;
2899 ret = rbd_obj_request_submit(osdc, stat_request);
2900out:
2901 if (ret)
2902 rbd_obj_request_put(obj_request);
2903
2904 return ret;
2905}
2906
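/*
 * Return true if the object request needs no copyup from the parent
 * image and can therefore be submitted directly to the OSD.
 */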
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002907static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002908{
2909 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002910 struct rbd_device *rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002911
2912 rbd_assert(obj_request_img_data_test(obj_request));
2913
2914 img_request = obj_request->img_request;
2915 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002916 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002917
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002918 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002919 if (!img_request_write_test(img_request) &&
2920 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002921 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002922
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002923 /* Non-layered writes */
2924 if (!img_request_layered_test(img_request))
2925 return true;
2926
2927 /*
2928 * Layered writes outside of the parent overlap range don't
2929 * share any data with the parent.
2930 */
2931 if (!obj_request_overlaps_parent(obj_request))
2932 return true;
2933
2934 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002935 * Entire-object layered writes - we will overwrite whatever
2936 * parent data there is anyway.
2937 */
2938 if (!obj_request->offset &&
2939 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2940 return true;
2941
2942 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002943 * If the object is known to already exist, its parent data has
2944 * already been copied.
2945 */
2946 if (obj_request_known_test(obj_request) &&
2947 obj_request_exists_test(obj_request))
2948 return true;
2949
2950 return false;
2951}
2952
2953static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2954{
2955 if (img_obj_request_simple(obj_request)) {
Alex Elderb454e362013-04-19 15:34:50 -05002956 struct rbd_device *rbd_dev;
2957 struct ceph_osd_client *osdc;
2958
2959 rbd_dev = obj_request->img_request->rbd_dev;
2960 osdc = &rbd_dev->rbd_client->client->osdc;
2961
2962 return rbd_obj_request_submit(osdc, obj_request);
2963 }
2964
2965 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002966 * It's a layered write. The target object might exist but
2967 * we may not know that yet. If we know it doesn't exist,
2968 * start by reading the data for the full target object from
2969 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002970 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002971 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002972 return rbd_img_obj_parent_read_full(obj_request);
2973
2974 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002975
2976 return rbd_img_obj_exists_submit(obj_request);
2977}
2978
Alex Elderbf0d5f502012-11-22 00:00:08 -06002979static int rbd_img_request_submit(struct rbd_img_request *img_request)
2980{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002981 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002982 struct rbd_obj_request *next_obj_request;
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002983 int ret = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002984
Alex Elder37206ee2013-02-20 17:32:08 -06002985 dout("%s: img %p\n", __func__, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002986
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002987 rbd_img_request_get(img_request);
2988 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderb454e362013-04-19 15:34:50 -05002989 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002990 if (ret)
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002991 goto out_put_ireq;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002992 }
2993
Ilya Dryomov663ae2c2016-05-16 13:18:57 +02002994out_put_ireq:
2995 rbd_img_request_put(img_request);
2996 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002997}
2998
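/*
 * Completion callback for a read from the parent image issued on
 * behalf of a child object request.  Clips the transfer count at the
 * parent overlap boundary and completes the original object request;
 * if the overlap has become zero, the original request is
 * re-submitted instead.
 */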
Alex Elder8b3e1a52013-01-24 16:13:36 -06002999static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
3000{
3001 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003002 struct rbd_device *rbd_dev;
3003 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05003004 u64 img_xferred;
3005 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003006
3007 rbd_assert(img_request_child_test(img_request));
3008
Alex Elder02c74fb2013-05-06 17:40:33 -05003009 /* First get what we need from the image request and release it */
3010
Alex Elder8b3e1a52013-01-24 16:13:36 -06003011 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05003012 img_xferred = img_request->xferred;
3013 img_result = img_request->result;
3014 rbd_img_request_put(img_request);
3015
3016 /*
3017 * If the overlap has become 0 (most likely because the
3018 * image has been flattened) we need to re-submit the
3019 * original request.
3020 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003021 rbd_assert(obj_request);
3022 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05003023 rbd_dev = obj_request->img_request->rbd_dev;
3024 if (!rbd_dev->parent_overlap) {
3025 struct ceph_osd_client *osdc;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003026
Alex Elder02c74fb2013-05-06 17:40:33 -05003027 osdc = &rbd_dev->rbd_client->client->osdc;
3028 img_result = rbd_obj_request_submit(osdc, obj_request);
3029 if (!img_result)
3030 return;
3031 }
3032
3033 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003034 if (obj_request->result)
3035 goto out;
3036
3037 /*
3038 * We need to zero anything beyond the parent overlap
3039 * boundary. Since rbd_img_obj_request_read_callback()
3040 * will zero anything beyond the end of a short read, an
3041 * easy way to do this is to pretend the data from the
3042 * parent came up short--ending at the overlap boundary.
3043 */
3044 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3045 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003046 if (obj_end > rbd_dev->parent_overlap) {
3047 u64 xferred = 0;
3048
3049 if (obj_request->img_offset < rbd_dev->parent_overlap)
3050 xferred = rbd_dev->parent_overlap -
3051 obj_request->img_offset;
3052
Alex Elder02c74fb2013-05-06 17:40:33 -05003053 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003054 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05003055 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003056 }
3057out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06003058 rbd_img_obj_request_read_callback(obj_request);
3059 rbd_obj_request_complete(obj_request);
3060}
3061
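/*
 * Satisfy an object read that came back -ENOENT by reading the
 * corresponding range from the parent image.
 */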
3062static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3063{
Alex Elder8b3e1a52013-01-24 16:13:36 -06003064 struct rbd_img_request *img_request;
3065 int result;
3066
3067 rbd_assert(obj_request_img_data_test(obj_request));
3068 rbd_assert(obj_request->img_request != NULL);
3069 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003070 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003071
Alex Elder8b3e1a52013-01-24 16:13:36 -06003072 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003073 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003074 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003075 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003076 result = -ENOMEM;
3077 if (!img_request)
3078 goto out_err;
3079
Alex Elder5b2ab722013-05-06 17:40:33 -05003080 if (obj_request->type == OBJ_REQUEST_BIO)
3081 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3082 obj_request->bio_list);
3083 else
3084 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3085 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003086 if (result)
3087 goto out_err;
3088
3089 img_request->callback = rbd_img_parent_read_callback;
3090 result = rbd_img_request_submit(img_request);
3091 if (result)
3092 goto out_err;
3093
3094 return;
3095out_err:
3096 if (img_request)
3097 rbd_img_request_put(img_request);
3098 obj_request->result = result;
3099 obj_request->xferred = 0;
3100 obj_request_done_set(obj_request);
3101}
3102
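/*
 * Synchronously acknowledge a notify received on the header object.
 */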
Josh Durgin20e0af62013-08-29 17:36:03 -07003103static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
Alex Elderb8d70032012-11-30 17:53:04 -06003104{
3105 struct rbd_obj_request *obj_request;
Alex Elder21692382013-04-05 01:27:12 -05003106 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06003107 int ret;
3108
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003109 obj_request = rbd_obj_request_create(rbd_dev->header_oid.name, 0, 0,
Alex Elderb8d70032012-11-30 17:53:04 -06003110 OBJ_REQUEST_NODATA);
3111 if (!obj_request)
3112 return -ENOMEM;
3113
3114 ret = -ENOMEM;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003115 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003116 obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003117 if (!obj_request->osd_req)
3118 goto out;
3119
Alex Elderc99d2d42013-04-05 01:27:11 -05003120 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003121 notify_id, 0, 0);
Alex Elder9d4df012013-04-19 15:34:50 -05003122 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003123
Alex Elderb8d70032012-11-30 17:53:04 -06003124 ret = rbd_obj_request_submit(osdc, obj_request);
Alex Eldercf81b602013-01-17 12:18:46 -06003125 if (ret)
Josh Durgin20e0af62013-08-29 17:36:03 -07003126 goto out;
3127 ret = rbd_obj_request_wait(obj_request);
3128out:
3129 rbd_obj_request_put(obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003130
3131 return ret;
3132}
3133
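/*
 * Handler for notifies on the header object: refresh the image
 * header and acknowledge the notification.
 */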
3134static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3135{
3136 struct rbd_device *rbd_dev = (struct rbd_device *)data;
Alex Eldere627db02013-05-06 07:40:30 -05003137 int ret;
Alex Elderb8d70032012-11-30 17:53:04 -06003138
Alex Elder37206ee2013-02-20 17:32:08 -06003139 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003140 rbd_dev->header_oid.name, (unsigned long long)notify_id,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003141 (unsigned int)opcode);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003142
3143 /*
3144 * Until adequate refresh error handling is in place, there is
3145 * not much we can do here, except warn.
3146 *
3147 * See http://tracker.ceph.com/issues/5040
3148 */
Alex Eldere627db02013-05-06 07:40:30 -05003149 ret = rbd_dev_refresh(rbd_dev);
3150 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003151 rbd_warn(rbd_dev, "refresh failed: %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003152
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003153 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3154 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003155 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003156}
3157
Alex Elder9969ebc2013-01-18 12:31:10 -06003158/*
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003159 * Send a (un)watch request and wait for the ack. Return a request
 3160	 * with a ref held on success, or an ERR_PTR on error.
3161 */
3162static struct rbd_obj_request *rbd_obj_watch_request_helper(
3163 struct rbd_device *rbd_dev,
3164 bool watch)
3165{
3166 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003167 struct ceph_options *opts = osdc->client->options;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003168 struct rbd_obj_request *obj_request;
3169 int ret;
3170
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003171 obj_request = rbd_obj_request_create(rbd_dev->header_oid.name, 0, 0,
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003172 OBJ_REQUEST_NODATA);
3173 if (!obj_request)
3174 return ERR_PTR(-ENOMEM);
3175
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003176 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003177 obj_request);
3178 if (!obj_request->osd_req) {
3179 ret = -ENOMEM;
3180 goto out;
3181 }
3182
3183 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3184 rbd_dev->watch_event->cookie, 0, watch);
3185 rbd_osd_req_format_write(obj_request);
3186
3187 if (watch)
3188 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3189
3190 ret = rbd_obj_request_submit(osdc, obj_request);
3191 if (ret)
3192 goto out;
3193
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003194 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003195 if (ret)
3196 goto out;
3197
3198 ret = obj_request->result;
3199 if (ret) {
3200 if (watch)
3201 rbd_obj_request_end(obj_request);
3202 goto out;
3203 }
3204
3205 return obj_request;
3206
3207out:
3208 rbd_obj_request_put(obj_request);
3209 return ERR_PTR(ret);
3210}
3211
3212/*
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003213 * Initiate a watch request, synchronously.
Alex Elder9969ebc2013-01-18 12:31:10 -06003214 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003215static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003216{
3217 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3218 struct rbd_obj_request *obj_request;
Alex Elder9969ebc2013-01-18 12:31:10 -06003219 int ret;
3220
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003221 rbd_assert(!rbd_dev->watch_event);
3222 rbd_assert(!rbd_dev->watch_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06003223
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003224 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3225 &rbd_dev->watch_event);
3226 if (ret < 0)
3227 return ret;
Alex Elder9969ebc2013-01-18 12:31:10 -06003228
Ilya Dryomov76756a52014-06-20 18:29:20 +04003229 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3230 if (IS_ERR(obj_request)) {
3231 ceph_osdc_cancel_event(rbd_dev->watch_event);
3232 rbd_dev->watch_event = NULL;
3233 return PTR_ERR(obj_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003234 }
Alex Elder9969ebc2013-01-18 12:31:10 -06003235
Alex Elder8eb87562013-01-25 17:08:55 -06003236 /*
3237 * A watch request is set to linger, so the underlying osd
3238 * request won't go away until we unregister it. We retain
3239 * a pointer to the object request during that time (in
Ilya Dryomov76756a52014-06-20 18:29:20 +04003240 * rbd_dev->watch_request), so we'll keep a reference to it.
3241 * We'll drop that reference after we've unregistered it in
3242 * rbd_dev_header_unwatch_sync().
Alex Elder8eb87562013-01-25 17:08:55 -06003243 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003244 rbd_dev->watch_request = obj_request;
Alex Elder8eb87562013-01-25 17:08:55 -06003245
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003246 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003247}
3248
Ilya Dryomovc525f032016-04-28 16:07:26 +02003249static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003250{
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003251 struct rbd_obj_request *obj_request;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003252
3253 rbd_assert(rbd_dev->watch_event);
3254 rbd_assert(rbd_dev->watch_request);
3255
Ilya Dryomov76756a52014-06-20 18:29:20 +04003256 rbd_obj_request_end(rbd_dev->watch_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003257 rbd_obj_request_put(rbd_dev->watch_request);
3258 rbd_dev->watch_request = NULL;
3259
Ilya Dryomov76756a52014-06-20 18:29:20 +04003260 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3261 if (!IS_ERR(obj_request))
3262 rbd_obj_request_put(obj_request);
3263 else
3264 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3265 PTR_ERR(obj_request));
3266
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003267 ceph_osdc_cancel_event(rbd_dev->watch_event);
3268 rbd_dev->watch_event = NULL;
Ilya Dryomovc525f032016-04-28 16:07:26 +02003269}
3270
3271/*
3272 * Tear down a watch request, synchronously.
3273 */
3274static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3275{
3276 __rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomov811c6682016-04-15 16:22:16 +02003277
3278 dout("%s flushing notifies\n", __func__);
3279 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02003280}
3281
Alex Elder36be9a72013-01-19 00:30:28 -06003282/*
Alex Elderf40eb342013-04-25 15:09:42 -05003283 * Synchronous osd object method call. Returns the number of bytes
 3284	 * returned in the inbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003285 */
3286static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3287 const char *object_name,
3288 const char *class_name,
3289 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003290 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003291 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003292 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003293 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003294{
Alex Elder21692382013-04-05 01:27:12 -05003295 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06003296 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06003297 struct page **pages;
3298 u32 page_count;
3299 int ret;
3300
3301 /*
Alex Elder6010a452013-04-05 01:27:11 -05003302 * Method calls are ultimately read operations. The result
3303 * should placed into the inbound buffer provided. They
3304 * also supply outbound data--parameters for the object
3305 * method. Currently if this is present it will be a
3306 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003307 */
Alex Elder57385b52013-04-21 12:14:45 -05003308 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06003309 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3310 if (IS_ERR(pages))
3311 return PTR_ERR(pages);
3312
3313 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05003314 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06003315 OBJ_REQUEST_PAGES);
3316 if (!obj_request)
3317 goto out;
3318
3319 obj_request->pages = pages;
3320 obj_request->page_count = page_count;
3321
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003322 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003323 obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06003324 if (!obj_request->osd_req)
3325 goto out;
3326
Alex Elderc99d2d42013-04-05 01:27:11 -05003327 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05003328 class_name, method_name);
3329 if (outbound_size) {
3330 struct ceph_pagelist *pagelist;
3331
3332 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3333 if (!pagelist)
3334 goto out;
3335
3336 ceph_pagelist_init(pagelist);
3337 ceph_pagelist_append(pagelist, outbound, outbound_size);
3338 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3339 pagelist);
3340 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05003341 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3342 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05003343 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003344 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003345
Alex Elder36be9a72013-01-19 00:30:28 -06003346 ret = rbd_obj_request_submit(osdc, obj_request);
3347 if (ret)
3348 goto out;
3349 ret = rbd_obj_request_wait(obj_request);
3350 if (ret)
3351 goto out;
3352
3353 ret = obj_request->result;
3354 if (ret < 0)
3355 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05003356
3357 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3358 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003359 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06003360out:
3361 if (obj_request)
3362 rbd_obj_request_put(obj_request);
3363 else
3364 ceph_release_page_vector(pages, page_count);
3365
3366 return ret;
3367}
3368
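/*
 * Work function that services one block layer request: validate it,
 * build an image request (grabbing the snapshot context for
 * writes/discards), fill it with object requests and submit it.
 */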
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003369static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003370{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003371 struct request *rq = blk_mq_rq_from_pdu(work);
3372 struct rbd_device *rbd_dev = rq->q->queuedata;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003373 struct rbd_img_request *img_request;
Josh Durgin4e752f02014-04-08 11:12:11 -07003374 struct ceph_snap_context *snapc = NULL;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003375 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3376 u64 length = blk_rq_bytes(rq);
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003377 enum obj_operation_type op_type;
Josh Durgin4e752f02014-04-08 11:12:11 -07003378 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003379 int result;
3380
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003381 if (rq->cmd_type != REQ_TYPE_FS) {
3382 dout("%s: non-fs request type %d\n", __func__,
3383 (int) rq->cmd_type);
3384 result = -EIO;
3385 goto err;
3386 }
3387
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003388 if (rq->cmd_flags & REQ_DISCARD)
3389 op_type = OBJ_OP_DISCARD;
3390 else if (rq->cmd_flags & REQ_WRITE)
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003391 op_type = OBJ_OP_WRITE;
3392 else
3393 op_type = OBJ_OP_READ;
3394
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003395 /* Ignore/skip any zero-length requests */
3396
3397 if (!length) {
3398 dout("%s: zero-length request\n", __func__);
3399 result = 0;
3400 goto err_rq;
3401 }
3402
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003403 /* Only reads are allowed to a read-only device */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003404
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003405 if (op_type != OBJ_OP_READ) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003406 if (rbd_dev->mapping.read_only) {
3407 result = -EROFS;
3408 goto err_rq;
3409 }
3410 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3411 }
3412
3413 /*
3414 * Quit early if the mapped snapshot no longer exists. It's
3415 * still possible the snapshot will have disappeared by the
3416 * time our request arrives at the osd, but there's no sense in
3417 * sending it if we already know.
3418 */
3419 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3420 dout("request for non-existent snapshot");
3421 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3422 result = -ENXIO;
3423 goto err_rq;
3424 }
3425
3426 if (offset && length > U64_MAX - offset + 1) {
3427 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3428 length);
3429 result = -EINVAL;
3430 goto err_rq; /* Shouldn't happen */
3431 }
3432
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003433 blk_mq_start_request(rq);
3434
Josh Durgin4e752f02014-04-08 11:12:11 -07003435 down_read(&rbd_dev->header_rwsem);
3436 mapping_size = rbd_dev->mapping.size;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003437 if (op_type != OBJ_OP_READ) {
Josh Durgin4e752f02014-04-08 11:12:11 -07003438 snapc = rbd_dev->header.snapc;
3439 ceph_get_snap_context(snapc);
3440 }
3441 up_read(&rbd_dev->header_rwsem);
3442
3443 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003444 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07003445 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003446 result = -EIO;
3447 goto err_rq;
3448 }
3449
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003450 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07003451 snapc);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003452 if (!img_request) {
3453 result = -ENOMEM;
3454 goto err_rq;
3455 }
3456 img_request->rq = rq;
Ilya Dryomov70b16db2015-11-27 19:23:24 +01003457 snapc = NULL; /* img_request consumes a ref */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003458
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003459 if (op_type == OBJ_OP_DISCARD)
3460 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3461 NULL);
3462 else
3463 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3464 rq->bio);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003465 if (result)
3466 goto err_img_request;
3467
3468 result = rbd_img_request_submit(img_request);
3469 if (result)
3470 goto err_img_request;
3471
3472 return;
3473
3474err_img_request:
3475 rbd_img_request_put(img_request);
3476err_rq:
3477 if (result)
3478 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003479 obj_op_name(op_type), length, offset, result);
SF Markus Elfringe96a6502014-11-02 15:20:59 +01003480 ceph_put_snap_context(snapc);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003481err:
3482 blk_mq_end_request(rq, result);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003483}
3484
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003485static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3486 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003487{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003488 struct request *rq = bd->rq;
3489 struct work_struct *work = blk_mq_rq_to_pdu(rq);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003490
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003491 queue_work(rbd_wq, work);
3492 return BLK_MQ_RQ_QUEUE_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06003493}
3494
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003495static void rbd_free_disk(struct rbd_device *rbd_dev)
3496{
3497 struct gendisk *disk = rbd_dev->disk;
3498
3499 if (!disk)
3500 return;
3501
Alex Eldera0cab922013-04-25 23:15:08 -05003502 rbd_dev->disk = NULL;
3503 if (disk->flags & GENHD_FL_UP) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003504 del_gendisk(disk);
Alex Eldera0cab922013-04-25 23:15:08 -05003505 if (disk->queue)
3506 blk_cleanup_queue(disk->queue);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003507 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05003508 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003509 put_disk(disk);
3510}
3511
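/*
 * Synchronously read up to "length" bytes from the named object,
 * starting at "offset", into the buffer provided.  Returns the
 * number of bytes read or a negative error code.
 */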
Alex Elder788e2df2013-01-17 12:25:27 -06003512static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3513 const char *object_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003514 u64 offset, u64 length, void *buf)
Alex Elder788e2df2013-01-17 12:25:27 -06003515
3516{
Alex Elder21692382013-04-05 01:27:12 -05003517 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder788e2df2013-01-17 12:25:27 -06003518 struct rbd_obj_request *obj_request;
Alex Elder788e2df2013-01-17 12:25:27 -06003519 struct page **pages = NULL;
3520 u32 page_count;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003521 size_t size;
Alex Elder788e2df2013-01-17 12:25:27 -06003522 int ret;
3523
3524 page_count = (u32) calc_pages_for(offset, length);
3525 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3526 if (IS_ERR(pages))
Jan Karaa8d42052014-10-22 09:17:24 +02003527 return PTR_ERR(pages);
Alex Elder788e2df2013-01-17 12:25:27 -06003528
3529 ret = -ENOMEM;
3530 obj_request = rbd_obj_request_create(object_name, offset, length,
Alex Elder36be9a72013-01-19 00:30:28 -06003531 OBJ_REQUEST_PAGES);
Alex Elder788e2df2013-01-17 12:25:27 -06003532 if (!obj_request)
3533 goto out;
3534
3535 obj_request->pages = pages;
3536 obj_request->page_count = page_count;
3537
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003538 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003539 obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06003540 if (!obj_request->osd_req)
3541 goto out;
3542
Alex Elderc99d2d42013-04-05 01:27:11 -05003543 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3544 offset, length, 0, 0);
Alex Elder406e2c92013-04-15 14:50:36 -05003545 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
Alex Eldera4ce40a2013-04-05 01:27:12 -05003546 obj_request->pages,
Alex Elder44cd1882013-04-05 01:27:12 -05003547 obj_request->length,
3548 obj_request->offset & ~PAGE_MASK,
3549 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003550 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003551
Alex Elder788e2df2013-01-17 12:25:27 -06003552 ret = rbd_obj_request_submit(osdc, obj_request);
3553 if (ret)
3554 goto out;
3555 ret = rbd_obj_request_wait(obj_request);
3556 if (ret)
3557 goto out;
3558
3559 ret = obj_request->result;
3560 if (ret < 0)
3561 goto out;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003562
3563 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3564 size = (size_t) obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003565 ceph_copy_from_page_vector(pages, buf, 0, size);
Alex Elder7097f8d2013-04-30 00:44:33 -05003566 rbd_assert(size <= (size_t)INT_MAX);
3567 ret = (int)size;
Alex Elder788e2df2013-01-17 12:25:27 -06003568out:
3569 if (obj_request)
3570 rbd_obj_request_put(obj_request);
3571 else
3572 ceph_release_page_vector(pages, page_count);
3573
3574 return ret;
3575}
3576
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003577/*
Alex Elder662518b2013-05-06 09:51:29 -05003578 * Read the complete header for the given rbd device. On successful
3579 * return, the rbd_dev->header field will contain up-to-date
3580 * information about the image.
Alex Elder4156d992012-08-02 11:29:46 -05003581 */
Alex Elder99a41eb2013-05-06 09:51:30 -05003582static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d992012-08-02 11:29:46 -05003583{
3584 struct rbd_image_header_ondisk *ondisk = NULL;
3585 u32 snap_count = 0;
3586 u64 names_size = 0;
3587 u32 want_count;
3588 int ret;
3589
3590 /*
3591 * The complete header will include an array of its 64-bit
3592 * snapshot ids, followed by the names of those snapshots as
3593 * a contiguous block of NUL-terminated strings. Note that
3594 * the number of snapshots could change by the time we read
3595 * it in, in which case we re-read it.
3596 */
3597 do {
3598 size_t size;
3599
3600 kfree(ondisk);
3601
3602 size = sizeof (*ondisk);
3603 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3604 size += names_size;
3605 ondisk = kmalloc(size, GFP_KERNEL);
3606 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05003607 return -ENOMEM;
Alex Elder4156d992012-08-02 11:29:46 -05003608
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02003609 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003610 0, size, ondisk);
Alex Elder4156d992012-08-02 11:29:46 -05003611 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05003612 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05003613 if ((size_t)ret < size) {
Alex Elder4156d992012-08-02 11:29:46 -05003614 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003615 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3616 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05003617 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003618 }
3619 if (!rbd_dev_ondisk_valid(ondisk)) {
3620 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003621 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05003622 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003623 }
3624
3625 names_size = le64_to_cpu(ondisk->snap_names_len);
3626 want_count = snap_count;
3627 snap_count = le32_to_cpu(ondisk->snap_count);
3628 } while (snap_count != want_count);
3629
Alex Elder662518b2013-05-06 09:51:29 -05003630 ret = rbd_header_from_disk(rbd_dev, ondisk);
3631out:
Alex Elder4156d992012-08-02 11:29:46 -05003632 kfree(ondisk);
3633
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003634 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003635}
3636
Alex Elder15228ed2013-05-01 12:43:03 -05003637/*
3638 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3639 * has disappeared from the (just updated) snapshot context.
3640 */
3641static void rbd_exists_validate(struct rbd_device *rbd_dev)
3642{
3643 u64 snap_id;
3644
3645 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3646 return;
3647
3648 snap_id = rbd_dev->spec->snap_id;
3649 if (snap_id == CEPH_NOSNAP)
3650 return;
3651
3652 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3653 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3654}
3655
Josh Durgin98752012013-08-29 17:26:31 -07003656static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3657{
3658 sector_t size;
Josh Durgin98752012013-08-29 17:26:31 -07003659
3660 /*
Ilya Dryomov811c6682016-04-15 16:22:16 +02003661 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3662 * try to update its size. If REMOVING is set, updating size
3663 * is just useless work since the device can't be opened.
Josh Durgin98752012013-08-29 17:26:31 -07003664 */
Ilya Dryomov811c6682016-04-15 16:22:16 +02003665 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3666 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
Josh Durgin98752012013-08-29 17:26:31 -07003667 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3668 dout("setting size to %llu sectors", (unsigned long long)size);
3669 set_capacity(rbd_dev->disk, size);
3670 revalidate_disk(rbd_dev->disk);
3671 }
3672}
3673
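/*
 * Re-read the image header, revalidate the parent overlap and the
 * mapped snapshot, and update the block device size if the mapping
 * size has changed.
 */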
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003674static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05003675{
Alex Eldere627db02013-05-06 07:40:30 -05003676 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05003677 int ret;
3678
Alex Eldercfbf6372013-05-31 17:40:45 -05003679 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05003680 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04003681
3682 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003683 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003684 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05003685
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003686 /*
3687 * If there is a parent, see if it has disappeared due to the
3688 * mapped image getting flattened.
3689 */
3690 if (rbd_dev->parent) {
3691 ret = rbd_dev_v2_parent_info(rbd_dev);
3692 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003693 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003694 }
3695
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003696 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003697 rbd_dev->mapping.size = rbd_dev->header.image_size;
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003698 } else {
3699 /* validate mapped snapshot's EXISTS flag */
3700 rbd_exists_validate(rbd_dev);
3701 }
Alex Elder15228ed2013-05-01 12:43:03 -05003702
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003703out:
Alex Eldercfbf6372013-05-31 17:40:45 -05003704 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003705 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07003706 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05003707
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003708 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05003709}
3710
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003711static int rbd_init_request(void *data, struct request *rq,
3712 unsigned int hctx_idx, unsigned int request_idx,
3713 unsigned int numa_node)
3714{
3715 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3716
3717 INIT_WORK(work, rbd_queue_workfn);
3718 return 0;
3719}
3720
3721static struct blk_mq_ops rbd_mq_ops = {
3722 .queue_rq = rbd_queue_rq,
3723 .map_queue = blk_mq_map_queue,
3724 .init_request = rbd_init_request,
3725};
3726
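/*
 * Set up the gendisk and blk-mq queue for the mapped image.  The I/O
 * limits (maximum request size, segment size, discard granularity)
 * are all derived from the RADOS object size.
 */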
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003727static int rbd_init_disk(struct rbd_device *rbd_dev)
3728{
3729 struct gendisk *disk;
3730 struct request_queue *q;
Alex Elder593a9e72012-02-07 12:03:37 -06003731 u64 segment_size;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003732 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003733
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003734 /* create gendisk info */
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003735 disk = alloc_disk(single_major ?
3736 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3737 RBD_MINORS_PER_MAJOR);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003738 if (!disk)
Alex Elder1fcdb8a2012-08-29 17:11:06 -05003739 return -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003740
Alex Elderf0f8cef2012-01-29 13:57:44 -06003741 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
Alex Elderde71a292012-07-03 16:01:19 -05003742 rbd_dev->dev_id);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003743 disk->major = rbd_dev->major;
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003744 disk->first_minor = rbd_dev->minor;
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003745 if (single_major)
3746 disk->flags |= GENHD_FL_EXT_DEVT;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003747 disk->fops = &rbd_bd_ops;
3748 disk->private_data = rbd_dev;
3749
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003750 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3751 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003752 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003753 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003754 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003755 rbd_dev->tag_set.nr_hw_queues = 1;
3756 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3757
3758 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3759 if (err)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003760 goto out_disk;
Josh Durgin029bcbd2011-07-22 11:35:23 -07003761
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003762 q = blk_mq_init_queue(&rbd_dev->tag_set);
3763 if (IS_ERR(q)) {
3764 err = PTR_ERR(q);
3765 goto out_tag_set;
3766 }
3767
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03003768 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3769 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06003770
Josh Durgin029bcbd2011-07-22 11:35:23 -07003771 /* set io sizes to object size */
Alex Elder593a9e72012-02-07 12:03:37 -06003772 segment_size = rbd_obj_bytes(&rbd_dev->header);
3773 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
Ilya Dryomov0d9fde42015-10-07 16:09:35 +02003774 q->limits.max_sectors = queue_max_hw_sectors(q);
Ilya Dryomovd3834fe2015-06-12 19:19:02 +03003775 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
Alex Elder593a9e72012-02-07 12:03:37 -06003776 blk_queue_max_segment_size(q, segment_size);
3777 blk_queue_io_min(q, segment_size);
3778 blk_queue_io_opt(q, segment_size);
Josh Durgin029bcbd2011-07-22 11:35:23 -07003779
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003780 /* enable the discard support */
3781 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3782 q->limits.discard_granularity = segment_size;
3783 q->limits.discard_alignment = segment_size;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06003784 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
Josh Durginb76f8232014-04-07 16:52:03 -07003785 q->limits.discard_zeroes_data = 1;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003786
Ronny Hegewaldbae818e2015-10-15 18:50:46 +00003787 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3788 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3789
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003790 disk->queue = q;
3791
3792 q->queuedata = rbd_dev;
3793
3794 rbd_dev->disk = disk;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003795
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003796 return 0;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003797out_tag_set:
3798 blk_mq_free_tag_set(&rbd_dev->tag_set);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003799out_disk:
3800 put_disk(disk);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003801 return err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003802}
3803
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003804/*
3805 sysfs
3806*/
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003807
Alex Elder593a9e72012-02-07 12:03:37 -06003808static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3809{
3810 return container_of(dev, struct rbd_device, dev);
3811}
3812
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003813static ssize_t rbd_size_show(struct device *dev,
3814 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003815{
Alex Elder593a9e72012-02-07 12:03:37 -06003816 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003817
Alex Elderfc71d832013-04-26 15:44:36 -05003818 return sprintf(buf, "%llu\n",
3819 (unsigned long long)rbd_dev->mapping.size);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003820}
3821
Alex Elder34b13182012-07-13 20:35:12 -05003822/*
3823 * Note this shows the features for whatever's mapped, which is not
3824 * necessarily the base image.
3825 */
3826static ssize_t rbd_features_show(struct device *dev,
3827 struct device_attribute *attr, char *buf)
3828{
3829 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3830
3831 return sprintf(buf, "0x%016llx\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003832 (unsigned long long)rbd_dev->mapping.features);
Alex Elder34b13182012-07-13 20:35:12 -05003833}
3834
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003835static ssize_t rbd_major_show(struct device *dev,
3836 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003837{
Alex Elder593a9e72012-02-07 12:03:37 -06003838 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003839
Alex Elderfc71d832013-04-26 15:44:36 -05003840 if (rbd_dev->major)
3841 return sprintf(buf, "%d\n", rbd_dev->major);
3842
3843 return sprintf(buf, "(none)\n");
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003844}
Alex Elderfc71d832013-04-26 15:44:36 -05003845
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003846static ssize_t rbd_minor_show(struct device *dev,
3847 struct device_attribute *attr, char *buf)
3848{
3849 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3850
3851 return sprintf(buf, "%d\n", rbd_dev->minor);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003852}
3853
3854static ssize_t rbd_client_id_show(struct device *dev,
3855 struct device_attribute *attr, char *buf)
3856{
Alex Elder593a9e72012-02-07 12:03:37 -06003857 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003858
Alex Elder1dbb4392012-01-24 10:08:37 -06003859 return sprintf(buf, "client%lld\n",
3860 ceph_client_id(rbd_dev->rbd_client->client));
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003861}
3862
3863static ssize_t rbd_pool_show(struct device *dev,
3864 struct device_attribute *attr, char *buf)
3865{
Alex Elder593a9e72012-02-07 12:03:37 -06003866 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003867
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003868 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003869}
3870
Alex Elder9bb2f332012-07-12 10:46:35 -05003871static ssize_t rbd_pool_id_show(struct device *dev,
3872 struct device_attribute *attr, char *buf)
3873{
3874 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3875
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003876 return sprintf(buf, "%llu\n",
Alex Elderfc71d832013-04-26 15:44:36 -05003877 (unsigned long long) rbd_dev->spec->pool_id);
Alex Elder9bb2f332012-07-12 10:46:35 -05003878}
3879
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003880static ssize_t rbd_name_show(struct device *dev,
3881 struct device_attribute *attr, char *buf)
3882{
Alex Elder593a9e72012-02-07 12:03:37 -06003883 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003884
Alex Eldera92ffdf2012-10-30 19:40:33 -05003885 if (rbd_dev->spec->image_name)
3886 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3887
3888 return sprintf(buf, "(unknown)\n");
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003889}
3890
Alex Elder589d30e2012-07-10 20:30:11 -05003891static ssize_t rbd_image_id_show(struct device *dev,
3892 struct device_attribute *attr, char *buf)
3893{
3894 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3895
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003896 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05003897}
3898
Alex Elder34b13182012-07-13 20:35:12 -05003899/*
3900 * Shows the name of the currently-mapped snapshot (or
3901 * RBD_SNAP_HEAD_NAME for the base image).
3902 */
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003903static ssize_t rbd_snap_show(struct device *dev,
3904 struct device_attribute *attr,
3905 char *buf)
3906{
Alex Elder593a9e72012-02-07 12:03:37 -06003907 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003908
Alex Elder0d7dbfc2012-10-25 23:34:41 -05003909 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003910}
3911
Alex Elder86b00e02012-10-25 23:34:42 -05003912/*
Ilya Dryomovff961282014-07-22 21:53:07 +04003913 * For a v2 image, shows the chain of parent images, separated by empty
3914 * lines. For v1 images or if there is no parent, shows "(no parent
3915 * image)".
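 *
 * For example, a clone's parent entry might read (values are purely
 * illustrative):
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1014b2ae8944a
 *   image_name parent-image
 *   snap_id 4
 *   snap_name snap1
 *   overlap 10737418240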
Alex Elder86b00e02012-10-25 23:34:42 -05003916 */
3917static ssize_t rbd_parent_show(struct device *dev,
Ilya Dryomovff961282014-07-22 21:53:07 +04003918 struct device_attribute *attr,
3919 char *buf)
Alex Elder86b00e02012-10-25 23:34:42 -05003920{
3921 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Ilya Dryomovff961282014-07-22 21:53:07 +04003922 ssize_t count = 0;
Alex Elder86b00e02012-10-25 23:34:42 -05003923
Ilya Dryomovff961282014-07-22 21:53:07 +04003924 if (!rbd_dev->parent)
Alex Elder86b00e02012-10-25 23:34:42 -05003925 return sprintf(buf, "(no parent image)\n");
3926
Ilya Dryomovff961282014-07-22 21:53:07 +04003927 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3928 struct rbd_spec *spec = rbd_dev->parent_spec;
Alex Elder86b00e02012-10-25 23:34:42 -05003929
Ilya Dryomovff961282014-07-22 21:53:07 +04003930 count += sprintf(&buf[count], "%s"
3931 "pool_id %llu\npool_name %s\n"
3932 "image_id %s\nimage_name %s\n"
3933 "snap_id %llu\nsnap_name %s\n"
3934 "overlap %llu\n",
3935 !count ? "" : "\n", /* first? */
3936 spec->pool_id, spec->pool_name,
3937 spec->image_id, spec->image_name ?: "(unknown)",
3938 spec->snap_id, spec->snap_name,
3939 rbd_dev->parent_overlap);
3940 }
Alex Elder86b00e02012-10-25 23:34:42 -05003941
Ilya Dryomovff961282014-07-22 21:53:07 +04003942 return count;
Alex Elder86b00e02012-10-25 23:34:42 -05003943}
3944
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003945static ssize_t rbd_image_refresh(struct device *dev,
3946 struct device_attribute *attr,
3947 const char *buf,
3948 size_t size)
3949{
Alex Elder593a9e72012-02-07 12:03:37 -06003950 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05003951 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003952
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003953 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05003954 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003955 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05003956
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003957 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003958}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003959
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003960static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
Alex Elder34b13182012-07-13 20:35:12 -05003961static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003962static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003963static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003964static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3965static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
Alex Elder9bb2f332012-07-12 10:46:35 -05003966static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003967static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
Alex Elder589d30e2012-07-10 20:30:11 -05003968static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003969static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3970static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
Alex Elder86b00e02012-10-25 23:34:42 -05003971static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003972
3973static struct attribute *rbd_attrs[] = {
3974 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05003975 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003976 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003977 &dev_attr_minor.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003978 &dev_attr_client_id.attr,
3979 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05003980 &dev_attr_pool_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003981 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05003982 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003983 &dev_attr_current_snap.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05003984 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003985 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003986 NULL
3987};
3988
3989static struct attribute_group rbd_attr_group = {
3990 .attrs = rbd_attrs,
3991};
3992
3993static const struct attribute_group *rbd_attr_groups[] = {
3994 &rbd_attr_group,
3995 NULL
3996};
3997
Ilya Dryomov6cac4692015-10-16 20:11:25 +02003998static void rbd_dev_release(struct device *dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003999
4000static struct device_type rbd_device_type = {
4001 .name = "rbd",
4002 .groups = rbd_attr_groups,
Ilya Dryomov6cac4692015-10-16 20:11:25 +02004003 .release = rbd_dev_release,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004004};
4005
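/*
 * rbd image specs are reference counted: rbd_spec_get() and
 * rbd_spec_put() take and drop references, and rbd_spec_free()
 * releases the dynamically allocated names once the last
 * reference is gone.
 */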
Alex Elder8b8fb992012-10-26 17:25:24 -05004006static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4007{
4008 kref_get(&spec->kref);
4009
4010 return spec;
4011}
4012
4013static void rbd_spec_free(struct kref *kref);
4014static void rbd_spec_put(struct rbd_spec *spec)
4015{
4016 if (spec)
4017 kref_put(&spec->kref, rbd_spec_free);
4018}
4019
4020static struct rbd_spec *rbd_spec_alloc(void)
4021{
4022 struct rbd_spec *spec;
4023
4024 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4025 if (!spec)
4026 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04004027
4028 spec->pool_id = CEPH_NOPOOL;
4029 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05004030 kref_init(&spec->kref);
4031
Alex Elder8b8fb992012-10-26 17:25:24 -05004032 return spec;
4033}
4034
4035static void rbd_spec_free(struct kref *kref)
4036{
4037 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4038
4039 kfree(spec->pool_name);
4040 kfree(spec->image_id);
4041 kfree(spec->image_name);
4042 kfree(spec->snap_name);
4043 kfree(spec);
4044}
4045
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004046static void rbd_dev_release(struct device *dev)
4047{
4048 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4049 bool need_put = !!rbd_dev->opts;
4050
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004051 ceph_oid_destroy(&rbd_dev->header_oid);
4052
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004053 rbd_put_client(rbd_dev->rbd_client);
4054 rbd_spec_put(rbd_dev->spec);
4055 kfree(rbd_dev->opts);
4056 kfree(rbd_dev);
4057
4058 /*
4059 * This is racy, but way better than dropping the module reference
4060 * outside of the release callback. The race window is pretty small, so
4061 * doing something similar to dm (dm-builtin.c) is overkill.
4062 */
4063 if (need_put)
4064 module_put(THIS_MODULE);
4065}
4066
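/*
 * Allocate and initialize an rbd_device.  The client reference, spec
 * and options passed in are owned by the new device and released in
 * rbd_dev_release(); a mapping device (opts != NULL) also pins the
 * module until then.
 */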
Alex Eldercc344fa2013-02-19 12:25:56 -06004067static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
Ilya Dryomovd1475432015-06-22 13:24:48 +03004068 struct rbd_spec *spec,
4069 struct rbd_options *opts)
Alex Elderc53d5892012-10-25 23:34:42 -05004070{
4071 struct rbd_device *rbd_dev;
4072
4073 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4074 if (!rbd_dev)
4075 return NULL;
4076
4077 spin_lock_init(&rbd_dev->lock);
Alex Elder6d292902013-01-14 12:43:31 -06004078 rbd_dev->flags = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05004079 atomic_set(&rbd_dev->parent_ref, 0);
Alex Elderc53d5892012-10-25 23:34:42 -05004080 INIT_LIST_HEAD(&rbd_dev->node);
Alex Elderc53d5892012-10-25 23:34:42 -05004081 init_rwsem(&rbd_dev->header_rwsem);
4082
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004083 ceph_oid_init(&rbd_dev->header_oid);
4084
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004085 rbd_dev->dev.bus = &rbd_bus_type;
4086 rbd_dev->dev.type = &rbd_device_type;
4087 rbd_dev->dev.parent = &rbd_root_dev;
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004088 device_initialize(&rbd_dev->dev);
4089
Alex Elderc53d5892012-10-25 23:34:42 -05004090 rbd_dev->rbd_client = rbdc;
Ilya Dryomovd1475432015-06-22 13:24:48 +03004091 rbd_dev->spec = spec;
4092 rbd_dev->opts = opts;
Alex Elderc53d5892012-10-25 23:34:42 -05004093
Alex Elder0903e872012-11-14 12:25:19 -06004094 /* Initialize the layout used for all rbd requests */
4095
4096 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4097 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4098 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4099 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4100
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004101 /*
4102 * If this is a mapping rbd_dev (as opposed to a parent one),
4103 * pin our module. We have a ref from do_rbd_add(), so use
4104 * __module_get().
4105 */
4106 if (rbd_dev->opts)
4107 __module_get(THIS_MODULE);
4108
Alex Elderc53d5892012-10-25 23:34:42 -05004109 return rbd_dev;
4110}
4111
4112static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4113{
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02004114 if (rbd_dev)
4115 put_device(&rbd_dev->dev);
Alex Elderc53d5892012-10-25 23:34:42 -05004116}
4117
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004118/*
Alex Elder9d475de2012-07-03 16:01:19 -05004119 * Get the size and object order for an image snapshot, or if
4120 * snap_id is CEPH_NOSNAP, get this information for the base
4121 * image.
4122 */
4123static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4124 u8 *order, u64 *snap_size)
4125{
4126 __le64 snapid = cpu_to_le64(snap_id);
4127 int ret;
4128 struct {
4129 u8 order;
4130 __le64 size;
4131 } __attribute__ ((packed)) size_buf = { 0 };
4132
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004133 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder9d475de2012-07-03 16:01:19 -05004134 "rbd", "get_size",
Alex Elder41579762013-04-21 12:14:45 -05004135 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004136 &size_buf, sizeof (size_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004137 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder9d475de2012-07-03 16:01:19 -05004138 if (ret < 0)
4139 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004140 if (ret < sizeof (size_buf))
4141 return -ERANGE;
Alex Elder9d475de2012-07-03 16:01:19 -05004142
Josh Durginc3545572013-08-28 17:08:10 -07004143 if (order) {
Alex Elderc86f86e2013-04-25 15:09:41 -05004144 *order = size_buf.order;
Josh Durginc3545572013-08-28 17:08:10 -07004145 dout(" order %u", (unsigned int)*order);
4146 }
Alex Elder9d475de2012-07-03 16:01:19 -05004147 *snap_size = le64_to_cpu(size_buf.size);
4148
Josh Durginc3545572013-08-28 17:08:10 -07004149 dout(" snap_id 0x%016llx snap_size = %llu\n",
4150 (unsigned long long)snap_id,
Alex Elder57385b52013-04-21 12:14:45 -05004151 (unsigned long long)*snap_size);
Alex Elder9d475de2012-07-03 16:01:19 -05004152
4153 return 0;
4154}
4155
4156static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4157{
4158 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4159 &rbd_dev->header.obj_order,
4160 &rbd_dev->header.image_size);
4161}
4162
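/*
 * Fetch the object name prefix for a format 2 image using the
 * "get_object_prefix" method on the image's header object; the
 * image's data objects are named using this prefix.
 */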
Alex Elder1e130192012-07-03 16:01:19 -05004163static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4164{
4165 void *reply_buf;
4166 int ret;
4167 void *p;
4168
4169 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4170 if (!reply_buf)
4171 return -ENOMEM;
4172
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004173 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder41579762013-04-21 12:14:45 -05004174 "rbd", "get_object_prefix", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004175 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06004176 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder1e130192012-07-03 16:01:19 -05004177 if (ret < 0)
4178 goto out;
4179
4180 p = reply_buf;
4181 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
Alex Elder57385b52013-04-21 12:14:45 -05004182 p + ret, NULL, GFP_NOIO);
4183 ret = 0;
Alex Elder1e130192012-07-03 16:01:19 -05004184
4185 if (IS_ERR(rbd_dev->header.object_prefix)) {
4186 ret = PTR_ERR(rbd_dev->header.object_prefix);
4187 rbd_dev->header.object_prefix = NULL;
4188 } else {
4189 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4190 }
Alex Elder1e130192012-07-03 16:01:19 -05004191out:
4192 kfree(reply_buf);
4193
4194 return ret;
4195}
4196
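/*
 * Get the feature bits for an image snapshot, or for the base image
 * if snap_id is CEPH_NOSNAP.  Fails with -ENXIO if the image uses
 * incompatible features this module does not support.
 */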
Alex Elderb1b54022012-07-03 16:01:19 -05004197static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4198 u64 *snap_features)
4199{
4200 __le64 snapid = cpu_to_le64(snap_id);
4201 struct {
4202 __le64 features;
4203 __le64 incompat;
Alex Elder41579762013-04-21 12:14:45 -05004204 } __attribute__ ((packed)) features_buf = { 0 };
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004205 u64 unsup;
Alex Elderb1b54022012-07-03 16:01:19 -05004206 int ret;
4207
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004208 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elderb1b54022012-07-03 16:01:19 -05004209 "rbd", "get_features",
Alex Elder41579762013-04-21 12:14:45 -05004210 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004211 &features_buf, sizeof (features_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06004212 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderb1b54022012-07-03 16:01:19 -05004213 if (ret < 0)
4214 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05004215 if (ret < sizeof (features_buf))
4216 return -ERANGE;
Alex Elderd8891402012-10-09 13:50:17 -07004217
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004218 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4219 if (unsup) {
4220 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4221 unsup);
Alex Elderb8f5c6e2012-11-01 08:39:26 -05004222 return -ENXIO;
Ilya Dryomovd3767f02016-04-13 14:15:50 +02004223 }
Alex Elderd8891402012-10-09 13:50:17 -07004224
Alex Elderb1b54022012-07-03 16:01:19 -05004225 *snap_features = le64_to_cpu(features_buf.features);
4226
4227 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
Alex Elder57385b52013-04-21 12:14:45 -05004228 (unsigned long long)snap_id,
4229 (unsigned long long)*snap_features,
4230 (unsigned long long)le64_to_cpu(features_buf.incompat));
Alex Elderb1b54022012-07-03 16:01:19 -05004231
4232 return 0;
4233}
4234
4235static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4236{
4237 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4238 &rbd_dev->header.features);
4239}
4240
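/*
 * Query the "get_parent" method to find out whether the image is a
 * clone.  Records the parent spec the first time through and keeps
 * the parent overlap up to date; an overlap of zero means the clone
 * is (or has become) effectively standalone.
 */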
Alex Elder86b00e02012-10-25 23:34:42 -05004241static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4242{
4243 struct rbd_spec *parent_spec;
4244 size_t size;
4245 void *reply_buf = NULL;
4246 __le64 snapid;
4247 void *p;
4248 void *end;
Alex Elder642a2532013-05-06 17:40:33 -05004249 u64 pool_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004250 char *image_id;
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004251 u64 snap_id;
Alex Elder86b00e02012-10-25 23:34:42 -05004252 u64 overlap;
Alex Elder86b00e02012-10-25 23:34:42 -05004253 int ret;
4254
4255 parent_spec = rbd_spec_alloc();
4256 if (!parent_spec)
4257 return -ENOMEM;
4258
4259 size = sizeof (__le64) + /* pool_id */
4260 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4261 sizeof (__le64) + /* snap_id */
4262 sizeof (__le64); /* overlap */
4263 reply_buf = kmalloc(size, GFP_KERNEL);
4264 if (!reply_buf) {
4265 ret = -ENOMEM;
4266 goto out_err;
4267 }
4268
Ilya Dryomov4d9b67c2014-07-24 10:42:13 +04004269 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004270 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder86b00e02012-10-25 23:34:42 -05004271 "rbd", "get_parent",
Alex Elder41579762013-04-21 12:14:45 -05004272 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004273 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004274 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder86b00e02012-10-25 23:34:42 -05004275 if (ret < 0)
4276 goto out_err;
4277
Alex Elder86b00e02012-10-25 23:34:42 -05004278 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004279 end = reply_buf + ret;
4280 ret = -ERANGE;
Alex Elder642a2532013-05-06 17:40:33 -05004281 ceph_decode_64_safe(&p, end, pool_id, out_err);
Alex Elder392a9da2013-05-06 17:40:33 -05004282 if (pool_id == CEPH_NOPOOL) {
4283 /*
4284 * Either the parent never existed, or we have
4285 * a record of it, but the image got flattened so it no
4286 * longer has a parent. When the parent of a
4287 * layered image disappears we immediately set the
4288 * overlap to 0. The effect of this is that all new
4289 * requests will be treated as if the image had no
4290 * parent.
4291 */
4292 if (rbd_dev->parent_overlap) {
4293 rbd_dev->parent_overlap = 0;
Alex Elder392a9da2013-05-06 17:40:33 -05004294 rbd_dev_parent_put(rbd_dev);
4295 pr_info("%s: clone image has been flattened\n",
4296 rbd_dev->disk->disk_name);
4297 }
4298
Alex Elder86b00e02012-10-25 23:34:42 -05004299 goto out; /* No parent? No problem. */
Alex Elder392a9da2013-05-06 17:40:33 -05004300 }
Alex Elder86b00e02012-10-25 23:34:42 -05004301
Alex Elder0903e872012-11-14 12:25:19 -06004302 /* The ceph file layout needs to fit pool id in 32 bits */
4303
4304 ret = -EIO;
Alex Elder642a2532013-05-06 17:40:33 -05004305 if (pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04004306 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
Alex Elder642a2532013-05-06 17:40:33 -05004307 (unsigned long long)pool_id, U32_MAX);
Alex Elder57385b52013-04-21 12:14:45 -05004308 goto out_err;
Alex Elderc0cd10db2013-04-26 09:43:47 -05004309 }
Alex Elder0903e872012-11-14 12:25:19 -06004310
Alex Elder979ed482012-11-01 08:39:26 -05004311 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elder86b00e02012-10-25 23:34:42 -05004312 if (IS_ERR(image_id)) {
4313 ret = PTR_ERR(image_id);
4314 goto out_err;
4315 }
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004316 ceph_decode_64_safe(&p, end, snap_id, out_err);
Alex Elder86b00e02012-10-25 23:34:42 -05004317 ceph_decode_64_safe(&p, end, overlap, out_err);
4318
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004319 /*
4320 * The parent won't change (except when the clone is
4321 * flattened, which is handled above). So we only need to
4322 * record the parent spec if we have not already done so.
4323 */
4324 if (!rbd_dev->parent_spec) {
4325 parent_spec->pool_id = pool_id;
4326 parent_spec->image_id = image_id;
4327 parent_spec->snap_id = snap_id;
Alex Elder70cf49c2013-05-06 17:40:33 -05004328 rbd_dev->parent_spec = parent_spec;
4329 parent_spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovfbba11b2014-06-27 21:46:33 +04004330 } else {
4331 kfree(image_id);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004332 }
4333
4334 /*
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004335 * We always update the parent overlap. If it's zero we issue
4336 * a warning, as we will proceed as if there was no parent.
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004337 */
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004338 if (!overlap) {
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004339 if (parent_spec) {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004340 /* refresh, careful to warn just once */
4341 if (rbd_dev->parent_overlap)
4342 rbd_warn(rbd_dev,
4343 "clone now standalone (overlap became 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004344 } else {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004345 /* initial probe */
4346 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004347 }
Alex Elder70cf49c2013-05-06 17:40:33 -05004348 }
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03004349 rbd_dev->parent_overlap = overlap;
4350
Alex Elder86b00e02012-10-25 23:34:42 -05004351out:
4352 ret = 0;
4353out_err:
4354 kfree(reply_buf);
4355 rbd_spec_put(parent_spec);
4356
4357 return ret;
4358}
4359
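/* Fetch and validate the image's striping parameters. */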
Alex Eldercc070d52013-04-21 12:14:45 -05004360static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4361{
4362 struct {
4363 __le64 stripe_unit;
4364 __le64 stripe_count;
4365 } __attribute__ ((packed)) striping_info_buf = { 0 };
4366 size_t size = sizeof (striping_info_buf);
4367 void *p;
4368 u64 obj_size;
4369 u64 stripe_unit;
4370 u64 stripe_count;
4371 int ret;
4372
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004373 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Eldercc070d52013-04-21 12:14:45 -05004374 "rbd", "get_stripe_unit_count", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004375 (char *)&striping_info_buf, size);
Alex Eldercc070d52013-04-21 12:14:45 -05004376 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4377 if (ret < 0)
4378 return ret;
4379 if (ret < size)
4380 return -ERANGE;
4381
4382 /*
4383 * We don't actually support the "fancy striping" feature
4384 * (STRIPINGV2) yet, but if the striping sizes are the
4385 * defaults the behavior is the same as before. So find
4386 * out, and only fail if the image has non-default values.
4387 */
4388 ret = -EINVAL;
4389 obj_size = (u64)1 << rbd_dev->header.obj_order;
4390 p = &striping_info_buf;
4391 stripe_unit = ceph_decode_64(&p);
4392 if (stripe_unit != obj_size) {
4393 rbd_warn(rbd_dev, "unsupported stripe unit "
4394 "(got %llu want %llu)",
4395 stripe_unit, obj_size);
4396 return -EINVAL;
4397 }
4398 stripe_count = ceph_decode_64(&p);
4399 if (stripe_count != 1) {
4400 rbd_warn(rbd_dev, "unsupported stripe count "
4401 "(got %llu want 1)", stripe_count);
4402 return -EINVAL;
4403 }
Alex Elder500d0c02013-04-26 09:43:47 -05004404 rbd_dev->header.stripe_unit = stripe_unit;
4405 rbd_dev->header.stripe_count = stripe_count;
Alex Eldercc070d52013-04-21 12:14:45 -05004406
4407 return 0;
4408}
4409
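/*
 * Look up the image name for this image id in the rbd directory
 * object using the "dir_get_name" method.  Returns a dynamically
 * allocated name, or NULL if it could not be determined.
 */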
Alex Elder9e15b772012-10-30 19:40:33 -05004410static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4411{
4412 size_t image_id_size;
4413 char *image_id;
4414 void *p;
4415 void *end;
4416 size_t size;
4417 void *reply_buf = NULL;
4418 size_t len = 0;
4419 char *image_name = NULL;
4420 int ret;
4421
4422 rbd_assert(!rbd_dev->spec->image_name);
4423
Alex Elder69e7a022012-11-01 08:39:26 -05004424 len = strlen(rbd_dev->spec->image_id);
4425 image_id_size = sizeof (__le32) + len;
Alex Elder9e15b772012-10-30 19:40:33 -05004426 image_id = kmalloc(image_id_size, GFP_KERNEL);
4427 if (!image_id)
4428 return NULL;
4429
4430 p = image_id;
Alex Elder41579762013-04-21 12:14:45 -05004431 end = image_id + image_id_size;
Alex Elder57385b52013-04-21 12:14:45 -05004432 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
Alex Elder9e15b772012-10-30 19:40:33 -05004433
4434 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4435 reply_buf = kmalloc(size, GFP_KERNEL);
4436 if (!reply_buf)
4437 goto out;
4438
Alex Elder36be9a72013-01-19 00:30:28 -06004439 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
Alex Elder9e15b772012-10-30 19:40:33 -05004440 "rbd", "dir_get_name",
4441 image_id, image_id_size,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004442 reply_buf, size);
Alex Elder9e15b772012-10-30 19:40:33 -05004443 if (ret < 0)
4444 goto out;
4445 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004446 end = reply_buf + ret;
4447
Alex Elder9e15b772012-10-30 19:40:33 -05004448 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4449 if (IS_ERR(image_name))
4450 image_name = NULL;
4451 else
4452 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4453out:
4454 kfree(reply_buf);
4455 kfree(image_id);
4456
4457 return image_name;
4458}
4459
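/*
 * Map a snapshot name to its id.  A format 1 image carries the names
 * in its header; a format 2 image requires a name lookup for each
 * snapshot id in the context.  Both return CEPH_NOSNAP if the name
 * is not found.
 */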
Alex Elder2ad3d712013-04-30 00:44:33 -05004460static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4461{
4462 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4463 const char *snap_name;
4464 u32 which = 0;
4465
4466 /* Skip over names until we find the one we are looking for */
4467
4468 snap_name = rbd_dev->header.snap_names;
4469 while (which < snapc->num_snaps) {
4470 if (!strcmp(name, snap_name))
4471 return snapc->snaps[which];
4472 snap_name += strlen(snap_name) + 1;
4473 which++;
4474 }
4475 return CEPH_NOSNAP;
4476}
4477
4478static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4479{
4480 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4481 u32 which;
4482 bool found = false;
4483 u64 snap_id;
4484
4485 for (which = 0; !found && which < snapc->num_snaps; which++) {
4486 const char *snap_name;
4487
4488 snap_id = snapc->snaps[which];
4489 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
Josh Durginefadc982013-08-29 19:16:42 -07004490 if (IS_ERR(snap_name)) {
4491 /* ignore no-longer existing snapshots */
4492 if (PTR_ERR(snap_name) == -ENOENT)
4493 continue;
4494 else
4495 break;
4496 }
Alex Elder2ad3d712013-04-30 00:44:33 -05004497 found = !strcmp(name, snap_name);
4498 kfree(snap_name);
4499 }
4500 return found ? snap_id : CEPH_NOSNAP;
4501}
4502
4503/*
4504 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4505 * no snapshot by that name is found, or if an error occurs.
4506 */
4507static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4508{
4509 if (rbd_dev->image_format == 1)
4510 return rbd_v1_snap_id_by_name(rbd_dev, name);
4511
4512 return rbd_v2_snap_id_by_name(rbd_dev, name);
4513}
4514
Alex Elder9e15b772012-10-30 19:40:33 -05004515/*
Ilya Dryomov04077592014-07-23 17:11:20 +04004516 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05004517 */
Ilya Dryomov04077592014-07-23 17:11:20 +04004518static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4519{
4520 struct rbd_spec *spec = rbd_dev->spec;
4521
4522 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4523 rbd_assert(spec->image_id && spec->image_name);
4524 rbd_assert(spec->snap_name);
4525
4526 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4527 u64 snap_id;
4528
4529 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4530 if (snap_id == CEPH_NOSNAP)
4531 return -ENOENT;
4532
4533 spec->snap_id = snap_id;
4534 } else {
4535 spec->snap_id = CEPH_NOSNAP;
4536 }
4537
4538 return 0;
4539}
4540
4541/*
4542 * A parent image will have all ids but none of the names.
4543 *
4544 * All names in an rbd spec are dynamically allocated. It's OK if we
4545 * can't figure out the name for an image id.
4546 */
4547static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05004548{
Alex Elder2e9f7f12013-04-26 09:43:48 -05004549 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4550 struct rbd_spec *spec = rbd_dev->spec;
4551 const char *pool_name;
4552 const char *image_name;
4553 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004554 int ret;
4555
Ilya Dryomov04077592014-07-23 17:11:20 +04004556 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4557 rbd_assert(spec->image_id);
4558 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05004559
Alex Elder2e9f7f12013-04-26 09:43:48 -05004560 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05004561
Alex Elder2e9f7f12013-04-26 09:43:48 -05004562 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4563 if (!pool_name) {
4564 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05004565 return -EIO;
4566 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05004567 pool_name = kstrdup(pool_name, GFP_KERNEL);
4568 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05004569 return -ENOMEM;
4570
4571 /* Fetch the image name; tolerate failure here */
4572
Alex Elder2e9f7f12013-04-26 09:43:48 -05004573 image_name = rbd_dev_image_name(rbd_dev);
4574 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05004575 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05004576
Ilya Dryomov04077592014-07-23 17:11:20 +04004577 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05004578
Alex Elder2e9f7f12013-04-26 09:43:48 -05004579 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07004580 if (IS_ERR(snap_name)) {
4581 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004582 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05004583 }
4584
4585 spec->pool_name = pool_name;
4586 spec->image_name = image_name;
4587 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05004588
4589 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04004590
Alex Elder9e15b772012-10-30 19:40:33 -05004591out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05004592 kfree(image_name);
4593 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05004594 return ret;
4595}
4596
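/*
 * Refresh the snapshot context for a format 2 image: fetch the seq
 * value and snapshot ids with "get_snapcontext" and replace
 * rbd_dev->header.snapc with a freshly allocated context.
 */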
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004597static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05004598{
4599 size_t size;
4600 int ret;
4601 void *reply_buf;
4602 void *p;
4603 void *end;
4604 u64 seq;
4605 u32 snap_count;
4606 struct ceph_snap_context *snapc;
4607 u32 i;
4608
4609 /*
4610 * We'll need room for the seq value (maximum snapshot id),
4611 * snapshot count, and array of that many snapshot ids.
4612 * For now we have a fixed upper limit on the number we're
4613 * prepared to receive.
4614 */
4615 size = sizeof (__le64) + sizeof (__le32) +
4616 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4617 reply_buf = kzalloc(size, GFP_KERNEL);
4618 if (!reply_buf)
4619 return -ENOMEM;
4620
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004621 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elder41579762013-04-21 12:14:45 -05004622 "rbd", "get_snapcontext", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004623 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004624 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05004625 if (ret < 0)
4626 goto out;
4627
Alex Elder35d489f2012-07-03 16:01:19 -05004628 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05004629 end = reply_buf + ret;
4630 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05004631 ceph_decode_64_safe(&p, end, seq, out);
4632 ceph_decode_32_safe(&p, end, snap_count, out);
4633
4634 /*
4635 * Make sure the reported number of snapshot ids wouldn't go
4636 * beyond the end of our buffer. But before checking that,
4637 * make sure the computed size of the snapshot context we
4638 * allocate is representable in a size_t.
4639 */
4640 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4641 / sizeof (u64)) {
4642 ret = -EINVAL;
4643 goto out;
4644 }
4645 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4646 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05004647 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05004648
Alex Elder812164f82013-04-30 00:44:32 -05004649 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05004650 if (!snapc) {
4651 ret = -ENOMEM;
4652 goto out;
4653 }
Alex Elder35d489f2012-07-03 16:01:19 -05004654 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05004655 for (i = 0; i < snap_count; i++)
4656 snapc->snaps[i] = ceph_decode_64(&p);
4657
Alex Elder49ece552013-05-06 08:37:00 -05004658 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05004659 rbd_dev->header.snapc = snapc;
4660
4661 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05004662 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05004663out:
4664 kfree(reply_buf);
4665
Alex Elder57385b52013-04-21 12:14:45 -05004666 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05004667}
4668
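/*
 * Fetch the name of a single snapshot by id using the
 * "get_snapshot_name" method.  Returns a dynamically allocated
 * string, or an ERR_PTR() on failure.
 */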
Alex Elder54cac612013-04-30 00:44:33 -05004669static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4670 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004671{
4672 size_t size;
4673 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05004674 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004675 int ret;
4676 void *p;
4677 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004678 char *snap_name;
4679
4680 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4681 reply_buf = kmalloc(size, GFP_KERNEL);
4682 if (!reply_buf)
4683 return ERR_PTR(-ENOMEM);
4684
Alex Elder54cac612013-04-30 00:44:33 -05004685 snapid = cpu_to_le64(snap_id);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02004686 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004687 "rbd", "get_snapshot_name",
Alex Elder54cac612013-04-30 00:44:33 -05004688 &snapid, sizeof (snapid),
Alex Eldere2a58ee2013-04-30 00:44:33 -05004689 reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06004690 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05004691 if (ret < 0) {
4692 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004693 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05004694 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004695
4696 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05004697 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05004698 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05004699 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004700 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004701
Alex Elderf40eb342013-04-25 15:09:42 -05004702 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05004703 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004704out:
4705 kfree(reply_buf);
4706
Alex Elderf40eb342013-04-25 15:09:42 -05004707 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05004708}
4709
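/*
 * (Re)read the mutable parts of a format 2 image header: the image
 * size and snapshot context on every call, plus the one-time fields
 * (object prefix, features, striping) on the first call.
 */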
Alex Elder2df3fac2013-05-06 09:51:30 -05004710static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05004711{
Alex Elder2df3fac2013-05-06 09:51:30 -05004712 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05004713 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05004714
Josh Durgin1617e402013-06-12 14:43:10 -07004715 ret = rbd_dev_v2_image_size(rbd_dev);
4716 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004717 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07004718
Alex Elder2df3fac2013-05-06 09:51:30 -05004719 if (first_time) {
4720 ret = rbd_dev_v2_header_onetime(rbd_dev);
4721 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05004722 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05004723 }
4724
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004725 ret = rbd_dev_v2_snap_context(rbd_dev);
Ilya Dryomovd194cd12015-08-31 18:22:10 +03004726 if (ret && first_time) {
4727 kfree(rbd_dev->header.object_prefix);
4728 rbd_dev->header.object_prefix = NULL;
4729 }
Alex Elder117973f2012-08-31 17:29:55 -05004730
4731 return ret;
4732}
4733
Ilya Dryomova720ae02014-07-23 17:11:19 +04004734static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4735{
4736 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4737
4738 if (rbd_dev->image_format == 1)
4739 return rbd_dev_v1_header_info(rbd_dev);
4740
4741 return rbd_dev_v2_header_info(rbd_dev);
4742}
4743
Alex Elder1ddbe942012-01-29 13:57:44 -06004744/*
Alex Elder499afd52012-02-02 08:13:29 -06004745 * Get a unique rbd identifier for the given new rbd_dev, and add
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004746 * the rbd_dev to the global list.
Alex Elder1ddbe942012-01-29 13:57:44 -06004747 */
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004748static int rbd_dev_id_get(struct rbd_device *rbd_dev)
Alex Elderb7f23c32012-01-29 13:57:43 -06004749{
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004750 int new_dev_id;
4751
Ilya Dryomov9b60e702013-12-13 15:28:57 +02004752 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4753 0, minor_to_rbd_dev_id(1 << MINORBITS),
4754 GFP_KERNEL);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004755 if (new_dev_id < 0)
4756 return new_dev_id;
4757
4758 rbd_dev->dev_id = new_dev_id;
Alex Elder499afd52012-02-02 08:13:29 -06004759
4760 spin_lock(&rbd_dev_list_lock);
4761 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4762 spin_unlock(&rbd_dev_list_lock);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004763
Ilya Dryomov70eebd22013-12-13 15:28:56 +02004764 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004765
4766 return 0;
Alex Elder1ddbe942012-01-29 13:57:44 -06004767}
Alex Elderb7f23c32012-01-29 13:57:43 -06004768
Alex Elder1ddbe942012-01-29 13:57:44 -06004769/*
Alex Elder499afd52012-02-02 08:13:29 -06004770 * Remove an rbd_dev from the global list, and record that its
4771 * identifier is no longer in use.
Alex Elder1ddbe942012-01-29 13:57:44 -06004772 */
Alex Eldere2839302012-08-29 17:11:06 -05004773static void rbd_dev_id_put(struct rbd_device *rbd_dev)
Alex Elder1ddbe942012-01-29 13:57:44 -06004774{
Alex Elder499afd52012-02-02 08:13:29 -06004775 spin_lock(&rbd_dev_list_lock);
4776 list_del_init(&rbd_dev->node);
4777 spin_unlock(&rbd_dev_list_lock);
Alex Elderb7f23c32012-01-29 13:57:43 -06004778
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02004779 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4780
4781 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
Alex Elderb7f23c32012-01-29 13:57:43 -06004782}
4783
Alex Eldera725f65e2012-02-02 08:13:30 -06004784/*
Alex Eldere28fff262012-02-02 08:13:30 -06004785 * Skips over white space at *buf, and updates *buf to point to the
4786 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06004787 * the token (string of non-white space characters) found. Note
4788 * that *buf must be terminated with '\0'.
Alex Eldere28fff262012-02-02 08:13:30 -06004789 */
4790static inline size_t next_token(const char **buf)
4791{
4792 /*
4793 * These are the characters that produce nonzero for
4794 * isspace() in the "C" and "POSIX" locales.
4795 */
4796 const char *spaces = " \f\n\r\t\v";
4797
4798 *buf += strspn(*buf, spaces); /* Find start of token */
4799
4800 return strcspn(*buf, spaces); /* Return token length */
4801}
4802
4803/*
Alex Elderea3352f2012-07-09 21:04:23 -05004804 * Finds the next token in *buf, dynamically allocates a buffer big
4805 * enough to hold a copy of it, and copies the token into the new
4806 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4807 * that a duplicate buffer is created even for a zero-length token.
4808 *
4809 * Returns a pointer to the newly-allocated duplicate, or a null
4810 * pointer if memory for the duplicate was not available. If
4811 * the lenp argument is a non-null pointer, the length of the token
4812 * (not including the '\0') is returned in *lenp.
4813 *
4814 * If successful, the *buf pointer will be updated to point beyond
4815 * the end of the found token.
4816 *
4817 * Note: uses GFP_KERNEL for allocation.
4818 */
4819static inline char *dup_token(const char **buf, size_t *lenp)
4820{
4821 char *dup;
4822 size_t len;
4823
4824 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05004825 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05004826 if (!dup)
4827 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05004828 *(dup + len) = '\0';
4829 *buf += len;
4830
4831 if (lenp)
4832 *lenp = len;
4833
4834 return dup;
4835}
4836
4837/*
Alex Elder859c31d2012-10-25 23:34:42 -05004838 * Parse the options provided for an "rbd add" (i.e., rbd image
4839 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4840 * and the data written is passed here via a NUL-terminated buffer.
4841 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05004842 *
Alex Elder859c31d2012-10-25 23:34:42 -05004843 * The information extracted from these options is recorded in
4844 * the other parameters which return dynamically-allocated
4845 * structures:
4846 * ceph_opts
4847 * The address of a pointer that will refer to a ceph options
4848 * structure. Caller must release the returned pointer using
4849 * ceph_destroy_options() when it is no longer needed.
4850 * rbd_opts
4851 * Address of an rbd options pointer. Fully initialized by
4852 * this function; caller must release with kfree().
4853 * spec
4854 * Address of an rbd image specification pointer. Fully
4855 * initialized by this function based on parsed options.
4856 * Caller must release with rbd_spec_put().
4857 *
4858 * The options passed take this form:
4859 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4860 * where:
4861 * <mon_addrs>
4862 * A comma-separated list of one or more monitor addresses.
4863 * A monitor address is an ip address, optionally followed
4864 * by a port number (separated by a colon).
4865 * I.e.: ip1[:port1][,ip2[:port2]...]
4866 * <options>
4867 * A comma-separated list of ceph and/or rbd options.
4868 * <pool_name>
4869 * The name of the rados pool containing the rbd image.
4870 * <image_name>
4871 * The name of the image in that pool to map.
4872 * <snap_name>
4873 * An optional snapshot name. If provided, the mapping will
4874 * present data from the image at the time that snapshot was
4875 * created. The image head is used if no snapshot name is
4876 * provided. Snapshot mappings are always read-only.
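 *
 * For example (monitor address, pool, image and snapshot names are
 * hypothetical), a request written to /sys/bus/rbd/add might be:
 *
 *   1.2.3.4:6789 name=admin rbd myimage mysnap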
Alex Eldera725f65e2012-02-02 08:13:30 -06004877 */
Alex Elder859c31d2012-10-25 23:34:42 -05004878static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05004879 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05004880 struct rbd_options **opts,
4881 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06004882{
Alex Elderd22f76e2012-07-12 10:46:35 -05004883 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05004884 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05004885 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05004886 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05004887 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05004888 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004889 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05004890 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05004891 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06004892
4893 /* The first four tokens are required */
4894
Alex Elder7ef32142012-02-02 08:13:30 -06004895 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05004896 if (!len) {
4897 rbd_warn(NULL, "no monitor address(es) provided");
4898 return -EINVAL;
4899 }
Alex Elder0ddebc02012-10-25 23:34:41 -05004900 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05004901 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06004902 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06004903
Alex Elderdc79b112012-10-25 23:34:41 -05004904 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05004905 options = dup_token(&buf, NULL);
4906 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05004907 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004908 if (!*options) {
4909 rbd_warn(NULL, "no options provided");
4910 goto out_err;
4911 }
Alex Eldera725f65e2012-02-02 08:13:30 -06004912
Alex Elder859c31d2012-10-25 23:34:42 -05004913 spec = rbd_spec_alloc();
4914 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05004915 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05004916
4917 spec->pool_name = dup_token(&buf, NULL);
4918 if (!spec->pool_name)
4919 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004920 if (!*spec->pool_name) {
4921 rbd_warn(NULL, "no pool name provided");
4922 goto out_err;
4923 }
Alex Eldere28fff262012-02-02 08:13:30 -06004924
Alex Elder69e7a022012-11-01 08:39:26 -05004925 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05004926 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004927 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004928 if (!*spec->image_name) {
4929 rbd_warn(NULL, "no image name provided");
4930 goto out_err;
4931 }
Alex Eldere28fff262012-02-02 08:13:30 -06004932
Alex Elderf28e5652012-10-25 23:34:41 -05004933 /*
4934 * Snapshot name is optional; default is to use "-"
4935 * (indicating the head/no snapshot).
4936 */
Alex Elder3feeb8942012-08-31 17:29:52 -05004937 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05004938 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05004939 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4940 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05004941 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05004942 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05004943 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05004944 }
Alex Elderecb4dc22013-04-26 09:43:47 -05004945 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4946 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004947 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05004948 *(snap_name + len) = '\0';
4949 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05004950
Alex Elder0ddebc02012-10-25 23:34:41 -05004951 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06004952
Alex Elder4e9afeb2012-10-25 23:34:41 -05004953 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4954 if (!rbd_opts)
4955 goto out_mem;
4956
4957 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004958 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05004959
Alex Elder859c31d2012-10-25 23:34:42 -05004960 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05004961 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05004962 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05004963 if (IS_ERR(copts)) {
4964 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05004965 goto out_err;
4966 }
Alex Elder859c31d2012-10-25 23:34:42 -05004967 kfree(options);
4968
4969 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004970 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05004971 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05004972
Alex Elderdc79b112012-10-25 23:34:41 -05004973 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05004974out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05004975 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05004976out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05004977 kfree(rbd_opts);
4978 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05004979 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05004980
Alex Elderdc79b112012-10-25 23:34:41 -05004981 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06004982}
4983
Alex Elder589d30e2012-07-10 20:30:11 -05004984/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004985 * Return pool id (>= 0) or a negative error code.
4986 */
4987static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4988{
Ilya Dryomova319bf52015-05-15 12:02:17 +03004989 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004990 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004991 int tries = 0;
4992 int ret;
4993
4994again:
4995 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4996 if (ret == -ENOENT && tries++ < 1) {
4997 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4998 &newest_epoch);
4999 if (ret < 0)
5000 return ret;
5001
5002 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5003 ceph_monc_request_next_osdmap(&rbdc->client->monc);
5004 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03005005 newest_epoch,
5006 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005007 goto again;
5008 } else {
5009 /* the osdmap we have is new enough */
5010 return -ENOENT;
5011 }
5012 }
5013
5014 return ret;
5015}
5016
5017/*
Alex Elder589d30e2012-07-10 20:30:11 -05005018 * An rbd format 2 image has a unique identifier, distinct from the
5019 * name given to it by the user. Internally, that identifier is
5020 * what's used to specify the names of objects related to the image.
5021 *
5022 * A special "rbd id" object is used to map an rbd image name to its
5023 * id. If that object doesn't exist, then there is no v2 rbd image
5024 * with the supplied name.
5025 *
5026 * This function will record the given rbd_dev's image_id field if
5027 * it can be determined, and in that case will return 0. If any
5028 * errors occur a negative errno will be returned and the rbd_dev's
5029 * image_id field will be unchanged (and should be NULL).
5030 */
5031static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5032{
5033 int ret;
5034 size_t size;
5035 char *object_name;
5036 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05005037 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05005038
Alex Elder589d30e2012-07-10 20:30:11 -05005039 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05005040 * When probing a parent image, the image id is already
5041 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05005042 * need to fetch the image id again in this case. We
5043 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05005044 */
Alex Elderc0fba362013-04-25 23:15:08 -05005045 if (rbd_dev->spec->image_id) {
5046 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5047
Alex Elder2c0d0a12012-10-30 19:40:33 -05005048 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05005049 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05005050
5051 /*
Alex Elder589d30e2012-07-10 20:30:11 -05005052 * First, see if the format 2 image id file exists, and if
5053 * so, get the image's persistent id from it.
5054 */
Alex Elder69e7a022012-11-01 08:39:26 -05005055 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005056 object_name = kmalloc(size, GFP_NOIO);
5057 if (!object_name)
5058 return -ENOMEM;
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005059 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005060 dout("rbd id object name is %s\n", object_name);
5061
5062 /* Response will be an encoded string, which includes a length */
5063
5064 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5065 response = kzalloc(size, GFP_NOIO);
5066 if (!response) {
5067 ret = -ENOMEM;
5068 goto out;
5069 }
5070
Alex Elderc0fba362013-04-25 23:15:08 -05005071 /* If it doesn't exist we'll assume it's a format 1 image */
5072
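	/*
	 * -ENOENT here means there is no id object, i.e. a format 1
	 * image; its image_id is recorded as an empty string.  On
	 * success the response holds the encoded id of a format 2 image.
	 */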
Alex Elder36be9a72013-01-19 00:30:28 -06005073 ret = rbd_obj_method_sync(rbd_dev, object_name,
Alex Elder41579762013-04-21 12:14:45 -05005074 "rbd", "get_id", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05005075 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06005076 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05005077 if (ret == -ENOENT) {
5078 image_id = kstrdup("", GFP_KERNEL);
5079 ret = image_id ? 0 : -ENOMEM;
5080 if (!ret)
5081 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04005082 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05005083 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05005084
Alex Elderc0fba362013-04-25 23:15:08 -05005085 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05005086 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08005087 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05005088 if (!ret)
5089 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05005090 }
5091
5092 if (!ret) {
5093 rbd_dev->spec->image_id = image_id;
5094 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05005095 }
5096out:
5097 kfree(response);
5098 kfree(object_name);
5099
5100 return ret;
5101}
5102
Alex Elder3abef3b2013-05-13 20:35:37 -05005103/*
 5104 * Undo whatever state changes are made by a v1 or v2 header
 5105 * info call.
5106 */
Alex Elder6fd48b32013-04-28 23:32:34 -05005107static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5108{
5109 struct rbd_image_header *header;
5110
Ilya Dryomove69b8d42015-01-19 12:06:14 +03005111 rbd_dev_parent_put(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005112
5113 /* Free dynamic fields from the header, then zero it out */
5114
5115 header = &rbd_dev->header;
Alex Elder812164f82013-04-30 00:44:32 -05005116 ceph_put_snap_context(header->snapc);
Alex Elder6fd48b32013-04-28 23:32:34 -05005117 kfree(header->snap_sizes);
5118 kfree(header->snap_names);
5119 kfree(header->object_prefix);
5120 memset(header, 0, sizeof (*header));
5121}
5122
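/*
 * One-time setup for a format 2 image header: fetch the object prefix,
 * the feature bits and, if fancy striping is enabled, the striping
 * parameters.  These attributes are assumed never to change for the
 * life of the image.
 */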
Alex Elder2df3fac2013-05-06 09:51:30 -05005123static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
Alex Eldera30b71b2012-07-10 20:30:11 -05005124{
5125 int ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005126
Alex Elder1e130192012-07-03 16:01:19 -05005127 ret = rbd_dev_v2_object_prefix(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005128 if (ret)
Alex Elder1e130192012-07-03 16:01:19 -05005129 goto out_err;
Alex Elderb1b54022012-07-03 16:01:19 -05005130
Alex Elder2df3fac2013-05-06 09:51:30 -05005131 /*
 5132 * Get and check the features for the image. Currently the
5133 * features are assumed to never change.
5134 */
Alex Elderb1b54022012-07-03 16:01:19 -05005135 ret = rbd_dev_v2_features(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005136 if (ret)
Alex Elderb1b54022012-07-03 16:01:19 -05005137 goto out_err;
Alex Elder35d489f2012-07-03 16:01:19 -05005138
Alex Eldercc070d52013-04-21 12:14:45 -05005139 /* If the image supports fancy striping, get its parameters */
5140
5141 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5142 ret = rbd_dev_v2_striping_info(rbd_dev);
5143 if (ret < 0)
5144 goto out_err;
5145 }
Alex Elder2df3fac2013-05-06 09:51:30 -05005146 /* No support for crypto or compression in format 2 images */
Alex Eldera30b71b2012-07-10 20:30:11 -05005147
Alex Elder35152972012-08-31 17:29:55 -05005148 return 0;
Alex Elder9d475de2012-07-03 16:01:19 -05005149out_err:
Alex Elder642a2532013-05-06 17:40:33 -05005150 rbd_dev->header.features = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005151 kfree(rbd_dev->header.object_prefix);
5152 rbd_dev->header.object_prefix = NULL;
Alex Elder9d475de2012-07-03 16:01:19 -05005153
5154 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005155}
5156
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005157/*
5158 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5159 * rbd_dev_image_probe() recursion depth, which means it's also the
5160 * length of the already discovered part of the parent chain.
5161 */
5162static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
Alex Elder83a06262012-10-30 15:47:17 -05005163{
Alex Elder2f82ee52012-10-30 19:40:33 -05005164 struct rbd_device *parent = NULL;
Alex Elder124afba2013-04-26 15:44:36 -05005165 int ret;
5166
5167 if (!rbd_dev->parent_spec)
5168 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005169
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005170 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5171 pr_info("parent chain is too long (%d)\n", depth);
5172 ret = -EINVAL;
5173 goto out_err;
5174 }
5175
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005176 parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
5177 NULL);
5178 if (!parent) {
5179 ret = -ENOMEM;
Alex Elder124afba2013-04-26 15:44:36 -05005180 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005181 }
5182
5183 /*
5184 * Images related by parent/child relationships always share
5185 * rbd_client and spec/parent_spec, so bump their refcounts.
5186 */
5187 __rbd_get_client(rbd_dev->rbd_client);
5188 rbd_spec_get(rbd_dev->parent_spec);
Alex Elder124afba2013-04-26 15:44:36 -05005189
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005190 ret = rbd_dev_image_probe(parent, depth);
Alex Elder124afba2013-04-26 15:44:36 -05005191 if (ret < 0)
5192 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005193
Alex Elder124afba2013-04-26 15:44:36 -05005194 rbd_dev->parent = parent;
Alex Eldera2acd002013-05-08 22:50:04 -05005195 atomic_set(&rbd_dev->parent_ref, 1);
Alex Elder124afba2013-04-26 15:44:36 -05005196 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005197
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005198out_err:
5199 rbd_dev_unparent(rbd_dev);
Markus Elfring1761b222015-11-23 20:16:45 +01005200 rbd_dev_destroy(parent);
Alex Elder124afba2013-04-26 15:44:36 -05005201 return ret;
5202}
5203
Ilya Dryomov811c6682016-04-15 16:22:16 +02005204/*
5205 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5206 * upon return.
5207 */
Alex Elder200a6a82013-04-28 23:32:34 -05005208static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
Alex Elder124afba2013-04-26 15:44:36 -05005209{
Alex Elder83a06262012-10-30 15:47:17 -05005210 int ret;
Alex Elder83a06262012-10-30 15:47:17 -05005211
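	/*
	 * Setup order: allocate a device id, pick major/minor numbers,
	 * set up the disk, size the mapping, register the sysfs device
	 * and finally announce the disk.  The err_out_* labels unwind
	 * these steps in reverse on failure.
	 */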
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005212 /* Get an id and fill in device name. */
Alex Elder83a06262012-10-30 15:47:17 -05005213
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005214 ret = rbd_dev_id_get(rbd_dev);
5215 if (ret)
Ilya Dryomov811c6682016-04-15 16:22:16 +02005216 goto err_out_unlock;
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005217
Alex Elder83a06262012-10-30 15:47:17 -05005218 BUILD_BUG_ON(DEV_NAME_LEN
5219 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5220 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5221
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005222 /* Record our major and minor device numbers. */
Alex Elder83a06262012-10-30 15:47:17 -05005223
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005224 if (!single_major) {
5225 ret = register_blkdev(0, rbd_dev->name);
5226 if (ret < 0)
5227 goto err_out_id;
5228
5229 rbd_dev->major = ret;
5230 rbd_dev->minor = 0;
5231 } else {
5232 rbd_dev->major = rbd_major;
5233 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5234 }
Alex Elder83a06262012-10-30 15:47:17 -05005235
5236 /* Set up the blkdev mapping. */
5237
5238 ret = rbd_init_disk(rbd_dev);
5239 if (ret)
5240 goto err_out_blkdev;
5241
Alex Elderf35a4de2013-05-06 09:51:29 -05005242 ret = rbd_dev_mapping_set(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005243 if (ret)
5244 goto err_out_disk;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04005245
Alex Elderf35a4de2013-05-06 09:51:29 -05005246 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
Josh Durgin22001f62013-09-30 20:10:04 -07005247 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
Alex Elderf35a4de2013-05-06 09:51:29 -05005248
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005249 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5250 ret = device_add(&rbd_dev->dev);
Alex Elderf35a4de2013-05-06 09:51:29 -05005251 if (ret)
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005252 goto err_out_mapping;
Alex Elder83a06262012-10-30 15:47:17 -05005253
Alex Elder83a06262012-10-30 15:47:17 -05005254 /* Everything's ready. Announce the disk to the world. */
5255
Alex Elder129b79d2013-04-26 15:44:36 -05005256 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005257 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005258
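	/*
	 * header_rwsem (taken for write by do_rbd_add()) is released
	 * before add_disk(), presumably so that any I/O triggered while
	 * the disk is being registered (e.g. a partition scan) is not
	 * blocked on the semaphore.
	 */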
Ilya Dryomov811c6682016-04-15 16:22:16 +02005259 add_disk(rbd_dev->disk);
Alex Elder83a06262012-10-30 15:47:17 -05005260 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5261 (unsigned long long) rbd_dev->mapping.size);
5262
5263 return ret;
Alex Elder2f82ee52012-10-30 19:40:33 -05005264
Alex Elderf35a4de2013-05-06 09:51:29 -05005265err_out_mapping:
5266 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005267err_out_disk:
5268 rbd_free_disk(rbd_dev);
5269err_out_blkdev:
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005270 if (!single_major)
5271 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Elder83a06262012-10-30 15:47:17 -05005272err_out_id:
5273 rbd_dev_id_put(rbd_dev);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005274err_out_unlock:
5275 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005276 return ret;
5277}
5278
Alex Elder332bb122013-04-27 09:59:30 -05005279static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5280{
5281 struct rbd_spec *spec = rbd_dev->spec;
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005282 int ret;
Alex Elder332bb122013-04-27 09:59:30 -05005283
5284 /* Record the header object name for this rbd image. */
5285
5286 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5287
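	/*
	 * Format 1 header objects are named "<image name>" + RBD_SUFFIX,
	 * format 2 header objects RBD_HEADER_PREFIX + "<image id>".
	 */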
5288 if (rbd_dev->image_format == 1)
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005289 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5290 spec->image_name, RBD_SUFFIX);
Alex Elder332bb122013-04-27 09:59:30 -05005291 else
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005292 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5293 RBD_HEADER_PREFIX, spec->image_id);
Alex Elder332bb122013-04-27 09:59:30 -05005294
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005295 return ret;
Alex Elder332bb122013-04-27 09:59:30 -05005296}
5297
Alex Elder200a6a82013-04-28 23:32:34 -05005298static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5299{
Alex Elder6fd48b32013-04-28 23:32:34 -05005300 rbd_dev_unprobe(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005301 rbd_dev->image_format = 0;
5302 kfree(rbd_dev->spec->image_id);
5303 rbd_dev->spec->image_id = NULL;
5304
Alex Elder200a6a82013-04-28 23:32:34 -05005305 rbd_dev_destroy(rbd_dev);
5306}
5307
Alex Eldera30b71b2012-07-10 20:30:11 -05005308/*
5309 * Probe for the existence of the header object for the given rbd
Alex Elder1f3ef782013-05-06 17:40:33 -05005310 * device. If this image is the one being mapped (i.e., not a
5311 * parent), initiate a watch on its header object before using that
5312 * object to get detailed information about the rbd image.
Alex Eldera30b71b2012-07-10 20:30:11 -05005313 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005314static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
Alex Eldera30b71b2012-07-10 20:30:11 -05005315{
5316 int ret;
5317
5318 /*
Alex Elder3abef3b2013-05-13 20:35:37 -05005319 * Get the id from the image id object. Unless there's an
5320 * error, rbd_dev->spec->image_id will be filled in with
5321 * a dynamically-allocated string, and rbd_dev->image_format
5322 * will be set to either 1 or 2.
Alex Eldera30b71b2012-07-10 20:30:11 -05005323 */
5324 ret = rbd_dev_image_id(rbd_dev);
5325 if (ret)
Alex Elderc0fba362013-04-25 23:15:08 -05005326 return ret;
Alex Elderc0fba362013-04-25 23:15:08 -05005327
Alex Elder332bb122013-04-27 09:59:30 -05005328 ret = rbd_dev_header_name(rbd_dev);
5329 if (ret)
5330 goto err_out_format;
5331
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005332 if (!depth) {
Ilya Dryomovfca27062013-12-16 18:02:40 +02005333 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005334 if (ret) {
5335 if (ret == -ENOENT)
5336 pr_info("image %s/%s does not exist\n",
5337 rbd_dev->spec->pool_name,
5338 rbd_dev->spec->image_name);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005339 goto err_out_format;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005340 }
Alex Elder1f3ef782013-05-06 17:40:33 -05005341 }
Alex Elderb644de22013-04-27 09:59:31 -05005342
Ilya Dryomova720ae02014-07-23 17:11:19 +04005343 ret = rbd_dev_header_info(rbd_dev);
Alex Elder5655c4d2013-04-25 23:15:08 -05005344 if (ret)
Alex Elderb644de22013-04-27 09:59:31 -05005345 goto err_out_watch;
Alex Elder83a06262012-10-30 15:47:17 -05005346
Ilya Dryomov04077592014-07-23 17:11:20 +04005347 /*
5348 * If this image is the one being mapped, we have pool name and
5349 * id, image name and id, and snap name - need to fill snap id.
5350 * Otherwise this is a parent image, identified by pool, image
5351 * and snap ids - need to fill in names for those ids.
5352 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005353 if (!depth)
Ilya Dryomov04077592014-07-23 17:11:20 +04005354 ret = rbd_spec_fill_snap_id(rbd_dev);
5355 else
5356 ret = rbd_spec_fill_names(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005357 if (ret) {
5358 if (ret == -ENOENT)
5359 pr_info("snap %s/%s@%s does not exist\n",
5360 rbd_dev->spec->pool_name,
5361 rbd_dev->spec->image_name,
5362 rbd_dev->spec->snap_name);
Alex Elder33dca392013-04-30 00:44:33 -05005363 goto err_out_probe;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005364 }
Alex Elder9bb81c92013-04-27 09:59:30 -05005365
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005366 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5367 ret = rbd_dev_v2_parent_info(rbd_dev);
5368 if (ret)
5369 goto err_out_probe;
5370
5371 /*
5372 * Need to warn users if this image is the one being
5373 * mapped and has a parent.
5374 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005375 if (!depth && rbd_dev->parent_spec)
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005376 rbd_warn(rbd_dev,
5377 "WARNING: kernel layering is EXPERIMENTAL!");
5378 }
5379
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005380 ret = rbd_dev_probe_parent(rbd_dev, depth);
Alex Elder30d60ba2013-05-06 09:51:30 -05005381 if (ret)
5382 goto err_out_probe;
Alex Elder83a06262012-10-30 15:47:17 -05005383
Alex Elder30d60ba2013-05-06 09:51:30 -05005384 dout("discovered format %u image, header name is %s\n",
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005385 rbd_dev->image_format, rbd_dev->header_oid.name);
Alex Elder30d60ba2013-05-06 09:51:30 -05005386 return 0;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005387
Alex Elder6fd48b32013-04-28 23:32:34 -05005388err_out_probe:
5389 rbd_dev_unprobe(rbd_dev);
Alex Elderb644de22013-04-27 09:59:31 -05005390err_out_watch:
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005391 if (!depth)
Ilya Dryomovfca27062013-12-16 18:02:40 +02005392 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder332bb122013-04-27 09:59:30 -05005393err_out_format:
5394 rbd_dev->image_format = 0;
Alex Elder5655c4d2013-04-25 23:15:08 -05005395 kfree(rbd_dev->spec->image_id);
5396 rbd_dev->spec->image_id = NULL;
Alex Elder5655c4d2013-04-25 23:15:08 -05005397 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005398}
5399
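/*
 * Handle a write to the sysfs add attribute: parse the user-supplied
 * spec, get (or reuse) a ceph client, resolve the pool name to an id,
 * create the rbd_device, probe the image and its parent chain, and
 * finally set up the block device.  On success the full byte count is
 * returned, as for any sysfs store method.
 */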
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005400static ssize_t do_rbd_add(struct bus_type *bus,
5401 const char *buf,
5402 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005403{
Alex Eldercb8627c2012-07-09 21:04:23 -05005404 struct rbd_device *rbd_dev = NULL;
Alex Elderdc79b112012-10-25 23:34:41 -05005405 struct ceph_options *ceph_opts = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005406 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05005407 struct rbd_spec *spec = NULL;
Alex Elder9d3997f2012-10-25 23:34:42 -05005408 struct rbd_client *rbdc;
Alex Elder51344a32013-05-06 07:40:30 -05005409 bool read_only;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005410 int rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005411
5412 if (!try_module_get(THIS_MODULE))
5413 return -ENODEV;
5414
Alex Eldera725f65e2012-02-02 08:13:30 -06005415 /* parse add command */
Alex Elder859c31d2012-10-25 23:34:42 -05005416 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
Alex Elderdc79b112012-10-25 23:34:41 -05005417 if (rc < 0)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005418 goto out;
Alex Eldera725f65e2012-02-02 08:13:30 -06005419
Alex Elder9d3997f2012-10-25 23:34:42 -05005420 rbdc = rbd_get_client(ceph_opts);
5421 if (IS_ERR(rbdc)) {
5422 rc = PTR_ERR(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005423 goto err_out_args;
Alex Elder9d3997f2012-10-25 23:34:42 -05005424 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005425
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005426 /* pick the pool */
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005427 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005428 if (rc < 0) {
5429 if (rc == -ENOENT)
5430 pr_info("pool %s does not exist\n", spec->pool_name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005431 goto err_out_client;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005432 }
Alex Elderc0cd10db2013-04-26 09:43:47 -05005433 spec->pool_id = (u64)rc;
Alex Elder859c31d2012-10-25 23:34:42 -05005434
Alex Elder0903e872012-11-14 12:25:19 -06005435 /* The ceph file layout needs to fit pool id in 32 bits */
5436
Alex Elderc0cd10db2013-04-26 09:43:47 -05005437 if (spec->pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04005438 rbd_warn(NULL, "pool id too large (%llu > %u)",
Alex Elderc0cd10db2013-04-26 09:43:47 -05005439 (unsigned long long)spec->pool_id, U32_MAX);
Alex Elder0903e872012-11-14 12:25:19 -06005440 rc = -EIO;
5441 goto err_out_client;
5442 }
5443
Ilya Dryomovd1475432015-06-22 13:24:48 +03005444 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005445 if (!rbd_dev) {
5446 rc = -ENOMEM;
Alex Elderbd4ba652012-10-25 23:34:42 -05005447 goto err_out_client;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005448 }
Alex Elderc53d5892012-10-25 23:34:42 -05005449 rbdc = NULL; /* rbd_dev now owns this */
5450 spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovd1475432015-06-22 13:24:48 +03005451 rbd_opts = NULL; /* rbd_dev now owns this */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005452
Ilya Dryomov811c6682016-04-15 16:22:16 +02005453 down_write(&rbd_dev->header_rwsem);
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005454 rc = rbd_dev_image_probe(rbd_dev, 0);
Alex Eldera30b71b2012-07-10 20:30:11 -05005455 if (rc < 0)
Alex Elderc53d5892012-10-25 23:34:42 -05005456 goto err_out_rbd_dev;
Alex Elder05fd6f62012-08-29 17:11:07 -05005457
Alex Elder7ce4eef2013-05-06 17:40:33 -05005458 /* If we are mapping a snapshot it must be marked read-only */
5459
Ilya Dryomovd1475432015-06-22 13:24:48 +03005460 read_only = rbd_dev->opts->read_only;
Alex Elder7ce4eef2013-05-06 17:40:33 -05005461 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5462 read_only = true;
5463 rbd_dev->mapping.read_only = read_only;
5464
Alex Elderb536f692013-04-28 23:32:34 -05005465 rc = rbd_dev_device_setup(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005466 if (rc) {
Ilya Dryomove37180c2013-12-16 18:02:41 +02005467 /*
5468 * rbd_dev_header_unwatch_sync() can't be moved into
5469 * rbd_dev_image_release() without refactoring, see
5470 * commit 1f3ef78861ac.
5471 */
5472 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005473 rbd_dev_image_release(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005474 goto out;
Alex Elder3abef3b2013-05-13 20:35:37 -05005475 }
Alex Elderb536f692013-04-28 23:32:34 -05005476
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005477 rc = count;
5478out:
5479 module_put(THIS_MODULE);
5480 return rc;
Alex Elder3abef3b2013-05-13 20:35:37 -05005481
Alex Elderc53d5892012-10-25 23:34:42 -05005482err_out_rbd_dev:
Ilya Dryomov811c6682016-04-15 16:22:16 +02005483 up_write(&rbd_dev->header_rwsem);
Alex Elderc53d5892012-10-25 23:34:42 -05005484 rbd_dev_destroy(rbd_dev);
Alex Elderbd4ba652012-10-25 23:34:42 -05005485err_out_client:
Alex Elder9d3997f2012-10-25 23:34:42 -05005486 rbd_put_client(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005487err_out_args:
Alex Elder859c31d2012-10-25 23:34:42 -05005488 rbd_spec_put(spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03005489 kfree(rbd_opts);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005490 goto out;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005491}
5492
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005493static ssize_t rbd_add(struct bus_type *bus,
5494 const char *buf,
5495 size_t count)
5496{
5497 if (single_major)
5498 return -EINVAL;
5499
5500 return do_rbd_add(bus, buf, count);
5501}
5502
5503static ssize_t rbd_add_single_major(struct bus_type *bus,
5504 const char *buf,
5505 size_t count)
5506{
5507 return do_rbd_add(bus, buf, count);
5508}
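/*
 * Illustrative usage only; the authoritative interface description is
 * Documentation/ABI/testing/sysfs-bus-rbd (monitor addresses, options,
 * pool, image and optional snapshot name), e.g. something like:
 *
 *	echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" > /sys/bus/rbd/add
 *	echo 0 > /sys/bus/rbd/remove
 */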
5509
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005510static void rbd_dev_device_release(struct rbd_device *rbd_dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005511{
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005512 rbd_free_disk(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005513 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005514 device_del(&rbd_dev->dev);
Alex Elder6d80b132013-05-06 07:40:30 -05005515 rbd_dev_mapping_clear(rbd_dev);
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005516 if (!single_major)
5517 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Eldere2839302012-08-29 17:11:06 -05005518 rbd_dev_id_put(rbd_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005519}
5520
Alex Elder05a46af2013-04-26 15:44:36 -05005521static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5522{
Alex Elderad945fc2013-04-26 15:44:36 -05005523 while (rbd_dev->parent) {
Alex Elder05a46af2013-04-26 15:44:36 -05005524 struct rbd_device *first = rbd_dev;
5525 struct rbd_device *second = first->parent;
5526 struct rbd_device *third;
5527
5528 /*
5529 * Follow to the parent with no grandparent and
5530 * remove it.
5531 */
5532 while (second && (third = second->parent)) {
5533 first = second;
5534 second = third;
5535 }
Alex Elderad945fc2013-04-26 15:44:36 -05005536 rbd_assert(second);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005537 rbd_dev_image_release(second);
Alex Elderad945fc2013-04-26 15:44:36 -05005538 first->parent = NULL;
5539 first->parent_overlap = 0;
5540
5541 rbd_assert(first->parent_spec);
Alex Elder05a46af2013-04-26 15:44:36 -05005542 rbd_spec_put(first->parent_spec);
5543 first->parent_spec = NULL;
Alex Elder05a46af2013-04-26 15:44:36 -05005544 }
5545}
5546
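/*
 * Handle a write to the sysfs remove attribute.  The buffer holds the
 * device id; the matching device is looked up on rbd_dev_list and is
 * torn down only if it is not open and not already being removed.
 */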
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005547static ssize_t do_rbd_remove(struct bus_type *bus,
5548 const char *buf,
5549 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005550{
5551 struct rbd_device *rbd_dev = NULL;
Alex Elder751cc0e2013-05-31 15:17:01 -05005552 struct list_head *tmp;
5553 int dev_id;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005554 unsigned long ul;
Alex Elder82a442d2013-05-31 17:40:44 -05005555 bool already = false;
Alex Elder0d8189e2013-04-27 09:59:30 -05005556 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005557
Jingoo Hanbb8e0e82013-09-11 14:20:07 -07005558 ret = kstrtoul(buf, 10, &ul);
Alex Elder0d8189e2013-04-27 09:59:30 -05005559 if (ret)
5560 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005561
5562 /* convert to int; abort if we lost anything in the conversion */
Alex Elder751cc0e2013-05-31 15:17:01 -05005563 dev_id = (int)ul;
5564 if (dev_id != ul)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005565 return -EINVAL;
5566
Alex Elder751cc0e2013-05-31 15:17:01 -05005567 ret = -ENOENT;
5568 spin_lock(&rbd_dev_list_lock);
5569 list_for_each(tmp, &rbd_dev_list) {
5570 rbd_dev = list_entry(tmp, struct rbd_device, node);
5571 if (rbd_dev->dev_id == dev_id) {
5572 ret = 0;
5573 break;
5574 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005575 }
Alex Elder751cc0e2013-05-31 15:17:01 -05005576 if (!ret) {
5577 spin_lock_irq(&rbd_dev->lock);
5578 if (rbd_dev->open_count)
5579 ret = -EBUSY;
5580 else
Alex Elder82a442d2013-05-31 17:40:44 -05005581 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5582 &rbd_dev->flags);
Alex Elder751cc0e2013-05-31 15:17:01 -05005583 spin_unlock_irq(&rbd_dev->lock);
5584 }
5585 spin_unlock(&rbd_dev_list_lock);
Alex Elder82a442d2013-05-31 17:40:44 -05005586 if (ret < 0 || already)
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005587 return ret;
Alex Elder751cc0e2013-05-31 15:17:01 -05005588
Ilya Dryomovfca27062013-12-16 18:02:40 +02005589 rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomovfca27062013-12-16 18:02:40 +02005590
Josh Durgin98752012013-08-29 17:26:31 -07005591 /*
5592 * Don't free anything from rbd_dev->disk until after all
5593 * notifies are completely processed. Otherwise
5594 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5595 * in a potential use after free of rbd_dev->disk or rbd_dev.
5596 */
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005597 rbd_dev_device_release(rbd_dev);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005598 rbd_dev_image_release(rbd_dev);
Alex Elderaafb2302012-09-06 16:00:54 -05005599
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005600 return count;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005601}
5602
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005603static ssize_t rbd_remove(struct bus_type *bus,
5604 const char *buf,
5605 size_t count)
5606{
5607 if (single_major)
5608 return -EINVAL;
5609
5610 return do_rbd_remove(bus, buf, count);
5611}
5612
5613static ssize_t rbd_remove_single_major(struct bus_type *bus,
5614 const char *buf,
5615 size_t count)
5616{
5617 return do_rbd_remove(bus, buf, count);
5618}
5619
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005620/*
5621 * create control files in sysfs
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005622 * /sys/bus/rbd/...
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005623 */
5624static int rbd_sysfs_init(void)
5625{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005626 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005627
Alex Elderfed4c142012-02-07 12:03:36 -06005628 ret = device_register(&rbd_root_dev);
Alex Elder21079782012-01-24 10:08:36 -06005629 if (ret < 0)
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005630 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005631
Alex Elderfed4c142012-02-07 12:03:36 -06005632 ret = bus_register(&rbd_bus_type);
5633 if (ret < 0)
5634 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005635
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005636 return ret;
5637}
5638
5639static void rbd_sysfs_cleanup(void)
5640{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005641 bus_unregister(&rbd_bus_type);
Alex Elderfed4c142012-02-07 12:03:36 -06005642 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005643}
5644
Alex Elder1c2a9df2013-05-01 12:43:03 -05005645static int rbd_slab_init(void)
5646{
5647 rbd_assert(!rbd_img_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005648 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
Alex Elder868311b2013-05-01 12:43:03 -05005649 if (!rbd_img_request_cache)
5650 return -ENOMEM;
5651
5652 rbd_assert(!rbd_obj_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005653 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
Alex Elder78c2a442013-05-01 12:43:04 -05005654 if (!rbd_obj_request_cache)
5655 goto out_err;
5656
5657 rbd_assert(!rbd_segment_name_cache);
5658 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02005659 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
Alex Elder78c2a442013-05-01 12:43:04 -05005660 if (rbd_segment_name_cache)
Alex Elder1c2a9df2013-05-01 12:43:03 -05005661 return 0;
Alex Elder78c2a442013-05-01 12:43:04 -05005662out_err:
Julia Lawall13bf2832015-09-13 14:15:26 +02005663 kmem_cache_destroy(rbd_obj_request_cache);
5664 rbd_obj_request_cache = NULL;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005665
Alex Elder868311b2013-05-01 12:43:03 -05005666 kmem_cache_destroy(rbd_img_request_cache);
5667 rbd_img_request_cache = NULL;
5668
Alex Elder1c2a9df2013-05-01 12:43:03 -05005669 return -ENOMEM;
5670}
5671
5672static void rbd_slab_exit(void)
5673{
Alex Elder78c2a442013-05-01 12:43:04 -05005674 rbd_assert(rbd_segment_name_cache);
5675 kmem_cache_destroy(rbd_segment_name_cache);
5676 rbd_segment_name_cache = NULL;
5677
Alex Elder868311b2013-05-01 12:43:03 -05005678 rbd_assert(rbd_obj_request_cache);
5679 kmem_cache_destroy(rbd_obj_request_cache);
5680 rbd_obj_request_cache = NULL;
5681
Alex Elder1c2a9df2013-05-01 12:43:03 -05005682 rbd_assert(rbd_img_request_cache);
5683 kmem_cache_destroy(rbd_img_request_cache);
5684 rbd_img_request_cache = NULL;
5685}
5686
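/*
 * Module init: create the slab caches, then the work queue, grab a
 * block major if single_major mode is enabled, and finally register
 * the sysfs bus.  rbd_exit() tears this down in roughly the reverse
 * order.
 */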
Alex Eldercc344fa2013-02-19 12:25:56 -06005687static int __init rbd_init(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005688{
5689 int rc;
5690
Alex Elder1e32d342013-01-30 11:13:33 -06005691 if (!libceph_compatible(NULL)) {
5692 rbd_warn(NULL, "libceph incompatibility (quitting)");
Alex Elder1e32d342013-01-30 11:13:33 -06005693 return -EINVAL;
5694 }
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005695
Alex Elder1c2a9df2013-05-01 12:43:03 -05005696 rc = rbd_slab_init();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005697 if (rc)
5698 return rc;
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005699
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005700 /*
5701 * The number of active work items is limited by the number of
Ilya Dryomovf77303b2015-04-22 18:28:13 +03005702 * rbd devices * queue depth, so leave @max_active at default.
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005703 */
5704 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5705 if (!rbd_wq) {
5706 rc = -ENOMEM;
5707 goto err_out_slab;
5708 }
5709
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005710 if (single_major) {
5711 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5712 if (rbd_major < 0) {
5713 rc = rbd_major;
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005714 goto err_out_wq;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005715 }
5716 }
5717
Alex Elder1c2a9df2013-05-01 12:43:03 -05005718 rc = rbd_sysfs_init();
5719 if (rc)
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005720 goto err_out_blkdev;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005721
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005722 if (single_major)
5723 pr_info("loaded (major %d)\n", rbd_major);
5724 else
5725 pr_info("loaded\n");
5726
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005727 return 0;
5728
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005729err_out_blkdev:
5730 if (single_major)
5731 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005732err_out_wq:
5733 destroy_workqueue(rbd_wq);
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005734err_out_slab:
5735 rbd_slab_exit();
Alex Elder1c2a9df2013-05-01 12:43:03 -05005736 return rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005737}
5738
Alex Eldercc344fa2013-02-19 12:25:56 -06005739static void __exit rbd_exit(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005740{
Ilya Dryomovffe312c2014-05-20 15:46:04 +04005741 ida_destroy(&rbd_dev_id_ida);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005742 rbd_sysfs_cleanup();
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005743 if (single_major)
5744 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005745 destroy_workqueue(rbd_wq);
Alex Elder1c2a9df2013-05-01 12:43:03 -05005746 rbd_slab_exit();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005747}
5748
5749module_init(rbd_init);
5750module_exit(rbd_exit);
5751
Alex Elderd552c612013-05-31 20:13:09 -05005752MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005753MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5754MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005755/* following authorship retained from original osdblk.c */
5756MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5757
Ilya Dryomov90da2582013-12-13 15:28:56 +02005758MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005759MODULE_LICENSE("GPL");