#include "ceph_debug.h"

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_client *client = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
        req->r_args.open.preferred = cpu_to_le32(-1);
out:
        return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
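                /* readdir offsets 0 and 1 are reserved for "." and ".." */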
                cf->next_offset = 2;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&inode->i_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&inode->i_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have any caps.  Update wanted set
         * asynchronously.
         */
        spin_lock(&inode->i_lock);
        if (__ceph_is_any_real_caps(ci)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&inode->i_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&inode->i_lock);
                return ceph_init_file(inode, file, fmode);
        }
        spin_unlock(&inode->i_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = igrab(inode);
        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
                                struct nameidata *nd, int mode,
                                int locked_dir)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct file *file = nd->intent.open.file;
        struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
        struct ceph_mds_request *req;
        int err;
        int flags = nd->intent.open.flags - 1;  /* silly vfs! */

        dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
             dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        }
        req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        dentry = ceph_finish_lookup(req, dentry, err);
        if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        if (!err)
                err = ceph_init_file(req->r_dentry->d_inode, file,
                                     req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("ceph_lookup_open result=%p\n", dentry);
        return dentry;
}

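/*
 * Release an open file: drop the fmode reference on the inode and free
 * the per-file readdir state and ceph_file_info.
 */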
int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
        dput(cf->dentry);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

/*
 * build a vector of user pages
 */
static struct page **get_direct_page_vector(const char __user *data,
                                            int num_pages,
                                            loff_t off, size_t len)
{
        struct page **pages;
        int rc;

        pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        down_read(&current->mm->mmap_sem);
        rc = get_user_pages(current, current->mm, (unsigned long)data,
                            num_pages, 0, 0, pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (rc < 0)
                goto fail;
        return pages;

fail:
        kfree(pages);
        return ERR_PTR(rc);
}

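/*
 * Drop the references taken on user pages by get_user_pages() and free
 * the page vector itself.
 */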
static void put_page_vector(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);
        kfree(pages);
}

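/*
 * Free a vector of allocated pages, along with the vector itself.
 */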
void ceph_release_page_vector(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; i++)
                __free_pages(pages[i], 0);
        kfree(pages);
}

/*
 * allocate a vector of new pages
 */
static struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
        struct page **pages;
        int i;

        pages = kmalloc(sizeof(*pages) * num_pages, flags);
        if (!pages)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < num_pages; i++) {
                pages[i] = __page_cache_alloc(flags);
                if (pages[i] == NULL) {
                        ceph_release_page_vector(pages, i);
                        return ERR_PTR(-ENOMEM);
                }
        }
        return pages;
}

/*
 * copy user data into a page vector
 */
static int copy_user_to_page_vector(struct page **pages,
                                    const char __user *data,
                                    loff_t off, size_t len)
{
        int i = 0;
        int po = off & ~PAGE_CACHE_MASK;
        int left = len;
        int l, bad;

        while (left > 0) {
                l = min_t(int, PAGE_CACHE_SIZE-po, left);
                bad = copy_from_user(page_address(pages[i]) + po, data, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                po += l - bad;
                if (po == PAGE_CACHE_SIZE) {
                        po = 0;
                        i++;
                }
        }
        return len;
}

/*
 * copy data from a page vector into a user pointer
 */
static int copy_page_vector_to_user(struct page **pages, char __user *data,
                                    loff_t off, size_t len)
{
        int i = 0;
        int po = off & ~PAGE_CACHE_MASK;
        int left = len;
        int l, bad;

        while (left > 0) {
                l = min_t(int, left, PAGE_CACHE_SIZE-po);
                bad = copy_to_user(data, page_address(pages[i]) + po, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                if (po) {
                        po += l - bad;
                        if (po == PAGE_CACHE_SIZE)
                                po = 0;
                }
                i++;
        }
        return len;
}

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
static void zero_page_vector_range(int off, int len, struct page **pages)
{
        int i = off >> PAGE_CACHE_SHIFT;

        off &= ~PAGE_CACHE_MASK;

        dout("zero_page_vector_page %u~%u\n", off, len);

        /* leading partial page? */
        if (off) {
                int end = min((int)PAGE_CACHE_SIZE, off + len);
                dout("zeroing %d %p head from %d\n", i, pages[i],
                     (int)off);
                zero_user_segment(pages[i], off, end);
                len -= (end - off);
                i++;
        }
        while (len >= PAGE_CACHE_SIZE) {
                dout("zeroing %d %p len=%d\n", i, pages[i], len);
                zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
                len -= PAGE_CACHE_SIZE;
                i++;
        }
        /* trailing partial page? */
        if (len) {
                dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
                zero_user_segment(pages[i], 0, len);
        }
}


/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof)
{
        struct ceph_client *client = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len;
        int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
        int left, pages_left;
        int read;
        struct page **page_pos;
        int ret;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;

more:
        this_len = left;
        ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left);
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        if (ret == -ENOENT)
                ret = 0;
        dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        if (ret > 0) {
                int didpages =
                        ((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

                if (read < pos - off) {
                        dout(" zero gap %llu to %llu\n", off + read, pos);
                        zero_page_vector_range(page_off + read,
                                               pos - off - read, pages);
                }
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;

                /* hit stripe? */
                if (left && hit_stripe)
                        goto more;
        }

        if (was_short) {
                /* was original extent fully inside i_size? */
                if (pos + left <= inode->i_size) {
                        dout("zero tail\n");
                        zero_page_vector_range(page_off + read, len - read,
                                               pages);
                        read = len;
                        goto out;
                }

                /* check i_size */
                *checkeof = 1;
        }

out:
        if (ret >= 0)
                ret = read;
        dout("striped_read returns %d\n", ret);
        return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
                              unsigned len, loff_t *poff, int *checkeof)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct page **pages;
        u64 off = *poff;
        int num_pages = calc_pages_for(off, len);
        int ret;

        dout("sync_read on file %p %llu~%u %s\n", file, off, len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_DIRECT) {
                pages = get_direct_page_vector(data, num_pages, off, len);

                /*
                 * flush any page cache pages in this range.  this
                 * will make concurrent normal and O_DIRECT io slow,
                 * but it will at least behave sensibly when they are
                 * in sequence.
                 */
        } else {
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        }
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        ret = filemap_write_and_wait(inode->i_mapping);
        if (ret < 0)
                goto done;

        ret = striped_read(inode, off, len, pages, num_pages, checkeof);

        if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
                ret = copy_page_vector_to_user(pages, data, off, ret);
        if (ret >= 0)
                *poff = off + ret;

done:
        if (file->f_flags & O_DIRECT)
                put_page_vector(pages, num_pages);
        else
                ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
        return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
                              struct ceph_msg *msg)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
        spin_lock(&ci->i_unsafe_lock);
        list_del_init(&req->r_unsafe_item);
        spin_unlock(&ci->i_unsafe_lock);
        ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
                               size_t left, loff_t *offset)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_client *client = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        struct page **pages;
        int num_pages;
        long long unsigned pos;
        u64 len;
        int written = 0;
        int flags;
        int do_sync = 0;
        int check_caps = 0;
        int ret;
        struct timespec mtime = CURRENT_TIME;

        if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u %s\n", file, *offset,
             (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_APPEND)
                pos = i_size_read(inode);
        else
                pos = *offset;

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
                                            (pos + left) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;
        if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
                flags |= CEPH_OSD_FLAG_ACK;
        else
                do_sync = 1;

        /*
         * we may need to do multiple writes here if we span an object
         * boundary.  this isn't atomic, unfortunately.  :(
         */
more:
        len = left;
        req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
                                    ceph_vino(inode), pos, &len,
                                    CEPH_OSD_OP_WRITE, flags,
                                    ci->i_snap_realm->cached_context,
                                    do_sync,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    &mtime, false, 2);
        if (!req)
                return -ENOMEM;

        num_pages = calc_pages_for(pos, len);

        if (file->f_flags & O_DIRECT) {
                pages = get_direct_page_vector(data, num_pages, pos, len);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                /*
                 * throw out any page cache pages in this range.  this
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
                                           (pos+len) | (PAGE_CACHE_SIZE-1));
        } else {
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }
                ret = copy_user_to_page_vector(pages, data, pos, len);
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                if ((file->f_flags & O_SYNC) == 0) {
                        /* get a second commit callback */
                        req->r_safe_callback = sync_write_commit;
                        req->r_own_pages = 1;
                }
        }
        req->r_pages = pages;
        req->r_num_pages = num_pages;
        req->r_inode = inode;

        ret = ceph_osdc_start_request(&client->osdc, req, false);
        if (!ret) {
                if (req->r_safe_callback) {
                        /*
                         * Add to inode unsafe list only after we
                         * start_request so that a tid has been assigned.
                         */
                        spin_lock(&ci->i_unsafe_lock);
                        list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
                        spin_unlock(&ci->i_unsafe_lock);
                        ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                }
                ret = ceph_osdc_wait_request(&client->osdc, req);
        }

        if (file->f_flags & O_DIRECT)
                put_page_vector(pages, num_pages);
        else if (file->f_flags & O_SYNC)
                ceph_release_page_vector(pages, num_pages);

out:
        ceph_osdc_put_request(req);
        if (ret == 0) {
                pos += len;
                written += len;
                left -= len;
                if (left)
                        goto more;

                ret = written;
                *offset = pos;
                if (pos > i_size_read(inode))
                        check_caps = ceph_inode_set_size(inode, pos);
                if (check_caps)
                        ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
                                        NULL);
        }
        return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        loff_t *ppos = &iocb->ki_pos;
        size_t len = iov->iov_len;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        void __user *base = iov->iov_base;
        ssize_t ret;
        int want, got = 0;
        int checkeof = 0, read = 0;

        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
        __ceph_do_pending_vmtruncate(inode);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
        if (ret < 0)
                goto out;
        dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)len,
             ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS))
                /* hmm, this isn't really async... */
                ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
        else
                ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);

        if (checkeof && ret >= 0) {
                int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

                /* hit EOF or hole? */
                if (statret == 0 && *ppos < inode->i_size) {
                        dout("aio_read sync_read hit hole, reading more\n");
                        read += ret;
                        base += ret;
                        len -= ret;
                        checkeof = 0;
                        goto again;
                }
        }
        if (ret >= 0)
                ret += read;

        return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
        loff_t endoff = pos + iov->iov_len;
        int want, got = 0;
        int ret, err;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
                return -ENOSPC;
        __ceph_do_pending_vmtruncate(inode);
        dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             inode->i_size);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
        if (ret < 0)
                goto out;

        dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
                ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
                        &iocb->ki_pos);
        } else {
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

                if ((ret >= 0 || ret == -EIOCBQUEUED) &&
                    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
                     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
                        err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
                        if (err < 0)
                                ret = err;
                }
        }
        if (ret >= 0) {
                spin_lock(&inode->i_lock);
                __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&inode->i_lock);
        }

out:
        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

        if (ret == -EOLDSNAPC) {
                dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
                     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
                goto retry_snap;
        }

        return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
        switch (origin) {
        case SEEK_END:
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
                offset += inode->i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
        }

        if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
                offset = -EINVAL;
                goto out;
        }

        /* Special lock needed here? */
        if (offset != file->f_pos) {
                file->f_pos = offset;
                file->f_version = 0;
        }

out:
        mutex_unlock(&inode->i_mutex);
        return offset;
}

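/*
 * vfs file operations for regular ceph files
 */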
const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = ceph_aio_read,
        .aio_write = ceph_aio_write,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl = ceph_ioctl,
};