/*
 * dir.c - NILFS directory entry operations
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Modified for NILFS by Amagai Yoshiji <amagai@osrg.net>
 */
/*
 * linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext2 directory handling functions
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include <linux/pagemap.h>
#include "nilfs.h"
#include "page.h"

/*
 * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned nilfs_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}

static inline void nilfs_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte;
}
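
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_CACHE_SIZE == 4096):
 * a directory with i_size == 9216 (nine 1 KiB chunks) has dir_pages() == 3;
 * nilfs_last_byte() returns 4096 for pages 0 and 1 but only 1024 for
 * page 2, so scans of the last page stop at the end of valid data.
 */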

static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to)
{
	loff_t pos = page_offset(page) + from;
	return __block_write_begin(page, pos, to - from, nilfs_get_block);
}

static void nilfs_commit_chunk(struct page *page,
			       struct address_space *mapping,
			       unsigned from, unsigned to)
{
	struct inode *dir = mapping->host;
	struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb);
	loff_t pos = page_offset(page) + from;
	unsigned len = to - from;
	unsigned nr_dirty, copied;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
	copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos + copied > dir->i_size)
		i_size_write(dir, pos + copied);
	if (IS_DIRSYNC(dir))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	err = nilfs_set_file_dirty(sbi, dir, nr_dirty);
	WARN_ON(err); /* should not happen */
	unlock_page(page);
}
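
/*
 * The directory updates below follow a common pattern: lock the page,
 * call nilfs_prepare_chunk() to get the affected byte range mapped and
 * ready, modify the entries in place, then nilfs_commit_chunk() to copy
 * out the result with block_write_end(), extend i_size if needed,
 * register the dirtied buffers with nilfs_set_file_dirty() (and flag the
 * transaction synchronous for DIRSYNC directories), and unlock the page.
 */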

static void nilfs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned chunk_size = nilfs_chunk_size(dir);
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_CACHE_SIZE;
	struct nilfs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_CACHE_MASK;
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct nilfs_dir_entry *)(kaddr + offs);
		rec_len = nilfs_rec_len_from_disk(p->rec_len);

		if (rec_len < NILFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < NILFS_DIR_REC_LEN(p->name_len))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
			goto Espan;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return;

	/* Too bad, we had an error */

Ebadsize:
	nilfs_error(sb, "nilfs_check_page",
		    "size of directory #%lu is not a multiple of chunk size",
		    dir->i_ino
	);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
bad_entry:
	nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
		    "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
		    dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
		    (unsigned long) le64_to_cpu(p->inode),
		    rec_len, p->name_len);
	goto fail;
Eend:
	p = (struct nilfs_dir_entry *)(kaddr + offs);
	nilfs_error(sb, "nilfs_check_page",
177 "entry in directory #%lu spans the page boundary"
178 "offset=%lu, inode=%lu",
		    dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
		    (unsigned long) le64_to_cpu(p->inode));
fail:
	SetPageChecked(page);
	SetPageError(page);
}
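
/*
 * Invariants enforced above: each rec_len is at least NILFS_DIR_REC_LEN(1),
 * is 4-byte aligned, is large enough for the entry's name_len, and never
 * lets an entry cross a chunk (block) boundary; the directory size itself
 * must be a multiple of the chunk size. Pages that fail any check are
 * flagged with PageError, so nilfs_get_page() returns -EIO for them.
 */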

static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);

	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageChecked(page))
			nilfs_check_page(page);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	nilfs_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure.
 *
 * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
 */
static int
nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
{
	return (struct nilfs_dir_entry *)((char *)p +
					  nilfs_rec_len_from_disk(p->rec_len));
}

static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
	[NILFS_FT_UNKNOWN]	= DT_UNKNOWN,
	[NILFS_FT_REG_FILE]	= DT_REG,
	[NILFS_FT_DIR]		= DT_DIR,
	[NILFS_FT_CHRDEV]	= DT_CHR,
	[NILFS_FT_BLKDEV]	= DT_BLK,
	[NILFS_FT_FIFO]		= DT_FIFO,
	[NILFS_FT_SOCK]		= DT_SOCK,
	[NILFS_FT_SYMLINK]	= DT_LNK,
};

#define S_SHIFT 12
static unsigned char
nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= NILFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= NILFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= NILFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= NILFS_FT_SYMLINK,
};

static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
{
	mode_t mode = inode->i_mode;

	de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
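
/*
 * Example of the mapping above: a regular file (S_IFREG) is stored on disk
 * as NILFS_FT_REG_FILE by nilfs_set_de_type(), and nilfs_readdir()
 * translates it back to DT_REG through nilfs_filetype_table before handing
 * the entry to filldir.
 */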

static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	loff_t pos = filp->f_pos;
	struct inode *inode = filp->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
/*	unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
	unsigned char *types = NULL;
	int ret;

	if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
		goto success;

	types = nilfs_filetype_table;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct nilfs_dir_entry *de;
		struct page *page = nilfs_get_page(inode, n);

		if (IS_ERR(page)) {
			nilfs_error(sb, __func__, "bad page in #%lu",
				    inode->i_ino);
			filp->f_pos += PAGE_CACHE_SIZE - offset;
			ret = -EIO;
			goto done;
		}
		kaddr = page_address(page);
		de = (struct nilfs_dir_entry *)(kaddr + offset);
		limit = kaddr + nilfs_last_byte(inode, n) -
			NILFS_DIR_REC_LEN(1);
		for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
			if (de->rec_len == 0) {
				nilfs_error(sb, __func__,
					    "zero-length directory entry");
				ret = -EIO;
				nilfs_put_page(page);
				goto done;
			}
			if (de->inode) {
				int over;
				unsigned char d_type = DT_UNKNOWN;

				if (types && de->file_type < NILFS_FT_MAX)
					d_type = types[de->file_type];

				offset = (char *)de - kaddr;
				over = filldir(dirent, de->name, de->name_len,
						(n<<PAGE_CACHE_SHIFT) | offset,
						le64_to_cpu(de->inode), d_type);
				if (over) {
					nilfs_put_page(page);
					goto success;
				}
			}
			filp->f_pos += nilfs_rec_len_from_disk(de->rec_len);
		}
		nilfs_put_page(page);
	}

success:
	ret = 0;
done:
	return ret;
}
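
/*
 * Note on the readdir position: f_pos is encoded as
 * (page_index << PAGE_CACHE_SHIFT) | offset_within_page, and the same
 * value is passed to filldir as each entry's offset, so a later
 * llseek()/readdir() resumes at the page and byte that encoding names.
 */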

/*
 * nilfs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct nilfs_dir_entry *
nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
		 struct page **res_page)
{
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct nilfs_inode_info *ei = NILFS_I(dir);
	struct nilfs_dir_entry *de;

	if (npages == 0)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = nilfs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct nilfs_dir_entry *)kaddr;
			kaddr += nilfs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (de->rec_len == 0) {
					nilfs_error(dir->i_sb, __func__,
						    "zero-length directory entry");
					nilfs_put_page(page);
					goto out;
				}
				if (nilfs_match(namelen, name, de))
					goto found;
				de = nilfs_next_entry(de);
			}
			nilfs_put_page(page);
		}
		if (++n >= npages)
			n = 0;
		/* next page is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
			nilfs_error(dir->i_sb, __func__,
				    "dir %lu size %lld exceeds block count %llu",
				    dir->i_ino, dir->i_size,
				    (unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ei->i_dir_start_lookup = n;
	return de;
}
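
/*
 * The OFFSET_CACHE above: i_dir_start_lookup remembers the page index of
 * the last successful lookup, so the circular scan starts there and only
 * wraps back to page 0 when needed; the extra i_blocks check guards
 * against looping forever over a corrupted directory whose i_size claims
 * more pages than it actually has blocks for.
 */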

struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page = nilfs_get_page(dir, 0);
	struct nilfs_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = nilfs_next_entry(
			(struct nilfs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
}

ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct nilfs_dir_entry *de;
	struct page *page;

	de = nilfs_find_entry(dir, qstr, &page);
	if (de) {
		res = le64_to_cpu(de->inode);
		kunmap(page);
		page_cache_release(page);
	}
	return res;
}

/* Releases the page */
void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
		    struct page *page, struct inode *inode)
{
	unsigned from = (char *) de - (char *) page_address(page);
	unsigned to = from + nilfs_rec_len_from_disk(de->rec_len);
	struct address_space *mapping = page->mapping;
	int err;

	lock_page(page);
	err = nilfs_prepare_chunk(page, from, to);
	BUG_ON(err);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(page, mapping, from, to);
	nilfs_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
/*	NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
}

/*
 * Parent is locked.
 */
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = nilfs_chunk_size(dir);
	unsigned reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct nilfs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	unsigned from, to;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = nilfs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + nilfs_last_byte(dir, n);
		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = nilfs_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				nilfs_error(dir->i_sb, __func__,
					    "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (nilfs_match(namelen, name, de))
				goto out_unlock;
			name_len = NILFS_DIR_REC_LEN(de->name_len);
			rec_len = nilfs_rec_len_from_disk(de->rec_len);
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct nilfs_dir_entry *)((char *)de + rec_len);
		}
		unlock_page(page);
		nilfs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	from = (char *)de - (char *)page_address(page);
	to = from + rec_len;
	err = nilfs_prepare_chunk(page, from, to);
	if (err)
		goto out_unlock;
	if (de->inode) {
		struct nilfs_dir_entry *de1;

		de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
		de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len);
		de->rec_len = nilfs_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(page, page->mapping, from, to);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
/*	NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
	nilfs_mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	nilfs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
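
/*
 * How nilfs_add_link() finds room: it reuses an unused entry (inode == 0)
 * whose rec_len already fits the new name, or splits a live entry whose
 * rec_len has at least NILFS_DIR_REC_LEN(namelen) of slack beyond its own
 * NILFS_DIR_REC_LEN(name_len); when the scan reaches i_size it appends a
 * fresh chunk-sized empty entry, growing the directory by one block.
 */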

/*
 * nilfs_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date. Releases the page.
 */
int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	char *kaddr = page_address(page);
	unsigned from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
	unsigned to = ((char *)dir - kaddr) +
		nilfs_rec_len_from_disk(dir->rec_len);
	struct nilfs_dir_entry *pde = NULL;
	struct nilfs_dir_entry *de = (struct nilfs_dir_entry *)(kaddr + from);
	int err;

	while ((char *)de < (char *)dir) {
		if (de->rec_len == 0) {
			nilfs_error(inode->i_sb, __func__,
				    "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = nilfs_next_entry(de);
	}
	if (pde)
		from = (char *)pde - (char *)page_address(page);
	lock_page(page);
	err = nilfs_prepare_chunk(page, from, to);
	BUG_ON(err);
	if (pde)
		pde->rec_len = nilfs_rec_len_to_disk(to - from);
	dir->inode = 0;
	nilfs_commit_chunk(page, mapping, from, to);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
/*	NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
out:
	nilfs_put_page(page);
	return err;
}
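
/*
 * Deletion keeps the chunk walkable: the scan from the start of the chunk
 * finds the entry that precedes the victim, and that entry's rec_len is
 * extended to swallow the victim's space; if the victim is the first entry
 * in its chunk there is no predecessor, so only its inode field is cleared.
 */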

/*
 * Set the first fragment of directory.
 */
int nilfs_make_empty(struct inode *inode, struct inode *parent)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	unsigned chunk_size = nilfs_chunk_size(inode);
	struct nilfs_dir_entry *de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = nilfs_prepare_chunk(page, 0, chunk_size);
	if (unlikely(err)) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, chunk_size);
	de = (struct nilfs_dir_entry *)kaddr;
	de->name_len = 1;
	de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
	memcpy(de->name, ".\0\0", 4);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);

	de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
	de->inode = cpu_to_le64(parent->i_ino);
	memcpy(de->name, "..\0", 4);
	nilfs_set_de_type(de, inode);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail:
	page_cache_release(page);
	return err;
}
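
/*
 * Resulting layout of a new directory's first chunk: "." occupies exactly
 * NILFS_DIR_REC_LEN(1) bytes at offset 0 and points at the directory
 * itself, while ".." points at the parent and its rec_len covers the rest
 * of the chunk, leaving slack that later nilfs_add_link() calls can split.
 */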

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int nilfs_empty_dir(struct inode *inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct nilfs_dir_entry *de;

		page = nilfs_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				nilfs_error(inode->i_sb, __func__,
					    "zero-length directory entry "
					    "(kaddr=%p, de=%p)\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					if (de->inode !=
					    cpu_to_le64(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = nilfs_next_entry(de);
		}
		nilfs_put_page(page);
	}
	return 1;

not_empty:
	nilfs_put_page(page);
	return 0;
}

const struct file_operations nilfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= nilfs_readdir,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_ioctl,
#endif	/* CONFIG_COMPAT */
	.fsync		= nilfs_sync_file,

};