blob: 6b094250d805b26888b8825a48b8ee7ddfc0df68 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * inode.c
3 *
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
12 *
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
16 *
17 * HISTORY
18 *
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
29 */
30
31#include "udfdecl.h"
32#include <linux/mm.h>
33#include <linux/smp_lock.h>
34#include <linux/module.h>
35#include <linux/pagemap.h>
36#include <linux/buffer_head.h>
37#include <linux/writeback.h>
38#include <linux/slab.h>
39
40#include "udf_i.h"
41#include "udf_sb.h"
42
/* Module metadata */
MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");

/*
 * Maximum number of extents held in the local laarr[] scratch array while
 * an extent is split / preallocated / merged (see inode_getblk()).
 */
#define EXTENT_MERGE_SIZE 5

/* Forward declarations for static helpers defined later in this file. */
static mode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
	long *, int *);
static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
	kernel_lb_addr, uint32_t, struct buffer_head *);
static void udf_split_extents(struct inode *, int *, int, int,
	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
	kernel_lb_addr, uint32_t, struct buffer_head **);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
66
67/*
68 * udf_delete_inode
69 *
70 * PURPOSE
71 * Clean-up before the specified inode is destroyed.
72 *
73 * DESCRIPTION
74 * This routine is called when the kernel destroys an inode structure
75 * ie. when iput() finds i_count == 0.
76 *
77 * HISTORY
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
80 *
81 * Called at the last iput() if i_nlink is zero.
82 */
void udf_delete_inode(struct inode * inode)
{
	/* Drop all page-cache pages before touching on-disk metadata */
	truncate_inode_pages(&inode->i_data, 0);

	/* A bad inode has no valid on-disk state to truncate or free */
	if (is_bad_inode(inode))
		goto no_delete;

	/* Shrink the file to zero, then release its on-disk inode.
	 * NOTE(review): udf_truncate() takes/releases the BKL itself;
	 * lock_kernel() here protects only update + free. */
	inode->i_size = 0;
	udf_truncate(inode);
	lock_kernel();

	udf_update_inode(inode, IS_SYNC(inode));
	udf_free_inode(inode);

	unlock_kernel();
	return;
no_delete:
	clear_inode(inode);
}
102
/*
 * Called when the in-core inode is being evicted: give back any
 * preallocated blocks (writable mounts only) and free the in-core
 * copy of the allocation-descriptor / extended-attribute area.
 */
void udf_clear_inode(struct inode *inode)
{
	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
		lock_kernel();
		udf_discard_prealloc(inode);
		unlock_kernel();
	}

	/* Free the in-core FE/EFE tail data; NULL it to avoid a dangling
	 * pointer if the inode object is reused. */
	kfree(UDF_I_DATA(inode));
	UDF_I_DATA(inode) = NULL;
}
114
/* address_space op: write one page using the UDF block-mapping callback */
static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}
119
/* address_space op: read one page using the UDF block-mapping callback */
static int udf_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, udf_get_block);
}
124
/* address_space op: prepare [from, to) of a page for an overwrite */
static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, udf_get_block);
}
129
/* address_space op: logical-to-physical block lookup for FIBMAP etc. */
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,udf_get_block);
}
134
/* Address-space operations for UDF files whose data lives in extents
 * (files still using in-ICB data are converted by udf_expand_file_adinicb
 * before these ops are used). */
const struct address_space_operations udf_aops = {
	.readpage = udf_readpage,
	.writepage = udf_writepage,
	.sync_page = block_sync_page,
	.prepare_write = udf_prepare_write,
	.commit_write = generic_commit_write,
	.bmap = udf_bmap,
};
143
144void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
145{
146 struct page *page;
147 char *kaddr;
148 struct writeback_control udf_wbc = {
149 .sync_mode = WB_SYNC_NONE,
150 .nr_to_write = 1,
151 };
152
153 /* from now on we have normal address_space methods */
154 inode->i_data.a_ops = &udf_aops;
155
156 if (!UDF_I_LENALLOC(inode))
157 {
158 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
160 else
161 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 mark_inode_dirty(inode);
163 return;
164 }
165
166 page = grab_cache_page(inode->i_mapping, 0);
Matt Mackallcd7619d2005-05-01 08:59:01 -0700167 BUG_ON(!PageLocked(page));
168
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169 if (!PageUptodate(page))
170 {
171 kaddr = kmap(page);
172 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
174 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 UDF_I_LENALLOC(inode));
176 flush_dcache_page(page);
177 SetPageUptodate(page);
178 kunmap(page);
179 }
180 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 UDF_I_LENALLOC(inode));
182 UDF_I_LENALLOC(inode) = 0;
183 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
185 else
186 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
187
188 inode->i_data.a_ops->writepage(page, &udf_wbc);
189 page_cache_release(page);
190
191 mark_inode_dirty(inode);
192}
193
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory whose entries are embedded in the ICB into a
 * directory backed by a real data block: allocate a block, replay every
 * file-identifier descriptor into it, wipe the in-ICB area and record a
 * single extent covering the directory data.
 *
 * @inode: directory inode to convert
 * @block: out: logical block number of the newly allocated dir block
 * @err:   out: errno from udf_new_block() on allocation failure
 *
 * Returns the buffer_head of the new directory block (caller releases),
 * or NULL on failure / when the directory is empty.
 */
struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
{
	int newblock;
	struct buffer_head *sbh = NULL, *dbh = NULL;
	kernel_lb_addr bloc, eloc;
	uint32_t elen, extoffset;
	uint8_t alloctype;

	struct udf_fileident_bh sfibh, dfibh;
	/* f_pos/size are in 4-byte units, as udf_fileident_read expects */
	loff_t f_pos = udf_ext0_offset(inode) >> 2;
	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
	struct fileIdentDesc cfi, *sfi, *dfi;

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	if (!inode->i_size)
	{
		/* Empty directory: just flip the allocation type */
		UDF_I_ALLOCTYPE(inode) = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
		UDF_I_LOCATION(inode).partitionReferenceNum,
		UDF_I_LOCATION(inode).logicalBlockNum, err);

	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	/* Fresh block: zero it and mark uptodate without reading disk */
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
	sbh = sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	/* Walk the embedded entries, re-emitting each into the new block.
	 * The alloc type is toggled around each read because the reader
	 * locates in-ICB data via UDF_I_ALLOCTYPE. */
	while ( (f_pos < size) )
	{
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
		if (!sfi)
		{
			udf_release_data(dbh);
			return NULL;
		}
		UDF_I_ALLOCTYPE(inode) = alloctype;
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
		{
			/* Write failed: revert to in-ICB so the dir stays valid */
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
			udf_release_data(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	/* Clear the in-ICB area and record one extent for the new block */
	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	bloc = UDF_I_LOCATION(inode);
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
	elen = inode->i_size;
	UDF_I_LENEXTENTS(inode) = elen;
	extoffset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
	/* UniqueID stuff */

	udf_release_data(sbh);
	mark_inode_dirty(inode);
	return dbh;
}
282
283static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
284{
285 int err, new;
286 struct buffer_head *bh;
287 unsigned long phys;
288
289 if (!create)
290 {
291 phys = udf_block_map(inode, block);
292 if (phys)
293 map_bh(bh_result, inode->i_sb, phys);
294 return 0;
295 }
296
297 err = -EIO;
298 new = 0;
299 bh = NULL;
300
301 lock_kernel();
302
303 if (block < 0)
304 goto abort_negative;
305
306 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
307 {
308 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
309 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
310 }
311
312 err = 0;
313
314 bh = inode_getblk(inode, block, &err, &phys, &new);
Eric Sesterhenn2c2111c2006-04-02 13:40:13 +0200315 BUG_ON(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 if (err)
317 goto abort;
Eric Sesterhenn2c2111c2006-04-02 13:40:13 +0200318 BUG_ON(!phys);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319
320 if (new)
321 set_buffer_new(bh_result);
322 map_bh(bh_result, inode->i_sb, phys);
323abort:
324 unlock_kernel();
325 return err;
326
327abort_negative:
328 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
329 goto abort;
330}
331
332static struct buffer_head *
333udf_getblk(struct inode *inode, long block, int create, int *err)
334{
335 struct buffer_head dummy;
336
337 dummy.b_state = 0;
338 dummy.b_blocknr = -1000;
339 *err = udf_get_block(inode, block, &dummy, create);
340 if (!*err && buffer_mapped(&dummy))
341 {
342 struct buffer_head *bh;
343 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
344 if (buffer_new(&dummy))
345 {
346 lock_buffer(bh);
347 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
348 set_buffer_uptodate(bh);
349 unlock_buffer(bh);
350 mark_buffer_dirty_inode(bh, inode);
351 }
352 return bh;
353 }
354 return NULL;
355}
356
/*
 * inode_getblk
 *
 * Core block-mapping/allocation engine. Walks the inode's extent list to
 * find the extent containing logical block @block, and either returns the
 * existing physical block or allocates a new one, splitting, preallocating
 * and merging extents as needed.
 *
 * @inode: inode being mapped
 * @block: logical file block to map
 * @err:   out: errno on failure
 * @phys:  out: physical block number on success
 * @new:   out: set to 1 if the block was newly allocated
 *
 * Always returns NULL ('result' is never assigned); callers receive the
 * mapping through *phys / *err / *new (udf_get_block BUG_ONs a non-NULL
 * return). Must be called with the BKL held.
 */
static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
	int *err, long *phys, int *new)
{
	/* prev/current/next extent-walk state: bh, location, offset */
	struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0;
	kernel_lb_addr eloc, pbloc, cbloc, nbloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	uint32_t newblocknum, newblock;
	sector_t offset = 0;
	int8_t etype;
	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
	char lastblock = 0;

	pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
	pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do
	{
		if (pbh != cbh)
		{
			udf_release_data(pbh);
			atomic_inc(&cbh->b_count);
			pbh = cbh;
		}
		if (cbh != nbh)
		{
			udf_release_data(cbh);
			atomic_inc(&nbh->b_count);
			cbh = nbh;
		}

		lbcount += elen;

		pbloc = cbloc;
		cbloc = nbloc;

		pextoffset = cextoffset;
		cextoffset = nextoffset;

		/* etype == -1 means we ran off the end of the extent list */
		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		/* Remember the block just past the last allocated extent as
		 * an allocation goal */
		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				inode->i_sb->s_blocksize_bits);

		count ++;
	} while (lbcount + elen <= b_off);

	/* offset = block index of @block inside the current extent */
	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
	{
		if (elen & (inode->i_sb->s_blocksize - 1))
		{
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1));
			etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
		}
		udf_release_data(pbh);
		udf_release_data(cbh);
		udf_release_data(nbh);
		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
		*phys = newblock;
		return NULL;
	}

	if (etype == -1)
	{
		/* Past EOF: append a new not-recorded/not-allocated extent
		 * reaching up to and including the requested block */
		endnum = startnum = ((count > 1) ? 1 : count);
		if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
		{
			laarr[c].extLength =
				(laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
				(((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1));
			UDF_I_LENEXTENTS(inode) =
				(UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1);
		}
		c = !c;
		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			((offset + 1) << inode->i_sb->s_blocksize_bits);
		memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
		count ++;
		endnum ++;
		lastblock = 1;
	}
	else
		endnum = startnum = ((count > 2) ? 2 : count);

	/* if the current extent is in position 0, swap it with the previous */
	if (!c && count != 1)
	{
		laarr[2] = laarr[0];
		laarr[0] = laarr[1];
		laarr[1] = laarr[2];
		c = 1;
	}

	/* if the current block is located in a extent, read the next extent */
	if (etype != -1)
	{
		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
		{
			laarr[c+1].extLength = (etype << 30) | elen;
			laarr[c+1].extLocation = eloc;
			count ++;
			startnum ++;
			endnum ++;
		}
		else
			lastblock = 1;
	}
	udf_release_data(cbh);
	udf_release_data(nbh);

	/* if the current extent is not recorded but allocated, get the
	   block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else /* otherwise, allocate a new block */
	{
		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
			goal = UDF_I_NEXT_ALLOC_GOAL(inode);

		if (!goal)
		{
			if (!(goal = pgoal))
				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
		}

		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
		{
			udf_release_data(pbh);
			*err = -ENOSPC;
			return NULL;
		}
		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
	}

	/* if the extent the requsted block is located in contains multiple blocks,
	   split the extent into at most three extents. blocks prior to requested
	   block, requested block, and blocks after requested block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* preallocate blocks */
	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	   of extents is greater than the old number, and deleting extents if
	   the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);

	udf_release_data(pbh);

	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
	{
		/* NOTE(review): *err is not set on this path — caller sees
		 * err == 0 with no mapping; verify against udf_get_block */
		return NULL;
	}
	*phys = newblock;
	*err = 0;
	*new = 1;
	/* Remember where we are for sequential-allocation goal tracking */
	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
	inode->i_ctime = current_fs_time(inode->i_sb);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return result;
}
557
/*
 * udf_split_extents
 *
 * Split the (not-recorded) extent laarr[*c] around the requested block at
 * @offset blocks into it: up to three extents result — blocks before the
 * requested block, the requested block itself (now recorded+allocated at
 * @newblocknum), and blocks after it. *c and *endnum are updated to keep
 * pointing at the requested block's extent / one past the last extent.
 */
static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
	{
		int curr = *c;
		/* extent length in blocks, rounded up */
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		/* Shift following entries to make room for 1 or 2 new extents */
		if (blen == 1)
			;
		else if (!offset || blen == offset + 1)
		{
			laarr[curr+2] = laarr[curr+1];
			laarr[curr+1] = laarr[curr];
		}
		else
		{
			laarr[curr+3] = laarr[curr+1];
			laarr[curr+2] = laarr[curr+1] = laarr[curr];
		}

		if (offset)
		{
			/* Head part: blocks before the requested block */
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
			{
				/* Give back allocated-but-unrecorded head blocks */
				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << inode->i_sb->s_blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.partitionReferenceNum = 0;
			}
			else
				laarr[curr].extLength = (etype << 30) |
					(offset << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*c) ++;
			(*endnum) ++;
		}

		/* The requested block itself: one recorded+allocated block */
		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			inode->i_sb->s_blocksize;
		curr ++;

		/* Tail part: blocks after the requested block, if any */
		if (blen != offset + 1)
		{
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*endnum) ++;
		}
	}
}
619
/*
 * udf_prealloc_extents
 *
 * After a block allocation, try to preallocate up to
 * UDF_DEFAULT_PREALLOC_BLOCKS contiguous blocks following the extent at
 * laarr[c], absorbing following not-recorded extents (or extending past
 * EOF when @lastblock is set). laarr[] and *endnum are adjusted to
 * reflect the preallocated extent.
 */
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c+1))
	{
		if (!lastblock)
			return;
		else
			start = c;
	}
	else
	{
		/* A following allocated-but-unrecorded extent counts toward
		 * the preallocation we already have */
		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			start = c+1;
			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		}
		else
			start = c;
	}

	/* Count how many blocks after 'start' could be covered */
	for (i=start+1; i<=*endnum; i++)
	{
		if (i == *endnum)
		{
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		else
			break;
	}

	if (length)
	{
		/* Ask the allocator for blocks contiguous with laarr[start] */
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
			laarr[start].extLocation.partitionReferenceNum,
			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);

		if (numalloc)
		{
			/* Grow the existing prealloc extent, or insert a new
			 * allocated-unrecorded extent after laarr[c] */
			if (start == (c+1))
				laarr[start].extLength +=
					(numalloc << inode->i_sb->s_blocksize_bits);
			else
			{
				memmove(&laarr[c+2], &laarr[c+1],
					sizeof(long_ad) * (*endnum - (c+1)));
				(*endnum) ++;
				laarr[c+1].extLocation.logicalBlockNum = next;
				laarr[c+1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
					(numalloc << inode->i_sb->s_blocksize_bits);
				start = c+1;
			}

			/* Shrink/remove the unallocated extents now covered by
			 * the preallocation */
			for (i=start+1; numalloc && i<*endnum; i++)
			{
				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;

				if (elen > numalloc)
				{
					laarr[i].extLength -=
						(numalloc << inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				}
				else
				{
					numalloc -= elen;
					if (*endnum > (i+1))
						memmove(&laarr[i], &laarr[i+1],
							sizeof(long_ad) * (*endnum - (i+1)));
					i --;
					(*endnum) --;
				}
			}
			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
711
/*
 * udf_merge_extents
 *
 * Coalesce adjacent laarr[] entries that are physically (or logically,
 * for unallocated extents) contiguous and of the same type, respecting
 * the maximum extent length encodable in UDF_EXTENT_LENGTH_MASK. Also
 * frees blocks of allocated-unrecorded extents that are followed by an
 * unallocated one, and downgrades leftover allocated-unrecorded extents.
 * *endnum is decremented for each merge.
 */
static void udf_merge_extents(struct inode *inode,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int i;

	for (i=0; i<(*endnum-1); i++)
	{
		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
		{
			/* Same type: mergeable if unallocated, or physically
			 * adjacent on disk */
			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
			{
				/* Combined length overflows the on-disk field:
				 * fill extent i to the maximum, keep remainder
				 * in extent i+1 */
				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
				{
					laarr[i+1].extLength = (laarr[i+1].extLength -
						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
					laarr[i+1].extLocation.logicalBlockNum =
						laarr[i].extLocation.logicalBlockNum +
						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
							inode->i_sb->s_blocksize_bits);
				}
				else
				{
					/* Full merge: absorb i+1 into i and close
					 * the gap in laarr[] */
					laarr[i].extLength = laarr[i+1].extLength +
						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
					if (*endnum > (i+2))
						memmove(&laarr[i+1], &laarr[i+2],
							sizeof(long_ad) * (*endnum - (i+2)));
					i --;
					(*endnum) --;
				}
			}
		}
		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
		{
			/* Allocated-unrecorded followed by unallocated: free the
			 * blocks and merge as unallocated */
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;

			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
			{
				laarr[i+1].extLength = (laarr[i+1].extLength -
					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
			}
			else
			{
				laarr[i].extLength = laarr[i+1].extLength +
					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
				if (*endnum > (i+2))
					memmove(&laarr[i+1], &laarr[i+2],
						sizeof(long_ad) * (*endnum - (i+2)));
				i --;
				(*endnum) --;
			}
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			/* Lone allocated-unrecorded extent: release the blocks
			 * and downgrade it to unallocated */
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
				EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}
796
/*
 * udf_update_extents
 *
 * Write the rebuilt extent array laarr[0..endnum) back to the on-disk
 * allocation descriptors starting at (pbloc, pextoffset). If the extent
 * count shrank, delete the surplus descriptors first; if it grew, insert
 * the new ones; then overwrite the remainder in place.
 */
static void udf_update_extents(struct inode *inode,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
	kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
{
	int start = 0, i;
	kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum)
	{
		/* Fewer extents than before: delete the excess descriptors */
		for (i=0; i<(startnum-endnum); i++)
		{
			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
		}
	}
	else if (startnum < endnum)
	{
		/* More extents than before: insert the new descriptors and
		 * advance past each inserted one */
		for (i=0; i<(endnum-startnum); i++)
		{
			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
				&laarr[i].extLength, pbh, 1);
			start ++;
		}
	}

	/* Overwrite the remaining descriptors in place */
	for (i=start; i<endnum; i++)
	{
		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
			laarr[i].extLength, *pbh, 1);
	}
}
832
833struct buffer_head * udf_bread(struct inode * inode, int block,
834 int create, int * err)
835{
836 struct buffer_head * bh = NULL;
837
838 bh = udf_getblk(inode, block, create, err);
839 if (!bh)
840 return NULL;
841
842 if (buffer_uptodate(bh))
843 return bh;
844 ll_rw_block(READ, 1, &bh);
845 wait_on_buffer(bh);
846 if (buffer_uptodate(bh))
847 return bh;
848 brelse(bh);
849 *err = -EIO;
850 return NULL;
851}
852
/*
 * udf_truncate
 *
 * Truncate @inode to inode->i_size. For in-ICB files that still fit in
 * the ICB, just trims (and zeroes) the embedded data; in-ICB files that
 * no longer fit are first expanded to extent form. Extent-backed files
 * get their partial last page zeroed and surplus extents released.
 * Takes the BKL for the metadata manipulation.
 */
void udf_truncate(struct inode * inode)
{
	int offset;
	int err;	/* NOTE(review): only written by udf_expand_file_adinicb
			 * and never examined here — failures are detected via
			 * UDF_I_ALLOCTYPE instead; verify this is intended */

	/* Only data-bearing inode types can be truncated */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	lock_kernel();
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
	{
		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
			inode->i_size))
		{
			/* New size no longer fits in the ICB: convert to
			 * extents first, then truncate those */
			udf_expand_file_adinicb(inode, inode->i_size, &err);
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
			{
				/* Expansion failed: clamp size to what is
				 * actually stored in the ICB */
				inode->i_size = UDF_I_LENALLOC(inode);
				unlock_kernel();
				return;
			}
			else
				udf_truncate_extents(inode);
		}
		else
		{
			/* Still fits in the ICB: zero the tail of the
			 * embedded data area */
			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
			UDF_I_LENALLOC(inode) = inode->i_size;
		}
	}
	else
	{
		/* Extent-backed: zero the partial last page, then drop the
		 * extents beyond the new size */
		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
		udf_truncate_extents(inode);
	}

	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode (inode);
	else
		mark_inode_dirty(inode);
	unlock_kernel();
}
900
/*
 * __udf_read_inode
 *
 * Read the (extended) file entry for @inode from disk, following
 * strategy-4096 indirect ICBs by recursing with the indirect location,
 * and hand the descriptor to udf_fill_inode(). On any failure the inode
 * is marked bad via make_bad_inode().
 */
static void
__udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 * i_sb = sb
	 * i_no = ino
	 * i_flags = sb->s_flags
	 * i_state = 0
	 * clean_inode(): zero fills and sets
	 * i_count = 1
	 * i_nlink = 1
	 * i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);

	if (!bh)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
			inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	/* Only (extended) file entries and unallocated-space entries are
	 * valid inode descriptors */
	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
		ident != TAG_IDENT_USE)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct fileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
	{
		/* Strategy 4096: the block after the FE holds an indirect
		 * entry pointing at the current copy of the inode */
		struct buffer_head *ibh = NULL, *nbh = NULL;
		struct indirectEntry *ie;

		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
		if (ident == TAG_IDENT_IE)
		{
			if (ibh)
			{
				kernel_lb_addr loc;
				ie = (struct indirectEntry *)ibh->b_data;

				loc = lelb_to_cpu(ie->indirectICB.extLocation);

				if (ie->indirectICB.extLength &&
					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
				{
					if (ident == TAG_IDENT_FE ||
						ident == TAG_IDENT_EFE)
					{
						/* Follow the indirection:
						 * restart from the new
						 * location */
						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
						udf_release_data(bh);
						udf_release_data(ibh);
						udf_release_data(nbh);
						__udf_read_inode(inode);
						return;
					}
					else
					{
						udf_release_data(nbh);
						udf_release_data(ibh);
					}
				}
				else
					udf_release_data(ibh);
			}
		}
		else
			udf_release_data(ibh);
	}
	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
	{
		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);
	udf_release_data(bh);
}
994
995static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
996{
997 struct fileEntry *fe;
998 struct extendedFileEntry *efe;
999 time_t convtime;
1000 long convtime_usec;
1001 int offset;
1002
1003 fe = (struct fileEntry *)bh->b_data;
1004 efe = (struct extendedFileEntry *)bh->b_data;
1005
1006 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1007 UDF_I_STRAT4096(inode) = 0;
1008 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1009 UDF_I_STRAT4096(inode) = 1;
1010
1011 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1012 UDF_I_UNIQUE(inode) = 0;
1013 UDF_I_LENEATTR(inode) = 0;
1014 UDF_I_LENEXTENTS(inode) = 0;
1015 UDF_I_LENALLOC(inode) = 0;
1016 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1017 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1018 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1019 {
1020 UDF_I_EFE(inode) = 1;
1021 UDF_I_USE(inode) = 0;
1022 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1023 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1024 }
1025 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1026 {
1027 UDF_I_EFE(inode) = 0;
1028 UDF_I_USE(inode) = 0;
1029 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1030 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1031 }
1032 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1033 {
1034 UDF_I_EFE(inode) = 0;
1035 UDF_I_USE(inode) = 1;
1036 UDF_I_LENALLOC(inode) =
1037 le32_to_cpu(
1038 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1039 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1040 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1041 return;
1042 }
1043
1044 inode->i_uid = le32_to_cpu(fe->uid);
Phillip Susi4d6660e2006-03-07 21:55:24 -08001045 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1046 UDF_FLAG_UID_IGNORE))
1047 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048
1049 inode->i_gid = le32_to_cpu(fe->gid);
Phillip Susi4d6660e2006-03-07 21:55:24 -08001050 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1051 UDF_FLAG_GID_IGNORE))
1052 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053
1054 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1055 if (!inode->i_nlink)
1056 inode->i_nlink = 1;
1057
1058 inode->i_size = le64_to_cpu(fe->informationLength);
1059 UDF_I_LENEXTENTS(inode) = inode->i_size;
1060
1061 inode->i_mode = udf_convert_permissions(fe);
1062 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1063
1064 if (UDF_I_EFE(inode) == 0)
1065 {
1066 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1067 (inode->i_sb->s_blocksize_bits - 9);
1068
1069 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1070 lets_to_cpu(fe->accessTime)) )
1071 {
1072 inode->i_atime.tv_sec = convtime;
1073 inode->i_atime.tv_nsec = convtime_usec * 1000;
1074 }
1075 else
1076 {
1077 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1078 }
1079
1080 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1081 lets_to_cpu(fe->modificationTime)) )
1082 {
1083 inode->i_mtime.tv_sec = convtime;
1084 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1085 }
1086 else
1087 {
1088 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1089 }
1090
1091 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1092 lets_to_cpu(fe->attrTime)) )
1093 {
1094 inode->i_ctime.tv_sec = convtime;
1095 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1096 }
1097 else
1098 {
1099 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1100 }
1101
1102 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1103 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1104 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1105 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1106 }
1107 else
1108 {
1109 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1110 (inode->i_sb->s_blocksize_bits - 9);
1111
1112 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1113 lets_to_cpu(efe->accessTime)) )
1114 {
1115 inode->i_atime.tv_sec = convtime;
1116 inode->i_atime.tv_nsec = convtime_usec * 1000;
1117 }
1118 else
1119 {
1120 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1121 }
1122
1123 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1124 lets_to_cpu(efe->modificationTime)) )
1125 {
1126 inode->i_mtime.tv_sec = convtime;
1127 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1128 }
1129 else
1130 {
1131 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1132 }
1133
1134 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1135 lets_to_cpu(efe->createTime)) )
1136 {
1137 UDF_I_CRTIME(inode).tv_sec = convtime;
1138 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1139 }
1140 else
1141 {
1142 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1143 }
1144
1145 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1146 lets_to_cpu(efe->attrTime)) )
1147 {
1148 inode->i_ctime.tv_sec = convtime;
1149 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1150 }
1151 else
1152 {
1153 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1154 }
1155
1156 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1157 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1158 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1159 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1160 }
1161
1162 switch (fe->icbTag.fileType)
1163 {
1164 case ICBTAG_FILE_TYPE_DIRECTORY:
1165 {
1166 inode->i_op = &udf_dir_inode_operations;
1167 inode->i_fop = &udf_dir_operations;
1168 inode->i_mode |= S_IFDIR;
Dave Hansend8c76e62006-09-30 23:29:04 -07001169 inc_nlink(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 break;
1171 }
1172 case ICBTAG_FILE_TYPE_REALTIME:
1173 case ICBTAG_FILE_TYPE_REGULAR:
1174 case ICBTAG_FILE_TYPE_UNDEF:
1175 {
1176 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1177 inode->i_data.a_ops = &udf_adinicb_aops;
1178 else
1179 inode->i_data.a_ops = &udf_aops;
1180 inode->i_op = &udf_file_inode_operations;
1181 inode->i_fop = &udf_file_operations;
1182 inode->i_mode |= S_IFREG;
1183 break;
1184 }
1185 case ICBTAG_FILE_TYPE_BLOCK:
1186 {
1187 inode->i_mode |= S_IFBLK;
1188 break;
1189 }
1190 case ICBTAG_FILE_TYPE_CHAR:
1191 {
1192 inode->i_mode |= S_IFCHR;
1193 break;
1194 }
1195 case ICBTAG_FILE_TYPE_FIFO:
1196 {
1197 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1198 break;
1199 }
1200 case ICBTAG_FILE_TYPE_SOCKET:
1201 {
1202 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1203 break;
1204 }
1205 case ICBTAG_FILE_TYPE_SYMLINK:
1206 {
1207 inode->i_data.a_ops = &udf_symlink_aops;
1208 inode->i_op = &page_symlink_inode_operations;
1209 inode->i_mode = S_IFLNK|S_IRWXUGO;
1210 break;
1211 }
1212 default:
1213 {
1214 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1215 inode->i_ino, fe->icbTag.fileType);
1216 make_bad_inode(inode);
1217 return;
1218 }
1219 }
1220 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1221 {
1222 struct deviceSpec *dsea =
1223 (struct deviceSpec *)
1224 udf_get_extendedattr(inode, 12, 1);
1225
1226 if (dsea)
1227 {
1228 init_special_inode(inode, inode->i_mode, MKDEV(
1229 le32_to_cpu(dsea->majorDeviceIdent),
1230 le32_to_cpu(dsea->minorDeviceIdent)));
1231 /* Developer ID ??? */
1232 }
1233 else
1234 {
1235 make_bad_inode(inode);
1236 }
1237 }
1238}
1239
1240static mode_t
1241udf_convert_permissions(struct fileEntry *fe)
1242{
1243 mode_t mode;
1244 uint32_t permissions;
1245 uint32_t flags;
1246
1247 permissions = le32_to_cpu(fe->permissions);
1248 flags = le16_to_cpu(fe->icbTag.flags);
1249
1250 mode = (( permissions ) & S_IRWXO) |
1251 (( permissions >> 2 ) & S_IRWXG) |
1252 (( permissions >> 4 ) & S_IRWXU) |
1253 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1254 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1255 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1256
1257 return mode;
1258}
1259
1260/*
1261 * udf_write_inode
1262 *
1263 * PURPOSE
1264 * Write out the specified inode.
1265 *
1266 * DESCRIPTION
1267 * This routine is called whenever an inode is synced.
 *	It takes the big kernel lock and writes the inode via udf_update_inode().
1269 *
1270 * HISTORY
1271 * July 1, 1997 - Andrew E. Mileski
1272 * Written, tested, and released.
1273 */
1274
int udf_write_inode(struct inode *inode, int sync)
{
	int err;

	/* Serialize the in-core inode to disk under the BKL. */
	lock_kernel();
	err = udf_update_inode(inode, sync);
	unlock_kernel();

	return err;
}
1283
/* Synchronously write the inode back to disk (caller holds the BKL). */
int udf_sync_inode(struct inode *inode)
{
	int err = udf_update_inode(inode, 1);

	return err;
}
1288
1289static int
1290udf_update_inode(struct inode *inode, int do_sync)
1291{
1292 struct buffer_head *bh = NULL;
1293 struct fileEntry *fe;
1294 struct extendedFileEntry *efe;
1295 uint32_t udfperms;
1296 uint16_t icbflags;
1297 uint16_t crclen;
1298 int i;
1299 kernel_timestamp cpu_time;
1300 int err = 0;
1301
1302 bh = udf_tread(inode->i_sb,
1303 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1304
1305 if (!bh)
1306 {
1307 udf_debug("bread failure\n");
1308 return -EIO;
1309 }
1310
1311 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1312
1313 fe = (struct fileEntry *)bh->b_data;
1314 efe = (struct extendedFileEntry *)bh->b_data;
1315
1316 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1317 {
1318 struct unallocSpaceEntry *use =
1319 (struct unallocSpaceEntry *)bh->b_data;
1320
1321 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1322 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1323 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1324 sizeof(tag);
1325 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1326 use->descTag.descCRCLength = cpu_to_le16(crclen);
1327 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1328
1329 use->descTag.tagChecksum = 0;
1330 for (i=0; i<16; i++)
1331 if (i != 4)
1332 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1333
1334 mark_buffer_dirty(bh);
1335 udf_release_data(bh);
1336 return err;
1337 }
1338
Phillip Susi4d6660e2006-03-07 21:55:24 -08001339 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1340 fe->uid = cpu_to_le32(-1);
Phillip Susi0e6b3e52006-03-25 03:08:14 -08001341 else fe->uid = cpu_to_le32(inode->i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342
Phillip Susi4d6660e2006-03-07 21:55:24 -08001343 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1344 fe->gid = cpu_to_le32(-1);
Phillip Susi0e6b3e52006-03-25 03:08:14 -08001345 else fe->gid = cpu_to_le32(inode->i_gid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
1347 udfperms = ((inode->i_mode & S_IRWXO) ) |
1348 ((inode->i_mode & S_IRWXG) << 2) |
1349 ((inode->i_mode & S_IRWXU) << 4);
1350
1351 udfperms |= (le32_to_cpu(fe->permissions) &
1352 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1353 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1354 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1355 fe->permissions = cpu_to_le32(udfperms);
1356
1357 if (S_ISDIR(inode->i_mode))
1358 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1359 else
1360 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1361
1362 fe->informationLength = cpu_to_le64(inode->i_size);
1363
1364 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1365 {
1366 regid *eid;
1367 struct deviceSpec *dsea =
1368 (struct deviceSpec *)
1369 udf_get_extendedattr(inode, 12, 1);
1370
1371 if (!dsea)
1372 {
1373 dsea = (struct deviceSpec *)
1374 udf_add_extendedattr(inode,
1375 sizeof(struct deviceSpec) +
1376 sizeof(regid), 12, 0x3);
1377 dsea->attrType = cpu_to_le32(12);
1378 dsea->attrSubtype = 1;
1379 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1380 sizeof(regid));
1381 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1382 }
1383 eid = (regid *)dsea->impUse;
1384 memset(eid, 0, sizeof(regid));
1385 strcpy(eid->ident, UDF_ID_DEVELOPER);
1386 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1387 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1388 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1389 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1390 }
1391
1392 if (UDF_I_EFE(inode) == 0)
1393 {
1394 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1395 fe->logicalBlocksRecorded = cpu_to_le64(
1396 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1397 (inode->i_sb->s_blocksize_bits - 9));
1398
1399 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1400 fe->accessTime = cpu_to_lets(cpu_time);
1401 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1402 fe->modificationTime = cpu_to_lets(cpu_time);
1403 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1404 fe->attrTime = cpu_to_lets(cpu_time);
1405 memset(&(fe->impIdent), 0, sizeof(regid));
1406 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1407 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1408 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1409 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1410 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1411 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1412 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1413 crclen = sizeof(struct fileEntry);
1414 }
1415 else
1416 {
1417 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1418 efe->objectSize = cpu_to_le64(inode->i_size);
1419 efe->logicalBlocksRecorded = cpu_to_le64(
1420 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1421 (inode->i_sb->s_blocksize_bits - 9));
1422
1423 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1424 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1425 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1426 {
1427 UDF_I_CRTIME(inode) = inode->i_atime;
1428 }
1429 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1430 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1431 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1432 {
1433 UDF_I_CRTIME(inode) = inode->i_mtime;
1434 }
1435 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1436 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1437 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1438 {
1439 UDF_I_CRTIME(inode) = inode->i_ctime;
1440 }
1441
1442 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1443 efe->accessTime = cpu_to_lets(cpu_time);
1444 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1445 efe->modificationTime = cpu_to_lets(cpu_time);
1446 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1447 efe->createTime = cpu_to_lets(cpu_time);
1448 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1449 efe->attrTime = cpu_to_lets(cpu_time);
1450
1451 memset(&(efe->impIdent), 0, sizeof(regid));
1452 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1453 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1454 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1455 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1456 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1457 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1458 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1459 crclen = sizeof(struct extendedFileEntry);
1460 }
1461 if (UDF_I_STRAT4096(inode))
1462 {
1463 fe->icbTag.strategyType = cpu_to_le16(4096);
1464 fe->icbTag.strategyParameter = cpu_to_le16(1);
1465 fe->icbTag.numEntries = cpu_to_le16(2);
1466 }
1467 else
1468 {
1469 fe->icbTag.strategyType = cpu_to_le16(4);
1470 fe->icbTag.numEntries = cpu_to_le16(1);
1471 }
1472
1473 if (S_ISDIR(inode->i_mode))
1474 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1475 else if (S_ISREG(inode->i_mode))
1476 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1477 else if (S_ISLNK(inode->i_mode))
1478 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1479 else if (S_ISBLK(inode->i_mode))
1480 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1481 else if (S_ISCHR(inode->i_mode))
1482 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1483 else if (S_ISFIFO(inode->i_mode))
1484 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1485 else if (S_ISSOCK(inode->i_mode))
1486 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1487
1488 icbflags = UDF_I_ALLOCTYPE(inode) |
1489 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1490 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1491 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1492 (le16_to_cpu(fe->icbTag.flags) &
1493 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1494 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1495
1496 fe->icbTag.flags = cpu_to_le16(icbflags);
1497 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1498 fe->descTag.descVersion = cpu_to_le16(3);
1499 else
1500 fe->descTag.descVersion = cpu_to_le16(2);
1501 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1502 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1503 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1504 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1505 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1506
1507 fe->descTag.tagChecksum = 0;
1508 for (i=0; i<16; i++)
1509 if (i != 4)
1510 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1511
1512 /* write the data blocks */
1513 mark_buffer_dirty(bh);
1514 if (do_sync)
1515 {
1516 sync_dirty_buffer(bh);
1517 if (buffer_req(bh) && !buffer_uptodate(bh))
1518 {
1519 printk("IO error syncing udf inode [%s:%08lx]\n",
1520 inode->i_sb->s_id, inode->i_ino);
1521 err = -EIO;
1522 }
1523 }
1524 udf_release_data(bh);
1525 return err;
1526}
1527
1528struct inode *
1529udf_iget(struct super_block *sb, kernel_lb_addr ino)
1530{
1531 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1532 struct inode *inode = iget_locked(sb, block);
1533
1534 if (!inode)
1535 return NULL;
1536
1537 if (inode->i_state & I_NEW) {
1538 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1539 __udf_read_inode(inode);
1540 unlock_new_inode(inode);
1541 }
1542
1543 if (is_bad_inode(inode))
1544 goto out_iput;
1545
1546 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1547 udf_debug("block=%d, partition=%d out of range\n",
1548 ino.logicalBlockNum, ino.partitionReferenceNum);
1549 make_bad_inode(inode);
1550 goto out_iput;
1551 }
1552
1553 return inode;
1554
1555 out_iput:
1556 iput(inode);
1557 return NULL;
1558}
1559
/*
 * udf_add_aext
 *
 * Append one allocation descriptor (eloc/elen) at *extoffset.  If the
 * current descriptor area (in-ICB data when *bh is NULL, otherwise the
 * Allocation Extent Descriptor block in *bh) is full, a new AED block is
 * allocated, chained to the old area, and *bloc/*extoffset/*bh are
 * updated to point into it.  Returns the extent type written, or -1 on
 * error (unsupported allocation type, or block allocation/read failure).
 */
int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	/* Locate the write position: in-core FE data or the AED buffer. */
	if (!*bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = (*bh)->b_data + *extoffset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Not enough room for this descriptor plus the terminating one:
	 * grow the chain with a fresh Allocation Extent Descriptor block. */
	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = *bloc;

		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			*bloc, 0))))
		{
			return -1;
		}
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (*extoffset + adsize > inode->i_sb->s_blocksize)
		{
			/* The last slot cannot even hold one descriptor: move the
			 * final descriptor of the old area into the new block. */
			loffset = *extoffset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			*extoffset = sizeof(struct allocExtDesc) + adsize;
		}
		else
		{
			/* There is room for exactly one more descriptor in the old
			 * area: it becomes the chain pointer written below. */
			loffset = *extoffset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			*extoffset = sizeof(struct allocExtDesc);

			if (*bh)
			{
				aed = (struct allocExtDesc *)(*bh)->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		/* Stamp the new block as an AED (descriptor version per UDF rev). */
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				bloc->logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				bloc->logicalBlockNum, sizeof(tag));
		/* Write the chain pointer (EXT_NEXT_EXTENT_ALLOCDECS) at sptr. */
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
				break;
			}
			case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(*bloc);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		if (*bh)
		{
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((*bh)->b_data, loffset);
			else
				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(*bh, inode);
			udf_release_data(*bh);
		}
		else
			mark_inode_dirty(inode);
		/* Continue in the new AED block. */
		*bh = nbh;
	}

	/* Write the caller's descriptor at the (possibly relocated) offset. */
	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);

	/* Account for the new descriptor and refresh the tag/CRC. */
	if (!*bh)
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct allocExtDesc *)(*bh)->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
		else
			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(*bh, inode);
	}

	return etype;
}
1699
1700int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1701 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1702{
1703 int adsize;
1704 uint8_t *ptr;
1705
1706 if (!bh)
1707 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1708 else
1709 {
1710 ptr = bh->b_data + *extoffset;
1711 atomic_inc(&bh->b_count);
1712 }
1713
1714 switch (UDF_I_ALLOCTYPE(inode))
1715 {
1716 case ICBTAG_FLAG_AD_SHORT:
1717 {
1718 short_ad *sad = (short_ad *)ptr;
1719 sad->extLength = cpu_to_le32(elen);
1720 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1721 adsize = sizeof(short_ad);
1722 break;
1723 }
1724 case ICBTAG_FLAG_AD_LONG:
1725 {
1726 long_ad *lad = (long_ad *)ptr;
1727 lad->extLength = cpu_to_le32(elen);
1728 lad->extLocation = cpu_to_lelb(eloc);
1729 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1730 adsize = sizeof(long_ad);
1731 break;
1732 }
1733 default:
1734 return -1;
1735 }
1736
1737 if (bh)
1738 {
1739 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1740 {
1741 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1742 udf_update_tag((bh)->b_data,
1743 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1744 }
1745 mark_buffer_dirty_inode(bh, inode);
1746 udf_release_data(bh);
1747 }
1748 else
1749 mark_inode_dirty(inode);
1750
1751 if (inc)
1752 *extoffset += adsize;
1753 return (elen >> 30);
1754}
1755
1756int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1757 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1758{
1759 int8_t etype;
1760
1761 while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1762 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1763 {
1764 *bloc = *eloc;
1765 *extoffset = sizeof(struct allocExtDesc);
1766 udf_release_data(*bh);
1767 if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1768 {
1769 udf_debug("reading block %d failed!\n",
1770 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1771 return -1;
1772 }
1773 }
1774
1775 return etype;
1776}
1777
1778int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1779 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1780{
1781 int alen;
1782 int8_t etype;
1783 uint8_t *ptr;
1784
1785 if (!*bh)
1786 {
1787 if (!(*extoffset))
1788 *extoffset = udf_file_entry_alloc_offset(inode);
1789 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1790 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1791 }
1792 else
1793 {
1794 if (!(*extoffset))
1795 *extoffset = sizeof(struct allocExtDesc);
1796 ptr = (*bh)->b_data + *extoffset;
1797 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
1798 }
1799
1800 switch (UDF_I_ALLOCTYPE(inode))
1801 {
1802 case ICBTAG_FLAG_AD_SHORT:
1803 {
1804 short_ad *sad;
1805
1806 if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
1807 return -1;
1808
1809 etype = le32_to_cpu(sad->extLength) >> 30;
1810 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1811 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1812 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1813 break;
1814 }
1815 case ICBTAG_FLAG_AD_LONG:
1816 {
1817 long_ad *lad;
1818
1819 if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
1820 return -1;
1821
1822 etype = le32_to_cpu(lad->extLength) >> 30;
1823 *eloc = lelb_to_cpu(lad->extLocation);
1824 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1825 break;
1826 }
1827 default:
1828 {
1829 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1830 return -1;
1831 }
1832 }
1833
1834 return etype;
1835}
1836
1837static int8_t
1838udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1839 kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1840{
1841 kernel_lb_addr oeloc;
1842 uint32_t oelen;
1843 int8_t etype;
1844
1845 if (bh)
1846 atomic_inc(&bh->b_count);
1847
1848 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1849 {
1850 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
1851
1852 neloc = oeloc;
1853 nelen = (etype << 30) | oelen;
1854 }
1855 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1856 udf_release_data(bh);
1857 return (nelen >> 30);
1858}
1859
/*
 * udf_delete_aext
 *
 * Delete the allocation descriptor at (nbloc, nextoffset) by copying every
 * following descriptor one slot backwards, then clearing the freed slot(s)
 * and fixing up lengthAllocDescs / the in-ICB allocation length.  If the
 * deletion empties the last AED block of the chain, that block is freed.
 * Returns the type of the (zeroed) trailing extent.
 */
int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
{
	struct buffer_head *obh;
	kernel_lb_addr obloc;
	int oextoffset, adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	if (nbh)
	{
		/* Two references: one for the 'obh' alias taken below, one for
		 * the forward walk; both are dropped before returning. */
		atomic_inc(&nbh->b_count);
		atomic_inc(&nbh->b_count);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	/* 'o*' tracks the write position (one slot behind the read position). */
	obh = nbh;
	obloc = nbloc;
	oextoffset = nextoffset;

	/* Step over the descriptor being deleted.
	 * NOTE(review): on this early error return the two b_count
	 * references taken above are not dropped — confirm against callers. */
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
		return -1;

	/* Copy each following descriptor back by one slot. */
	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
		if (obh != nbh)
		{
			/* The read position crossed into the next AED block:
			 * advance the write position into it as well. */
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = nextoffset - adsize;
		}
	}
	/* From here on we write zeroed (terminating) descriptors. */
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (nbh != obh)
	{
		/* The walk ended in a later AED block that is now empty: free
		 * it, and clear TWO slots in the previous area (the moved-up
		 * tail and the now-dead chain pointer) — hence the repeated
		 * call, matching the 2*adsize bookkeeping below. */
		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}
	else
	{
		/* Same area throughout: clear the single freed trailing slot. */
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - adsize);
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);
	return (elen >> 30);
}
1951
Jan Kara60448b12007-05-08 00:35:13 -07001952int8_t inode_bmap(struct inode *inode, sector_t block, kernel_lb_addr *bloc, uint32_t *extoffset,
1953 kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset, struct buffer_head **bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954{
Jan Kara60448b12007-05-08 00:35:13 -07001955 loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 int8_t etype;
1957
1958 if (block < 0)
1959 {
1960 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1961 return -1;
1962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 *extoffset = 0;
1965 *elen = 0;
1966 *bloc = UDF_I_LOCATION(inode);
1967
1968 do
1969 {
1970 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1971 {
Jan Kara60448b12007-05-08 00:35:13 -07001972 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 UDF_I_LENEXTENTS(inode) = lbcount;
1974 return -1;
1975 }
1976 lbcount += *elen;
1977 } while (lbcount <= bcount);
1978
Jan Kara60448b12007-05-08 00:35:13 -07001979 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
1981 return etype;
1982}
1983
Jan Kara60448b12007-05-08 00:35:13 -07001984long udf_block_map(struct inode *inode, sector_t block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985{
1986 kernel_lb_addr eloc, bloc;
Jan Kara60448b12007-05-08 00:35:13 -07001987 uint32_t extoffset, elen;
1988 sector_t offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 struct buffer_head *bh = NULL;
1990 int ret;
1991
1992 lock_kernel();
1993
1994 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
Jan Kara60448b12007-05-08 00:35:13 -07001995 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 else
1997 ret = 0;
1998
1999 unlock_kernel();
2000 udf_release_data(bh);
2001
2002 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2003 return udf_fixed_to_variable(ret);
2004 else
2005 return ret;
2006}