/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements the UBIFS journal.
 *
 * The journal consists of 2 parts - the log and bud LEBs. The log has a fixed
 * length and position, while a bud logical eraseblock is any LEB in the main
 * area. Buds contain file system data - data nodes, inode nodes, etc. The log
 * contains only references to buds and some other stuff like the commit
 * start node. The idea is that when we commit the journal, we do
 * not copy the data, the buds just become indexed. Since after the commit the
 * nodes in bud eraseblocks become leaf nodes of the file system index tree, we
 * use the term "bud". The analogy is obvious: bud eraseblocks contain nodes
 * which will become leaves in the future.
 *
 * The journal is multi-headed because we want to write data to the journal as
 * optimally as possible. It is nice to have nodes belonging to the same inode
 * in one LEB, so we may write data owned by different inodes to different
 * journal heads, although at present only one data head is used.
 *
 * For recovery reasons, the base head contains all inode nodes, all directory
 * entry nodes and all truncate nodes. This means that the other heads contain
 * only data nodes.
 *
 * Bud LEBs may be half-indexed. For example, if the bud was not full at the
 * time of commit, the bud is retained to continue to be used in the journal,
 * even though the "front" of the LEB is now indexed. In that case, the log
 * reference contains the offset where the bud starts for the purposes of the
 * journal.
 *
 * The journal size has to be limited, because the larger the journal is, the
 * longer it takes to mount UBIFS (scanning the journal) and the more memory it
 * takes (indexing in the TNC).
 *
 * All the journal write operations like 'ubifs_jnl_update()' here, which write
 * multiple UBIFS nodes to the journal at one go, are atomic with respect to
 * unclean reboots. Should an unclean reboot happen, the recovery code drops
 * all the nodes.
 */

#include "ubifs.h"

/**
 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
 * @ino: the inode to zero out
 */
static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
{
	memset(ino->padding1, 0, 4);
	memset(ino->padding2, 0, 26);
}

/**
 * zero_dent_node_unused - zero out unused fields of an on-flash directory
 * entry node.
 * @dent: the directory entry to zero out
 */
static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
{
	dent->padding1 = 0;
}

/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
 * node.
 * @trun: the truncation node to zero out
 */
static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
{
	memset(trun->padding, 0, 12);
}

/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @jhead. If the reservation
 * succeeded, the journal head stays locked and later has to be unlocked using
 * 'release_head()'. The 'write_node()' and 'write_head()' functions also
 * unlock it. Returns zero in case of success, %-EAGAIN if commit has to be
 * done, and other negative error codes in case of other failures.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, the base head has smaller nodes written to it, so it is
	 * better to try to allocate space at the ends of eraseblocks. This is
	 * what the squeeze parameter does.
	 */
	ubifs_assert(!c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		return 0;

	/*
	 * The write-buffer wasn't seek'ed or there is not enough space - look
	 * for an LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space, we have to run garbage collector to make
	 * some. But the write-buffer mutex has to be unlocked because
	 * GC also takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may
		 * have allocated new bud for this journal head,
		 * because we dropped @wbuf->io_mutex, so try once
		 * again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process is
		 * trying to write to the same journal head at the same time.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

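	/* The LEB obtained from GC is empty, so the bud starts at offset 0 */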
	offs = 0;

out:
	/*
	 * Make sure we synchronize the write-buffer before we add the new bud
	 * to the log. Otherwise we may have a power cut after the log
	 * reference node for the last bud (@lnum) is written but before the
	 * write-buffer data are written to the next-to-last bud
	 * (@wbuf->lnum). And the effect would be that the recovery would see
	 * that there is corruption in the next-to-last bud.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * Return original error code only if it is not %-EAGAIN,
		 * which is not really an error. Otherwise, return the error
		 * code of 'ubifs_return_leb()'.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}


/**
 * write_node - write node to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @node: node to write
 * @len: node length
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 *
 * This function writes a node to reserved space of journal head @jhead.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
		      int *lnum, int *offs)
{
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;

	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);
	ubifs_prepare_node(c, node, len, 0);

	return ubifs_wbuf_write_nolock(wbuf, node, len);
}

/**
 * write_head - write data to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @buf: buffer to write
 * @len: length to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function is the same as 'write_node()' but it does not assume the
 * buffer it is writing is a node, so it does not prepare it (which means
 * initializing common header and calculating CRC).
 */
static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
		      int *lnum, int *offs, int sync)
{
	int err;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);

	err = ubifs_wbuf_write_nolock(wbuf, buf, len);
	if (err)
		return err;
	if (sync)
		err = ubifs_wbuf_sync_nolock(wbuf);
	return err;
}

/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes space reservation in journal head @jhead. The function
 * takes the commit lock and locks the journal head, and the caller has to
 * unlock the head and finish the reservation with 'finish_reservation()'.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note, the journal head may be unlocked as soon as the data is written, while
 * the commit lock has to be released after the data has been added to the
 * TNC.
 */
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. We should try to commit
		 * once because it could make some dirty space and GC would
		 * make progress, so make the error -EAGAIN so that the code
		 * below will commit and re-try.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}

		/*
		 * This means that the budgeting is incorrect. We always have
		 * to be able to write to the media, because all operations are
		 * budgeted. Deletions are not budgeted, though, but we reserve
		 * an extra LEB for them.
		 */
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants to do one commit. Do this and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* These are budgeting problems, print useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}

/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * the 'make_reservation()' function. It has to be called after each successful
 * 'make_reservation()' invocation.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}

/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes journal space reservation. It must be called after
 * 'make_reservation()'.
 */
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}

/**
 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
 * @mode: inode mode
 */
static int get_dent_type(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
		return UBIFS_ITYPE_REG;
	case S_IFDIR:
		return UBIFS_ITYPE_DIR;
	case S_IFLNK:
		return UBIFS_ITYPE_LNK;
	case S_IFBLK:
		return UBIFS_ITYPE_BLK;
	case S_IFCHR:
		return UBIFS_ITYPE_CHR;
	case S_IFIFO:
		return UBIFS_ITYPE_FIFO;
	case S_IFSOCK:
		return UBIFS_ITYPE_SOCK;
	default:
		BUG();
	}
	return 0;
}

/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode, the data is not
	 * needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}

/**
 * mark_inode_clean - mark UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper function marks UBIFS inode @ui as clean by cleaning the
 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the
 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would
 * just do nothing.
 */
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}

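/**
 * set_dent_cookie - fill the directory entry cookie.
 * @c: UBIFS file-system description object
 * @dent: directory entry node to fill the cookie of
 *
 * When the double-hash feature is enabled, a random 32-bit cookie is stored
 * in the directory entry node so that entries whose names hash to the same
 * key can still be told apart; otherwise the cookie is simply zeroed.
 */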
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
	if (c->double_hash)
		dent->cookie = prandom_u32();
	else
		dent->cookie = 0;
}

/**
 * ubifs_jnl_update - update an inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal.
 *
 * The function writes the host inode @dir last, which is important in case of
 * extended attributes. Indeed, then we guarantee that if the host inode gets
 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed,
 * the extended attribute inode gets flushed too. And this is exactly what the
 * user expects - synchronizing the host inode synchronizes its extended
 * attributes. Similarly, this guarantees that if @dir is synchronized, its
 * directory entry corresponding to @nm gets synchronized too.
 *
 * If the inode (@inode) or the parent directory (@dir) are synchronous, this
 * function synchronizes the write-buffer.
 *
 * This function marks the @dir and @inode inodes as clean and returns zero on
 * success. In case of failure, a negative error code is returned.
 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct fscrypt_name *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *host_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;

	//dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
	//	inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, then there is
	 * no need to attach and write inode data, it is being deleted anyway.
	 * And if the inode is being deleted, no need to synchronize
	 * write-buffer even if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);

	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	/* Make sure to also account for extended attributes */
	len += host_ui->data_len;

	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(dent->name, fname_name(nm), fname_len(nm));
	dent->name[fname_len(nm)] = '\0';
	set_dent_cookie(c, dent);

	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

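	/*
	 * If the last reference is going away, the inode is added to the
	 * orphan list so that an unclean reboot cannot leak it, and the
	 * current commit number is remembered so that
	 * 'ubifs_jnl_delete_inode()' can later tell whether a commit happened
	 * in between.
	 */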
	if (last_reference) {
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);

	if (deletion) {
		err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, we do not remove the inode from TNC even if the last reference
	 * to it has just been deleted, because the inode may still be open.
	 * Instead, the inode has been added to the orphan lists and the orphan
	 * subsystem will take further care of it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
			    UBIFS_INO_NODE_SZ + host_ui->data_len);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	mark_inode_clean(c, host_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}

/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal. Returns %0 if the data node
 * was successfully written, and a negative error code in case of failure.
 */
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len, compr_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);
	bool encrypted = ubifs_crypt_is_encrypted(inode);

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		 (unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(len <= UBIFS_BLOCK_SIZE);

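	/*
	 * Encrypted data is padded up to the cipher block size, so reserve
	 * extra room for that padding in the node buffer.
	 */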
	if (encrypted)
		dlen += UBIFS_CIPHER_BLOCK_SIZE;

	data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
	if (!data) {
		/*
		 * Fall-back to the write reserve buffer. Note, we might be
		 * currently on the memory reclaim path, when the kernel is
		 * trying to free some memory by writing out dirty pages. The
		 * write reserve buffer helps us to guarantee that we are
		 * always able to write the data.
		 */
		allocated = 0;
		mutex_lock(&c->write_reserve_mutex);
		data = c->write_reserve_buf;
	}

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
	ubifs_assert(compr_len <= UBIFS_BLOCK_SIZE);

	if (encrypted) {
		err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
		if (err)
			goto out_free;

	} else {
		data->compr_size = 0;
	}

	dlen = UBIFS_DATA_NODE_SZ + out_len;
	data->compr_type = cpu_to_le16(compr_type);

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, DATAHD, dlen);
	if (err)
		goto out_free;

	err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
	if (err)
		goto out_release;
	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
}

/**
 * ubifs_jnl_write_inode - flush inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal. If the inode is
 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, do not write the attached data. No
	 * need to synchronize the write-buffer either.
	 */
	if (!last_reference) {
		len += ui->data_len;
		sync = IS_SYNC(inode);
	}
	ino = kmalloc(len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 1);
	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	if (last_reference) {
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, len);
	} else {
		union ubifs_key key;

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, len);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}

/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode which includes removing it from orphans,
 * deleting it from TNC and, in some cases, writing a deletion inode to the
 * journal.
 *
 * When regular file inodes are unlinked or a directory inode is removed, the
 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and
 * direntry to the media, and adds the inode to orphans. After this, when the
 * last reference to this inode has been dropped, this function is called. In
 * general, it has to write one more deletion inode to the media, because if
 * a commit happened between 'ubifs_jnl_update()' and
 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal
 * anymore, and in fact it might not be on the flash anymore, because it might
 * have been garbage-collected already. And for optimization reasons UBIFS does
 * not read the orphan area if it has been unmounted cleanly, so it would have
 * no indication in the journal that there is a deleted inode which has to be
 * removed from TNC.
 *
 * However, if there was no commit between 'ubifs_jnl_update()' and
 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
 * inode to the media for the second time. And this is quite a typical case.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check commit number again, because the first test has been done
	 * without @c->commit_sem, so a commit might have happened.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}

/**
 * ubifs_jnl_xrename - cross rename two directory entries.
 * @c: UBIFS file-system description object
 * @fst_dir: parent inode of 1st directory entry to exchange
 * @fst_inode: 1st inode to exchange
 * @fst_nm: name of 1st inode to exchange
 * @snd_dir: parent inode of 2nd directory entry to exchange
 * @snd_inode: 2nd inode to exchange
 * @snd_nm: name of 2nd inode to exchange
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the cross rename operation which may involve
 * writing 2 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
 * returned.
 */
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
		      const struct inode *fst_inode,
		      const struct fscrypt_name *fst_nm,
		      const struct inode *snd_dir,
		      const struct inode *snd_inode,
		      const struct fscrypt_name *snd_nm, int sync)
{
	union ubifs_key key;
	struct ubifs_dent_node *dent1, *dent2;
	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
	int aligned_dlen1, aligned_dlen2;
	int twoparents = (fst_dir != snd_dir);
	void *p;

	//dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu",
	//	fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino);

	ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
	ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
	ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
	ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);

	len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
	if (twoparents)
		len += plen;

	dent1 = kmalloc(len, GFP_NOFS);
	if (!dent1)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent for 1st entry */
	dent1->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
	dent1->inum = cpu_to_le64(fst_inode->i_ino);
	dent1->type = get_dent_type(fst_inode->i_mode);
	dent1->nlen = cpu_to_le16(fname_len(snd_nm));
	memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
	dent1->name[fname_len(snd_nm)] = '\0';
	zero_dent_node_unused(dent1);
	ubifs_prep_grp_node(c, dent1, dlen1, 0);

	/* Make new dent for 2nd entry */
	dent2 = (void *)dent1 + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
	dent2->inum = cpu_to_le64(snd_inode->i_ino);
	dent2->type = get_dent_type(snd_inode->i_mode);
	dent2->nlen = cpu_to_le16(fname_len(fst_nm));
	memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
	dent2->name[fname_len(fst_nm)] = '\0';
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

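	/* The parent inode(s) follow the two directory entries in the group */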
	p = (void *)dent2 + aligned_dlen2;
	if (!twoparents)
		pack_inode(c, p, fst_dir, 1);
	else {
		pack_inode(c, p, fst_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, snd_dir, 1);
	}

	err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, snd_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, fst_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen2;

	ino_key_init(c, &key, fst_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (twoparents) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, snd_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);

	mark_inode_clean(c, ubifs_inode(fst_dir));
	if (twoparents)
		mark_inode_clean(c, ubifs_inode(snd_dir));
	kfree(dent1);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(dent1);
	return err;
}

/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of the directory entry to rename
 * @old_inode: inode the directory entry refers to
 * @old_nm: name of the directory entry to rename
 * @new_dir: parent inode of the new directory entry
 * @new_inode: inode the new directory entry refers to (or %NULL)
 * @new_nm: name of the new directory entry (or of the entry to replace)
 * @whiteout: whiteout inode to insert in place of the old entry (or %NULL)
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the re-name operation which may involve writing up
 * to 4 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
 * returned.
 */
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct inode *old_inode,
		     const struct fscrypt_name *old_nm,
		     const struct inode *new_dir,
		     const struct inode *new_inode,
		     const struct fscrypt_name *new_nm,
		     const struct inode *whiteout, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len;
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	int move = (old_dir != new_dir);
	struct ubifs_inode *uninitialized_var(new_ui);

	//dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
	//	old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
	ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
	ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
	ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
	if (new_inode) {
		new_ui = ubifs_inode(new_inode);
		ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
		ilen = UBIFS_INO_NODE_SZ;
		if (!last_reference)
			ilen += new_ui->data_len;
	} else
		ilen = 0;

	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	if (move)
		len += plen;
	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent */
	dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(new_nm));
	memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
	dent->name[fname_len(new_nm)] = '\0';
	set_dent_cookie(c, dent);
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen1, 0);

	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);

	if (whiteout) {
		dent2->inum = cpu_to_le64(whiteout->i_ino);
		dent2->type = get_dent_type(whiteout->i_mode);
	} else {
		/* Make deletion dent */
		dent2->inum = 0;
		dent2->type = DT_UNKNOWN;
	}
	dent2->nlen = cpu_to_le16(fname_len(old_nm));
	memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
	dent2->name[fname_len(old_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	p = (void *)dent2 + aligned_dlen2;
	if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		p += ALIGN(ilen, 8);
	}

	if (!move)
		pack_inode(c, p, old_dir, 1);
	else {
		pack_inode(c, p, old_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
	}

	if (last_reference) {
		err = ubifs_add_orphan(c, new_inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		new_ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
		if (new_inode)
			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
						  new_inode->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, new_dir->i_ino, new_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, new_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	if (whiteout) {
		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, old_nm);
		if (err)
			goto out_ro;

		ubifs_delete_orphan(c, whiteout->i_ino);
	} else {
		err = ubifs_add_dirt(c, lnum, dlen2);
		if (err)
			goto out_ro;

		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_remove_nm(c, &key, old_nm);
		if (err)
			goto out_ro;
	}

	offs += aligned_dlen2;
	if (new_inode) {
		ino_key_init(c, &key, new_inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
		if (err)
			goto out_ro;
		offs += ALIGN(ilen, 8);
	}

	ino_key_init(c, &key, old_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (move) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, new_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);
	if (new_inode) {
		mark_inode_clean(c, new_ui);
		spin_lock(&new_ui->ui_lock);
		new_ui->synced_i_size = new_ui->ui_size;
		spin_unlock(&new_ui->ui_lock);
	}
	mark_inode_clean(c, ubifs_inode(old_dir));
	if (move)
		mark_inode_clean(c, ubifs_inode(new_dir));
	kfree(dent);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;
}

/**
 * truncate_data_node - re-compress/encrypt a truncated data node.
 * @c: UBIFS file-system description object
 * @inode: inode which refers to the data node
 * @block: data block number
 * @dn: data node to re-compress
 * @new_len: new length
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed/encrypted and re-written.
 */
static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
			      unsigned int block, struct ubifs_data_node *dn,
			      int *new_len)
{
	void *buf;
	int err, dlen, compr_type, out_len, old_dlen;

	out_len = le32_to_cpu(dn->size);
	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);

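	/*
	 * Recover the plain data first (decrypt and decompress as needed),
	 * then compress and encrypt only the part which survives the
	 * truncation.
	 */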
	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto out;
	}

	if (compr_type != UBIFS_COMPR_NONE) {
		err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
		if (err)
			goto out;

		ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
	}

	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
		if (err)
			goto out;

		out_len = old_dlen;
	} else {
		dn->compr_size = 0;
	}

	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
out:
	kfree(buf);
	return err;
}

1327/**
1328 * ubifs_jnl_truncate - update the journal for a truncation.
1329 * @c: UBIFS file-system description object
1330 * @inode: inode to truncate
1331 * @old_size: old size
1332 * @new_size: new size
1333 *
1334 * When the size of a file decreases due to truncation, a truncation node is
1335 * written, the journal tree is updated, and the last data block is re-written
1336 * if it has been affected. The inode is also updated in order to synchronize
1337 * the new inode size.
1338 *
1339 * This function marks the inode as clean and returns zero on success. In case
1340 * of failure, a negative error code is returned.
1341 */
1342int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1343 loff_t old_size, loff_t new_size)
1344{
1345 union ubifs_key key, to_key;
1346 struct ubifs_ino_node *ino;
1347 struct ubifs_trun_node *trun;
1348 struct ubifs_data_node *uninitialized_var(dn);
1349 int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
1350 struct ubifs_inode *ui = ubifs_inode(inode);
1351 ino_t inum = inode->i_ino;
1352 unsigned int blk;
1353
Artem Bityutskiye84461a2008-10-29 12:08:43 +02001354 dbg_jnl("ino %lu, size %lld -> %lld",
1355 (unsigned long)inum, old_size, new_size);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001356 ubifs_assert(!ui->data_len);
1357 ubifs_assert(S_ISREG(inode->i_mode));
1358 ubifs_assert(mutex_is_locked(&ui->ui_mutex));
1359
1360 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
1361 UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
1362 ino = kmalloc(sz, GFP_NOFS);
1363 if (!ino)
1364 return -ENOMEM;
1365
1366 trun = (void *)ino + UBIFS_INO_NODE_SZ;
1367 trun->ch.node_type = UBIFS_TRUN_NODE;
1368 trun->inum = cpu_to_le32(inum);
1369 trun->old_size = cpu_to_le64(old_size);
1370 trun->new_size = cpu_to_le64(new_size);
1371 zero_trun_node_unused(trun);
1372
1373 dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
1374 if (dlen) {
1375 /* Get last data block so it can be truncated */
1376 dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
1377 blk = new_size >> UBIFS_BLOCK_SHIFT;
1378 data_key_init(c, &key, inum, blk);
Artem Bityutskiy515315a2012-01-13 12:33:53 +02001379 dbg_jnlk(&key, "last block key ");
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001380 err = ubifs_tnc_lookup(c, &key, dn);
1381 if (err == -ENOENT)
1382 dlen = 0; /* Not found (so it is a hole) */
1383 else if (err)
1384 goto out_free;
1385 else {
1386 if (le32_to_cpu(dn->size) <= dlen)
1387 dlen = 0; /* Nothing to do */
1388 else {
Richard Weinberger77999532016-09-29 22:20:19 +02001389 err = truncate_data_node(c, inode, blk, dn, &dlen);
1390 if (err)
1391 goto out_free;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001392 }
1393 }
1394 }
1395
1396 /* Must make reservation before allocating sequence numbers */
1397 len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
1398 if (dlen)
1399 len += dlen;
1400 err = make_reservation(c, BASEHD, len);
1401 if (err)
1402 goto out_free;
1403
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001404 pack_inode(c, ino, inode, 0);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001405 ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
1406 if (dlen)
1407 ubifs_prep_grp_node(c, dn, dlen, 1);
1408
1409 err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
1410 if (err)
1411 goto out_release;
1412 if (!sync)
1413 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
1414 release_head(c, BASEHD);
1415
1416 if (dlen) {
1417 sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
1418 err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
1419 if (err)
1420 goto out_ro;
1421 }
1422
1423 ino_key_init(c, &key, inum);
1424 err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
1425 if (err)
1426 goto out_ro;
1427
	err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
	if (err)
		goto out_ro;

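	/*
	 * Remove from the TNC all data blocks beyond the new size: from the
	 * first block past the (possibly partially kept) last block up to and
	 * including the last block of the old size.
	 */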
	bit = new_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
	data_key_init(c, &key, inum, blk);

	bit = old_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
	data_key_init(c, &to_key, inum, blk);

	err = ubifs_tnc_remove_range(c, &key, &to_key);
	if (err)
		goto out_ro;

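	/*
	 * The inode with its new size is now in the journal, so the
	 * synchronized size may be brought up to date and the inode marked
	 * clean.
	 */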
	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}

/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode
 * @nm: extended attribute entry name
 *
 * This function deletes an extended attribute, which is very similar to
 * un-linking regular files: it writes a deletion xentry and a deletion inode,
 * and updates the host inode. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode,
			   const struct fscrypt_name *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	//dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
	//	 host->i_ino, inode->i_ino, nm->name,
	//	 ubifs_inode(inode)->data_len);
	ubifs_assert(inode->i_nlink == 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since we are deleting the inode, we do not bother to attach any data
	 * to it and assume its length is %UBIFS_INO_NODE_SZ.
	 */
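	/*
	 * One buffer holds the deletion xattr entry (padded to 8 bytes), then
	 * the extended attribute inode, then the host inode.
	 */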
	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kmalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

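	/* Build the deletion xattr entry - zero inum marks it as deleted */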
	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(xent->name, fname_name(nm), fname_len(nm));
	xent->name[fname_len(nm)] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from TNC.
	 * Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}

/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated version of an extended attribute inode and
 * the host inode to the journal (to the base head). The extended attribute
 * inode is written after the host inode in order to guarantee that the
 * extended attribute will be flushed when the host inode is synchronized by
 * 'fsync()' and, consequently, the write-buffer is synchronized. This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(host->i_nlink > 0);
	ubifs_assert(inode->i_nlink > 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

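	/*
	 * Both inodes are packed back-to-back into a single buffer: the host
	 * inode first, then the extended attribute inode, each padded to 8
	 * bytes.
	 */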
	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	ino = kmalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);

	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

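	/* Update the TNC with the new positions of both inodes */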
	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
