/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements the UBIFS journal.
 *
 * The journal consists of two parts - the log and bud LEBs. The log has a
 * fixed length and position, while a bud logical eraseblock is any LEB in the
 * main area. Buds contain file system data - data nodes, inode nodes, etc.
 * The log contains only references to buds and some other stuff like the
 * commit start node. The idea is that when we commit the journal, we do not
 * copy the data; the buds just become indexed. Since after the commit the
 * nodes in bud eraseblocks become leaf nodes of the file system index tree,
 * we use the term "bud". The analogy is obvious: bud eraseblocks contain
 * nodes which will become leaves in the future.
 *
 * The journal is multi-headed because we want to write data to the journal as
 * optimally as possible. It is nice to have nodes belonging to the same inode
 * in one LEB, so we may write data owned by different inodes to different
 * journal heads, although at present only one data head is used.
 *
 * For recovery reasons, the base head contains all inode nodes, all directory
 * entry nodes and all truncate nodes. This means that the other heads contain
 * only data nodes.
 *
 * Bud LEBs may be half-indexed. For example, if the bud was not full at the
 * time of commit, the bud is retained to continue to be used in the journal,
 * even though the "front" of the LEB is now indexed. In that case, the log
 * reference contains the offset where the bud starts for the purposes of the
 * journal.
 *
 * The journal size has to be limited, because the larger the journal is, the
 * longer it takes to mount UBIFS (scanning the journal) and the more memory
 * it takes (indexing in the TNC).
 *
 * All the journal write operations like 'ubifs_jnl_update()' here, which
 * write multiple UBIFS nodes to the journal at one go, are atomic with
 * respect to unclean reboots. Should an unclean reboot happen, the recovery
 * code drops all the nodes.
 */

#include "ubifs.h"

63/**
64 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
65 * @ino: the inode to zero out
66 */
67static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
68{
69 memset(ino->padding1, 0, 4);
70 memset(ino->padding2, 0, 26);
71}
72
73/**
74 * zero_dent_node_unused - zero out unused fields of an on-flash directory
75 * entry node.
76 * @dent: the directory entry to zero out
77 */
78static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
79{
80 dent->padding1 = 0;
}

/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
85 * node.
86 * @trun: the truncation node to zero out
87 */
88static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
89{
90 memset(trun->padding, 0, 12);
91}
92
93/**
94 * reserve_space - reserve space in the journal.
95 * @c: UBIFS file-system description object
96 * @jhead: journal head number
97 * @len: node length
98 *
 * This function reserves space in journal head @jhead. If the reservation
100 * succeeded, the journal head stays locked and later has to be unlocked using
101 * 'release_head()'. 'write_node()' and 'write_head()' functions also unlock
102 * it. Returns zero in case of success, %-EAGAIN if commit has to be done, and
103 * other negative error codes in case of other failures.
104 */
105static int reserve_space(struct ubifs_info *c, int jhead, int len)
106{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
109
110 /*
111 * Typically, the base head has smaller nodes written to it, so it is
112 * better to try to allocate space at the ends of eraseblocks. This is
113 * what the squeeze parameter does.
114 */
	ubifs_assert(!c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
117again:
118 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
119
	if (c->ro_error) {
		err = -EROFS;
122 goto out_unlock;
123 }
124
125 avail = c->leb_size - wbuf->offs - wbuf->used;
126 if (wbuf->lnum != -1 && avail >= len)
127 return 0;
128
	/*
	 * The write-buffer has not been seeked, or there is not enough space
	 * - look for an LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

137 err = lnum;
138 if (err != -ENOSPC)
139 goto out_unlock;
140
141 /*
142 * No free space, we have to run garbage collector to make
143 * some. But the write-buffer mutex has to be unlocked because
144 * GC also takes it.
145 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);
148
149 lnum = ubifs_garbage_collect(c, 0);
150 if (lnum < 0) {
151 err = lnum;
152 if (err != -ENOSPC)
153 return err;
154
155 /*
156 * GC could not make a free LEB. But someone else may
157 * have allocated new bud for this journal head,
158 * because we dropped @wbuf->io_mutex, so try once
159 * again.
160 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
164 dbg_jnl("retry (%d)", retries);
165 goto again;
166 }
167
168 dbg_jnl("return -ENOSPC");
169 return err;
170 }
171
172 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;
175
176 if (wbuf->lnum != -1 && avail >= len) {
177 /*
178 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process is
		 * trying to write to the same journal head at the same time.
181 */
182 dbg_jnl("return LEB %d back, already have LEB %d:%d",
183 lnum, wbuf->lnum, wbuf->offs + wbuf->used);
184 err = ubifs_return_leb(c, lnum);
185 if (err)
186 goto out_unlock;
187 return 0;
188 }
189
	offs = 0;
191
192out:
	/*
194 * Make sure we synchronize the write-buffer before we add the new bud
195 * to the log. Otherwise we may have a power cut after the log
196 * reference node for the last bud (@lnum) is written but before the
197 * write-buffer data are written to the next-to-last bud
198 * (@wbuf->lnum). And the effect would be that the recovery would see
199 * that there is corruption in the next-to-last bud.
200 */
201 err = ubifs_wbuf_sync_nolock(wbuf);
202 if (err)
203 goto out_return;
204 err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
205 if (err)
206 goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
209 goto out_unlock;
210
211 return 0;
212
213out_unlock:
214 mutex_unlock(&wbuf->io_mutex);
215 return err;
216
217out_return:
218 /* An error occurred and the LEB has to be returned to lprops */
219 ubifs_assert(err < 0);
220 err1 = ubifs_return_leb(c, lnum);
221 if (err1 && err == -EAGAIN)
222 /*
223 * Return original error code only if it is not %-EAGAIN,
224 * which is not really an error. Otherwise, return the error
225 * code of 'ubifs_return_leb()'.
226 */
227 err = err1;
228 mutex_unlock(&wbuf->io_mutex);
229 return err;
230}
231
232/**
233 * write_node - write node to a journal head.
234 * @c: UBIFS file-system description object
235 * @jhead: journal head
236 * @node: node to write
237 * @len: node length
238 * @lnum: LEB number written is returned here
239 * @offs: offset written is returned here
240 *
241 * This function writes a node to reserved space of journal head @jhead.
242 * Returns zero in case of success and a negative error code in case of
243 * failure.
244 */
245static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
246 int *lnum, int *offs)
247{
248 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
249
250 ubifs_assert(jhead != GCHD);
251
252 *lnum = c->jheads[jhead].wbuf.lnum;
253 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
254
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);
	ubifs_prepare_node(c, node, len, 0);
258
259 return ubifs_wbuf_write_nolock(wbuf, node, len);
260}
261
262/**
263 * write_head - write data to a journal head.
264 * @c: UBIFS file-system description object
265 * @jhead: journal head
266 * @buf: buffer to write
267 * @len: length to write
268 * @lnum: LEB number written is returned here
269 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to be synchronized
271 *
272 * This function is the same as 'write_node()' but it does not assume the
273 * buffer it is writing is a node, so it does not prepare it (which means
274 * initializing common header and calculating CRC).
275 */
276static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
277 int *lnum, int *offs, int sync)
278{
279 int err;
280 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
281
282 ubifs_assert(jhead != GCHD);
283
284 *lnum = c->jheads[jhead].wbuf.lnum;
285 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);

289 err = ubifs_wbuf_write_nolock(wbuf, buf, len);
290 if (err)
291 return err;
292 if (sync)
293 err = ubifs_wbuf_sync_nolock(wbuf);
294 return err;
295}
296
297/**
298 * make_reservation - reserve journal space.
299 * @c: UBIFS file-system description object
300 * @jhead: journal head
301 * @len: how many bytes to reserve
302 *
303 * This function makes space reservation in journal head @jhead. The function
304 * takes the commit lock and locks the journal head, and the caller has to
305 * unlock the head and finish the reservation with 'finish_reservation()'.
306 * Returns zero in case of success and a negative error code in case of
307 * failure.
308 *
309 * Note, the journal head may be unlocked as soon as the data is written, while
310 * the commit lock has to be released after the data has been added to the
311 * TNC.
312 */
313static int make_reservation(struct ubifs_info *c, int jhead, int len)
314{
315 int err, cmt_retries = 0, nospc_retries = 0;
316
317again:
318 down_read(&c->commit_sem);
319 err = reserve_space(c, jhead, len);
320 if (!err)
321 return 0;
322 up_read(&c->commit_sem);
323
324 if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. We should try to commit
		 * once because it could make some dirty space and GC would
		 * make progress, so make the error -EAGAIN so that the code
		 * below commits and re-tries.
		 */
331 if (nospc_retries++ < 2) {
332 dbg_jnl("no space, retry");
333 err = -EAGAIN;
334 }
335
336 /*
337 * This means that the budgeting is incorrect. We always have
338 * to be able to write to the media, because all operations are
339 * budgeted. Deletions are not budgeted, though, but we reserve
340 * an extra LEB for them.
341 */
342 }
343
344 if (err != -EAGAIN)
345 goto out;
346
347 /*
348 * -EAGAIN means that the journal is full or too large, or the above
349 * code wants to do one commit. Do this and re-try.
350 */
351 if (cmt_retries > 128) {
352 /*
353 * This should not happen unless the journal size limitations
354 * are too tough.
355 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
358 goto out;
359 } else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);
362
363 dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
364 cmt_retries);
365 cmt_retries += 1;
366
367 err = ubifs_run_commit(c);
368 if (err)
369 return err;
370 goto again;
371
372out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* These are some budgeting problems, print useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
382 up_write(&c->commit_sem);
383 }
384 return err;
385}
386
387/**
388 * release_head - release a journal head.
389 * @c: UBIFS file-system description object
390 * @jhead: journal head
391 *
392 * This function releases journal head @jhead which was locked by
393 * the 'make_reservation()' function. It has to be called after each successful
394 * 'make_reservation()' invocation.
395 */
396static inline void release_head(struct ubifs_info *c, int jhead)
397{
398 mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
399}
400
401/**
402 * finish_reservation - finish a reservation.
403 * @c: UBIFS file-system description object
404 *
405 * This function finishes journal space reservation. It must be called after
406 * 'make_reservation()'.
407 */
408static void finish_reservation(struct ubifs_info *c)
409{
410 up_read(&c->commit_sem);
411}
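
/*
 * All of the 'ubifs_jnl_*()' functions below follow the same basic pattern:
 * reserve space in a journal head, write the node (group), release the head,
 * add the on-flash position of every written node to the TNC, and finally
 * finish the reservation. What follows is a minimal, illustrative sketch of
 * that flow (a hypothetical caller, not part of UBIFS and deliberately not
 * compiled); error handling is simplified compared to the real functions.
 */
#if 0
static int jnl_write_sketch(struct ubifs_info *c, const union ubifs_key *key,
			    void *node, int len)
{
	int err, lnum, offs;

	/* Takes @c->commit_sem and locks the base head's write-buffer */
	err = make_reservation(c, BASEHD, len);
	if (err)
		return err;

	/* Prepare the node, write it, and remember where it went */
	err = write_node(c, BASEHD, node, len, &lnum, &offs);
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	/* Index the node - after the next commit it becomes a TNC leaf */
	err = ubifs_tnc_add(c, key, lnum, offs, len);
	if (err)
		goto out_ro;

	finish_reservation(c);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
#endif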
412
413/**
414 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
415 * @mode: inode mode
416 */
417static int get_dent_type(int mode)
418{
419 switch (mode & S_IFMT) {
420 case S_IFREG:
421 return UBIFS_ITYPE_REG;
422 case S_IFDIR:
423 return UBIFS_ITYPE_DIR;
424 case S_IFLNK:
425 return UBIFS_ITYPE_LNK;
426 case S_IFBLK:
427 return UBIFS_ITYPE_BLK;
428 case S_IFCHR:
429 return UBIFS_ITYPE_CHR;
430 case S_IFIFO:
431 return UBIFS_ITYPE_FIFO;
432 case S_IFSOCK:
433 return UBIFS_ITYPE_SOCK;
434 default:
435 BUG();
436 }
437 return 0;
438}
439
440/**
441 * pack_inode - pack an inode node.
442 * @c: UBIFS file-system description object
443 * @ino: buffer in which to pack inode node
444 * @inode: inode to pack
445 * @last: indicates the last node of the group
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);
452
453 ino->ch.node_type = UBIFS_INO_NODE;
454 ino_key_init_flash(c, &ino->key, inode->i_ino);
455 ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
456 ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
457 ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
458 ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
459 ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
460 ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
461 ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
465 ino->flags = cpu_to_le32(ui->flags);
466 ino->size = cpu_to_le64(ui->ui_size);
467 ino->nlink = cpu_to_le32(inode->i_nlink);
468 ino->compr_type = cpu_to_le16(ui->compr_type);
469 ino->data_len = cpu_to_le32(ui->data_len);
470 ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
471 ino->xattr_size = cpu_to_le32(ui->xattr_size);
472 ino->xattr_names = cpu_to_le32(ui->xattr_names);
473 zero_ino_node_unused(ino);
474
475 /*
476 * Drop the attached data if this is a deletion inode, the data is not
477 * needed anymore.
478 */
479 if (!last_reference) {
480 memcpy(ino->data, ui->data, ui->data_len);
481 data_len = ui->data_len;
482 }
483
484 ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
485}
486
487/**
488 * mark_inode_clean - mark UBIFS inode as clean.
489 * @c: UBIFS file-system description object
490 * @ui: UBIFS inode to mark as clean
491 *
492 * This helper function marks UBIFS inode @ui as clean by cleaning the
493 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the
494 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would
495 * just do nothing.
496 */
497static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
498{
499 if (ui->dirty)
500 ubifs_release_dirty_inode_budget(c, ui);
501 ui->dirty = 0;
502}
503
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
505{
506 if (c->double_hash)
507 dent->cookie = prandom_u32();
508 else
509 dent->cookie = 0;
510}
511
/**
513 * ubifs_jnl_update - update inode.
514 * @c: UBIFS file-system description object
515 * @dir: parent inode or host inode in case of extended attributes
516 * @nm: directory entry name
517 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
519 * @xent: non-zero if the directory entry is an extended attribute entry
520 *
521 * This function updates an inode by writing a directory entry (or extended
522 * attribute entry), the inode itself, and the parent directory inode (or the
523 * host inode) to the journal.
524 *
525 * The function writes the host inode @dir last, which is important in case of
526 * extended attributes. Indeed, then we guarantee that if the host inode gets
527 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed,
528 * the extended attribute inode gets flushed too. And this is exactly what the
529 * user expects - synchronizing the host inode synchronizes its extended
530 * attributes. Similarly, this guarantees that if @dir is synchronized, its
531 * directory entry corresponding to @nm gets synchronized too.
532 *
533 * If the inode (@inode) or the parent directory (@dir) are synchronous, this
534 * function synchronizes the write-buffer.
535 *
536 * This function marks the @dir and @inode inodes as clean and returns zero on
537 * success. In case of failure, a negative error code is returned.
538 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct fscrypt_name *nm, const struct inode *inode,
		     int deletion, int xent)
542{
543 int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
544 int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
545 int last_reference = !!(deletion && inode->i_nlink == 0);
546 struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *host_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
549 struct ubifs_ino_node *ino;
550 union ubifs_key dent_key, ino_key;
551
	//dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
	//	  inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	ilen = UBIFS_INO_NODE_SZ;
558
559 /*
560 * If the last reference to the inode is being deleted, then there is
561 * no need to attach and write inode data, it is being deleted anyway.
562 * And if the inode is being deleted, no need to synchronize
563 * write-buffer even if the inode is synchronous.
564 */
565 if (!last_reference) {
566 ilen += ui->data_len;
567 sync |= IS_SYNC(inode);
568 }
569
570 aligned_dlen = ALIGN(dlen, 8);
571 aligned_ilen = ALIGN(ilen, 8);

	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	/* Make sure to also account for extended attributes */
	len += host_ui->data_len;

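	/*
	 * The node group written below is laid out as: the directory entry
	 * node (@dlen bytes, padded out to @aligned_dlen), then the inode
	 * node (@ilen bytes, padded out to @aligned_ilen), and finally the
	 * parent/host inode node of %UBIFS_INO_NODE_SZ + @host_ui->data_len
	 * bytes, which covers any attached extended attribute data.
	 */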
	dent = kmalloc(len, GFP_NOFS);
578 if (!dent)
579 return -ENOMEM;
580
581 /* Make reservation before allocating sequence numbers */
582 err = make_reservation(c, BASEHD, len);
583 if (err)
584 goto out_free;
585
586 if (!xent) {
587 dent->ch.node_type = UBIFS_DENT_NODE;
588 dent_key_init(c, &dent_key, dir->i_ino, nm);
589 } else {
590 dent->ch.node_type = UBIFS_XENT_NODE;
591 xent_key_init(c, &dent_key, dir->i_ino, nm);
592 }
593
594 key_write(c, &dent_key, dent->key);
595 dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
596 dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(dent->name, fname_name(nm), fname_len(nm));
	dent->name[fname_len(nm)] = '\0';
	set_dent_cookie(c, dent);

	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

610 if (last_reference) {
611 err = ubifs_add_orphan(c, inode->i_ino);
612 if (err) {
613 release_head(c, BASEHD);
614 goto out_finish;
615 }
		ui->del_cmtno = c->cmt_no;
	}
618
619 err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
620 if (err)
621 goto out_release;
622 if (!sync) {
623 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
624
625 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
626 ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
627 }
628 release_head(c, BASEHD);
629 kfree(dent);
630
631 if (deletion) {
632 err = ubifs_tnc_remove_nm(c, &dent_key, nm);
633 if (err)
634 goto out_ro;
635 err = ubifs_add_dirt(c, lnum, dlen);
636 } else
637 err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
638 if (err)
639 goto out_ro;
640
641 /*
642 * Note, we do not remove the inode from TNC even if the last reference
643 * to it has just been deleted, because the inode may still be opened.
644 * Instead, the inode has been added to orphan lists and the orphan
645 * subsystem will take further care about it.
646 */
647 ino_key_init(c, &ino_key, inode->i_ino);
648 ino_offs = dent_offs + aligned_dlen;
649 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
650 if (err)
651 goto out_ro;
652
653 ino_key_init(c, &ino_key, dir->i_ino);
654 ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
			    UBIFS_INO_NODE_SZ + host_ui->data_len);
	if (err)
658 goto out_ro;
659
660 finish_reservation(c);
661 spin_lock(&ui->ui_lock);
662 ui->synced_i_size = ui->ui_size;
663 spin_unlock(&ui->ui_lock);
664 mark_inode_clean(c, ui);
	mark_inode_clean(c, host_ui);
	return 0;
667
668out_finish:
669 finish_reservation(c);
670out_free:
671 kfree(dent);
672 return err;
673
674out_release:
675 release_head(c, BASEHD);
	kfree(dent);
out_ro:
678 ubifs_ro_mode(c, err);
679 if (last_reference)
680 ubifs_delete_orphan(c, inode->i_ino);
681 finish_reservation(c);
682 return err;
683}
684
685/**
686 * ubifs_jnl_write_data - write a data node to the journal.
687 * @c: UBIFS file-system description object
688 * @inode: inode the data node belongs to
689 * @key: node key
690 * @buf: buffer to write
691 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
692 *
693 * This function writes a data node to the journal. Returns %0 if the data node
694 * was successfully written, and a negative error code in case of failure.
695 */
696int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
697 const union ubifs_key *key, const void *buf, int len)
698{
699 struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len, compr_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);
	bool encrypted = ubifs_crypt_is_encrypted(inode);

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		 (unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(len <= UBIFS_BLOCK_SIZE);

	if (encrypted)
		dlen += UBIFS_CIPHER_BLOCK_SIZE;

	data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
713 if (!data) {
714 /*
715 * Fall-back to the write reserve buffer. Note, we might be
716 * currently on the memory reclaim path, when the kernel is
717 * trying to free some memory by writing out dirty pages. The
718 * write reserve buffer helps us to guarantee that we are
719 * always able to write the data.
720 */
721 allocated = 0;
722 mutex_lock(&c->write_reserve_mutex);
723 data = c->write_reserve_buf;
724 }

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
732 compr_type = UBIFS_COMPR_NONE;
733 else
734 compr_type = ui->compr_type;
735
	out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
	ubifs_assert(compr_len <= UBIFS_BLOCK_SIZE);

	if (encrypted) {
		err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
		if (err)
			goto out_free;

	} else {
		data->compr_size = 0;
		out_len = compr_len;
	}

750 dlen = UBIFS_DATA_NODE_SZ + out_len;
751 data->compr_type = cpu_to_le16(compr_type);
752
753 /* Make reservation before allocating sequence numbers */
754 err = make_reservation(c, DATAHD, dlen);
755 if (err)
756 goto out_free;
757
758 err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
759 if (err)
760 goto out_release;
761 ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
762 release_head(c, DATAHD);
763
764 err = ubifs_tnc_add(c, key, lnum, offs, dlen);
765 if (err)
766 goto out_ro;
767
768 finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;
774
775out_release:
776 release_head(c, DATAHD);
777out_ro:
778 ubifs_ro_mode(c, err);
779 finish_reservation(c);
780out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
786}
787
788/**
789 * ubifs_jnl_write_inode - flush inode to the journal.
790 * @c: UBIFS file-system description object
791 * @inode: inode to flush
 *
793 * This function writes inode @inode to the journal. If the inode is
794 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
795 * success and a negative error code in case of failure.
796 */
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
807 * If the inode is being deleted, do not write the attached data. No
808 * need to synchronize the write-buffer either.
809 */
	if (!last_reference) {
		len += ui->data_len;
812 sync = IS_SYNC(inode);
813 }
814 ino = kmalloc(len, GFP_NOFS);
815 if (!ino)
816 return -ENOMEM;
817
818 /* Make reservation before allocating sequence numbers */
819 err = make_reservation(c, BASEHD, len);
820 if (err)
821 goto out_free;
822
	pack_inode(c, ino, inode, 1);
	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
825 if (err)
826 goto out_release;
827 if (!sync)
828 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
829 inode->i_ino);
830 release_head(c, BASEHD);
831
	if (last_reference) {
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
834 if (err)
835 goto out_ro;
836 ubifs_delete_orphan(c, inode->i_ino);
837 err = ubifs_add_dirt(c, lnum, len);
838 } else {
839 union ubifs_key key;
840
841 ino_key_init(c, &key, inode->i_ino);
842 err = ubifs_tnc_add(c, &key, lnum, offs, len);
843 }
844 if (err)
845 goto out_ro;
846
847 finish_reservation(c);
848 spin_lock(&ui->ui_lock);
849 ui->synced_i_size = ui->ui_size;
850 spin_unlock(&ui->ui_lock);
851 kfree(ino);
852 return 0;
853
854out_release:
855 release_head(c, BASEHD);
856out_ro:
857 ubifs_ro_mode(c, err);
858 finish_reservation(c);
859out_free:
860 kfree(ino);
861 return err;
862}
863
864/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
867 * @inode: inode to delete
868 *
869 * This function deletes inode @inode which includes removing it from orphans,
870 * deleting it from TNC and, in some cases, writing a deletion inode to the
871 * journal.
872 *
873 * When regular file inodes are unlinked or a directory inode is removed, the
 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and
 * direntry to the media, and adds the inode to orphans. After this, when the
876 * last reference to this inode has been dropped, this function is called. In
877 * general, it has to write one more deletion inode to the media, because if
878 * a commit happened between 'ubifs_jnl_update()' and
879 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal
 * anymore, and in fact it might not be on the flash anymore, because it might
 * have been garbage-collected already. And for optimization reasons UBIFS does
 * not read the orphan area if it has been unmounted cleanly, so it would have
883 * no indication in the journal that there is a deleted inode which has to be
884 * removed from TNC.
885 *
886 * However, if there was no commit between 'ubifs_jnl_update()' and
887 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
 * inode to the media for the second time. And this is quite a typical case.
 *
890 * This function returns zero in case of success and a negative error code in
891 * case of failure.
892 */
893int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
894{
895 int err;
896 struct ubifs_inode *ui = ubifs_inode(inode);
897
898 ubifs_assert(inode->i_nlink == 0);
899
900 if (ui->del_cmtno != c->cmt_no)
901 /* A commit happened for sure */
902 return ubifs_jnl_write_inode(c, inode);
903
904 down_read(&c->commit_sem);
905 /*
906 * Check commit number again, because the first test has been done
907 * without @c->commit_sem, so a commit might have happened.
908 */
909 if (ui->del_cmtno != c->cmt_no) {
910 up_read(&c->commit_sem);
911 return ubifs_jnl_write_inode(c, inode);
912 }
913
	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
920 return err;
921}
922
923/**
 * ubifs_jnl_xrename - cross rename two directory entries.
 * @c: UBIFS file-system description object
 * @fst_dir: parent inode of 1st directory entry to exchange
 * @fst_inode: 1st inode to exchange
 * @fst_nm: name of 1st inode to exchange
 * @snd_dir: parent inode of 2nd directory entry to exchange
 * @snd_inode: 2nd inode to exchange
 * @snd_nm: name of 2nd inode to exchange
 * @sync: non-zero if the write-buffer has to be synchronized
933 *
934 * This function implements the cross rename operation which may involve
935 * writing 2 inodes and 2 directory entries. It marks the written inodes as clean
936 * and returns zero on success. In case of failure, a negative error code is
937 * returned.
938 */
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
		      const struct inode *fst_inode,
		      const struct fscrypt_name *fst_nm,
		      const struct inode *snd_dir,
		      const struct inode *snd_inode,
		      const struct fscrypt_name *snd_nm, int sync)
{
	union ubifs_key key;
	struct ubifs_dent_node *dent1, *dent2;
	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
	int aligned_dlen1, aligned_dlen2;
	int twoparents = (fst_dir != snd_dir);
	void *p;

	//dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu",
	//	  fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino);

956 ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
957 ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
958 ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
959 ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));
960
	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
	aligned_dlen1 = ALIGN(dlen1, 8);
964 aligned_dlen2 = ALIGN(dlen2, 8);
965
966 len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
967 if (twoparents)
968 len += plen;
969
970 dent1 = kmalloc(len, GFP_NOFS);
971 if (!dent1)
972 return -ENOMEM;
973
974 /* Make reservation before allocating sequence numbers */
975 err = make_reservation(c, BASEHD, len);
976 if (err)
977 goto out_free;
978
979 /* Make new dent for 1st entry */
980 dent1->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
	dent1->inum = cpu_to_le64(fst_inode->i_ino);
	dent1->type = get_dent_type(fst_inode->i_mode);
	dent1->nlen = cpu_to_le16(fname_len(snd_nm));
	memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
	dent1->name[fname_len(snd_nm)] = '\0';
	zero_dent_node_unused(dent1);
988 ubifs_prep_grp_node(c, dent1, dlen1, 0);
989
990 /* Make new dent for 2nd entry */
991 dent2 = (void *)dent1 + aligned_dlen1;
992 dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
	dent2->inum = cpu_to_le64(snd_inode->i_ino);
	dent2->type = get_dent_type(snd_inode->i_mode);
	dent2->nlen = cpu_to_le16(fname_len(fst_nm));
	memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
	dent2->name[fname_len(fst_nm)] = '\0';
	zero_dent_node_unused(dent2);
1000 ubifs_prep_grp_node(c, dent2, dlen2, 0);
1001
1002 p = (void *)dent2 + aligned_dlen2;
1003 if (!twoparents)
1004 pack_inode(c, p, fst_dir, 1);
1005 else {
1006 pack_inode(c, p, fst_dir, 0);
1007 p += ALIGN(plen, 8);
1008 pack_inode(c, p, snd_dir, 1);
1009 }
1010
1011 err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
1012 if (err)
1013 goto out_release;
1014 if (!sync) {
1015 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1016
1017 ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
1018 ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
1019 }
1020 release_head(c, BASEHD);
1021
	dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, snd_nm);
	if (err)
1025 goto out_ro;
1026
1027 offs += aligned_dlen1;
	dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, fst_nm);
	if (err)
1031 goto out_ro;
1032
1033 offs += aligned_dlen2;
1034
1035 ino_key_init(c, &key, fst_dir->i_ino);
1036 err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1037 if (err)
1038 goto out_ro;
1039
1040 if (twoparents) {
1041 offs += ALIGN(plen, 8);
1042 ino_key_init(c, &key, snd_dir->i_ino);
1043 err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1044 if (err)
1045 goto out_ro;
1046 }
1047
1048 finish_reservation(c);
1049
1050 mark_inode_clean(c, ubifs_inode(fst_dir));
1051 if (twoparents)
1052 mark_inode_clean(c, ubifs_inode(snd_dir));
1053 kfree(dent1);
1054 return 0;
1055
1056out_release:
1057 release_head(c, BASEHD);
1058out_ro:
1059 ubifs_ro_mode(c, err);
1060 finish_reservation(c);
1061out_free:
1062 kfree(dent1);
1063 return err;
1064}
1065
1066/**
 * ubifs_jnl_rename - rename a directory entry.
1068 * @c: UBIFS file-system description object
 * @old_dir: parent inode of directory entry to rename
 * @old_inode: directory entry's inode to rename
 * @old_nm: name of the old directory entry to rename
 * @new_dir: parent inode of directory entry to rename
 * @new_inode: new directory entry's inode (or the inode of the directory
 *	       entry to replace)
 * @new_nm: new name of the new directory entry
 * @whiteout: whiteout inode
 * @sync: non-zero if the write-buffer has to be synchronized
1074 *
1075 * This function implements the re-name operation which may involve writing up
 * to 4 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
1078 * returned.
1079 */
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct inode *old_inode,
		     const struct fscrypt_name *old_nm,
		     const struct inode *new_dir,
		     const struct inode *new_inode,
		     const struct fscrypt_name *new_nm,
		     const struct inode *whiteout, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len;
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	int move = (old_dir != new_dir);
	struct ubifs_inode *uninitialized_var(new_ui);

	//dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
	//	  old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
1100 ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
1101 ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
1102 ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
1103
	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
	if (new_inode) {
1107 new_ui = ubifs_inode(new_inode);
1108 ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
1109 ilen = UBIFS_INO_NODE_SZ;
1110 if (!last_reference)
1111 ilen += new_ui->data_len;
1112 } else
1113 ilen = 0;
1114
1115 aligned_dlen1 = ALIGN(dlen1, 8);
1116 aligned_dlen2 = ALIGN(dlen2, 8);
1117 len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	if (move)
		len += plen;
1120 dent = kmalloc(len, GFP_NOFS);
1121 if (!dent)
1122 return -ENOMEM;
1123
1124 /* Make reservation before allocating sequence numbers */
1125 err = make_reservation(c, BASEHD, len);
1126 if (err)
1127 goto out_free;
1128
1129 /* Make new dent */
1130 dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(new_nm));
	memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
	dent->name[fname_len(new_nm)] = '\0';
	set_dent_cookie(c, dent);
	zero_dent_node_unused(dent);
1139 ubifs_prep_grp_node(c, dent, dlen1, 0);
1140
	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);

1145 if (whiteout) {
1146 dent2->inum = cpu_to_le64(whiteout->i_ino);
1147 dent2->type = get_dent_type(whiteout->i_mode);
1148 } else {
1149 /* Make deletion dent */
1150 dent2->inum = 0;
1151 dent2->type = DT_UNKNOWN;
1152 }
	dent2->nlen = cpu_to_le16(fname_len(old_nm));
	memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
	dent2->name[fname_len(old_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
1158 ubifs_prep_grp_node(c, dent2, dlen2, 0);
1159
1160 p = (void *)dent2 + aligned_dlen2;
1161 if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		p += ALIGN(ilen, 8);
1164 }
1165
1166 if (!move)
		pack_inode(c, p, old_dir, 1);
	else {
		pack_inode(c, p, old_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
	}
1173
1174 if (last_reference) {
1175 err = ubifs_add_orphan(c, new_inode->i_ino);
1176 if (err) {
1177 release_head(c, BASEHD);
1178 goto out_finish;
1179 }
		new_ui->del_cmtno = c->cmt_no;
	}
1182
1183 err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
1184 if (err)
1185 goto out_release;
1186 if (!sync) {
1187 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1188
1189 ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
1190 ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
1191 if (new_inode)
1192 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
1193 new_inode->i_ino);
1194 }
1195 release_head(c, BASEHD);
1196
	dent_key_init(c, &key, new_dir->i_ino, new_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, new_nm);
	if (err)
1200 goto out_ro;
1201
	offs += aligned_dlen1;
	if (whiteout) {
		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, old_nm);
		if (err)
			goto out_ro;

		ubifs_delete_orphan(c, whiteout->i_ino);
	} else {
		err = ubifs_add_dirt(c, lnum, dlen2);
		if (err)
			goto out_ro;

		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_remove_nm(c, &key, old_nm);
		if (err)
			goto out_ro;
	}
1220
1221 offs += aligned_dlen2;
	if (new_inode) {
1223 ino_key_init(c, &key, new_inode->i_ino);
1224 err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
1225 if (err)
1226 goto out_ro;
1227 offs += ALIGN(ilen, 8);
1228 }
1229
1230 ino_key_init(c, &key, old_dir->i_ino);
1231 err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1232 if (err)
1233 goto out_ro;
1234
	if (move) {
		offs += ALIGN(plen, 8);
1237 ino_key_init(c, &key, new_dir->i_ino);
1238 err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1239 if (err)
1240 goto out_ro;
1241 }
1242
1243 finish_reservation(c);
1244 if (new_inode) {
1245 mark_inode_clean(c, new_ui);
1246 spin_lock(&new_ui->ui_lock);
1247 new_ui->synced_i_size = new_ui->ui_size;
1248 spin_unlock(&new_ui->ui_lock);
1249 }
1250 mark_inode_clean(c, ubifs_inode(old_dir));
1251 if (move)
1252 mark_inode_clean(c, ubifs_inode(new_dir));
1253 kfree(dent);
1254 return 0;
1255
1256out_release:
1257 release_head(c, BASEHD);
1258out_ro:
1259 ubifs_ro_mode(c, err);
1260 if (last_reference)
1261 ubifs_delete_orphan(c, new_inode->i_ino);
1262out_finish:
1263 finish_reservation(c);
1264out_free:
1265 kfree(dent);
1266 return err;
1267}
1268
1269/**
 * truncate_data_node - re-compress/encrypt a truncated data node.
 * @c: UBIFS file-system description object
 * @inode: inode which refers to the data node
 * @block: data block number
 * @dn: data node to re-compress
1275 * @new_len: new length
1276 *
1277 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed/encrypted and re-written.
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001279 */
static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
			      unsigned int block, struct ubifs_data_node *dn,
			      int *new_len)
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001283{
1284 void *buf;
	int err, dlen, compr_type, out_len, old_dlen;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001286
1287 out_len = le32_to_cpu(dn->size);
1288 buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
1289 if (!buf)
1290 return -ENOMEM;
1291
	dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);

	if (ubifs_crypt_is_encrypted(inode)) {
1296 err = ubifs_decrypt(inode, dn, &dlen, block);
1297 if (err)
1298 goto out;
1299 }
1300
1301 if (compr_type != UBIFS_COMPR_NONE) {
1302 err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
1303 if (err)
1304 goto out;
1305
1306 ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
1307 }
1308
1309 if (ubifs_crypt_is_encrypted(inode)) {
1310 err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
1311 if (err)
1312 goto out;
1313
1314 out_len = old_dlen;
1315 } else {
1316 dn->compr_size = 0;
1317 }
1318
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
1320 dn->compr_type = cpu_to_le16(compr_type);
1321 dn->size = cpu_to_le32(*new_len);
1322 *new_len = UBIFS_DATA_NODE_SZ + out_len;
	err = 0;
out:
1325 kfree(buf);
1326 return err;
1327}
1328
1329/**
1330 * ubifs_jnl_truncate - update the journal for a truncation.
1331 * @c: UBIFS file-system description object
1332 * @inode: inode to truncate
1333 * @old_size: old size
1334 * @new_size: new size
1335 *
1336 * When the size of a file decreases due to truncation, a truncation node is
1337 * written, the journal tree is updated, and the last data block is re-written
1338 * if it has been affected. The inode is also updated in order to synchronize
1339 * the new inode size.
1340 *
1341 * This function marks the inode as clean and returns zero on success. In case
1342 * of failure, a negative error code is returned.
1343 */
1344int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1345 loff_t old_size, loff_t new_size)
1346{
1347 union ubifs_key key, to_key;
1348 struct ubifs_ino_node *ino;
1349 struct ubifs_trun_node *trun;
1350 struct ubifs_data_node *uninitialized_var(dn);
1351 int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
1352 struct ubifs_inode *ui = ubifs_inode(inode);
1353 ino_t inum = inode->i_ino;
1354 unsigned int blk;
1355
	dbg_jnl("ino %lu, size %lld -> %lld",
		(unsigned long)inum, old_size, new_size);
	ubifs_assert(!ui->data_len);
1359 ubifs_assert(S_ISREG(inode->i_mode));
1360 ubifs_assert(mutex_is_locked(&ui->ui_mutex));
1361
1362 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
1363 UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
1364 ino = kmalloc(sz, GFP_NOFS);
1365 if (!ino)
1366 return -ENOMEM;
1367
1368 trun = (void *)ino + UBIFS_INO_NODE_SZ;
1369 trun->ch.node_type = UBIFS_TRUN_NODE;
1370 trun->inum = cpu_to_le32(inum);
1371 trun->old_size = cpu_to_le64(old_size);
1372 trun->new_size = cpu_to_le64(new_size);
1373 zero_trun_node_unused(trun);
1374
1375 dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
1376 if (dlen) {
1377 /* Get last data block so it can be truncated */
1378 dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
1379 blk = new_size >> UBIFS_BLOCK_SHIFT;
1380 data_key_init(c, &key, inum, blk);
		dbg_jnlk(&key, "last block key ");
		err = ubifs_tnc_lookup(c, &key, dn);
1383 if (err == -ENOENT)
1384 dlen = 0; /* Not found (so it is a hole) */
1385 else if (err)
1386 goto out_free;
1387 else {
1388 if (le32_to_cpu(dn->size) <= dlen)
1389 dlen = 0; /* Nothing to do */
1390 else {
				err = truncate_data_node(c, inode, blk, dn, &dlen);
				if (err)
					goto out_free;
			}
1395 }
1396 }
1397
1398 /* Must make reservation before allocating sequence numbers */
1399 len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
1400 if (dlen)
1401 len += dlen;
1402 err = make_reservation(c, BASEHD, len);
1403 if (err)
1404 goto out_free;
1405
	pack_inode(c, ino, inode, 0);
	ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
1408 if (dlen)
1409 ubifs_prep_grp_node(c, dn, dlen, 1);
1410
1411 err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
1412 if (err)
1413 goto out_release;
1414 if (!sync)
1415 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
1416 release_head(c, BASEHD);
1417
1418 if (dlen) {
1419 sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
1420 err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
1421 if (err)
1422 goto out_ro;
1423 }
1424
1425 ino_key_init(c, &key, inum);
1426 err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
1427 if (err)
1428 goto out_ro;
1429
1430 err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
1431 if (err)
1432 goto out_ro;
1433
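	/*
	 * Remove from the TNC all data nodes which are no longer needed:
	 * from the first block entirely beyond the new size up to the last
	 * block covered by the old size.
	 */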
1434 bit = new_size & (UBIFS_BLOCK_SIZE - 1);
1435 blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
1436 data_key_init(c, &key, inum, blk);
1437
1438 bit = old_size & (UBIFS_BLOCK_SIZE - 1);
1439	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
1440	data_key_init(c, &to_key, inum, blk);
1441
1442 err = ubifs_tnc_remove_range(c, &key, &to_key);
1443 if (err)
1444 goto out_ro;
1445
1446 finish_reservation(c);
1447 spin_lock(&ui->ui_lock);
1448 ui->synced_i_size = ui->ui_size;
1449 spin_unlock(&ui->ui_lock);
1450 mark_inode_clean(c, ui);
1451 kfree(ino);
1452 return 0;
1453
1454out_release:
1455 release_head(c, BASEHD);
1456out_ro:
1457 ubifs_ro_mode(c, err);
1458 finish_reservation(c);
1459out_free:
1460 kfree(ino);
1461 return err;
1462}
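/*
 * Illustrative sketch only, compiled out: roughly how a truncation path
 * might drive ubifs_jnl_truncate(). The helper name is made up for this
 * example, and the real caller also handles budgeting and page cache state.
 */
#if 0
static int example_shrink_file(struct ubifs_info *c, struct inode *inode,
			       loff_t new_size)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t old_size = inode->i_size;
	int err;

	/* ubifs_jnl_truncate() asserts that ui->ui_mutex is held */
	mutex_lock(&ui->ui_mutex);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);
	return err;
}
#endif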
1463
1464
1465/**
1466 * ubifs_jnl_delete_xattr - delete an extended attribute.
1467 * @c: UBIFS file-system description object
1468 * @host: host inode
1469 * @inode: extended attribute inode
1470 * @nm: extended attribute entry name
1471 *
1472 * This function deletes an extended attribute, which is very similar to
1473 * un-linking regular files - it writes a deletion xentry, a deletion inode and
1474 * updates the host inode. Returns zero in case of success and a negative
1475 * error code in case of failure.
1476 */
1477int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
1478			   const struct inode *inode,
1479 const struct fscrypt_name *nm)
1480{
1481 int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
1482 struct ubifs_dent_node *xent;
1483 struct ubifs_ino_node *ino;
1484 union ubifs_key xent_key, key1, key2;
1485 int sync = IS_DIRSYNC(host);
1486 struct ubifs_inode *host_ui = ubifs_inode(host);
1487
1488	//dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
1489 // host->i_ino, inode->i_ino, nm->name,
1490 // ubifs_inode(inode)->data_len);
1491	ubifs_assert(inode->i_nlink == 0);
1492 ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
1493
1494 /*
1495 * Since we are deleting the inode, we do not bother to attach any data
1496 * to it and assume its length is %UBIFS_INO_NODE_SZ.
1497 */
1498	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
1499	aligned_xlen = ALIGN(xlen, 8);
1500 hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
1501 len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
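	/*
	 * The node group is laid out as: the deletion xentry (padded to 8
	 * bytes), then a deletion inode for the xattr inode, then the host
	 * inode together with its in-inode xattr data.
	 */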
1502
1503 xent = kmalloc(len, GFP_NOFS);
1504 if (!xent)
1505 return -ENOMEM;
1506
1507 /* Make reservation before allocating sequence numbers */
1508 err = make_reservation(c, BASEHD, len);
1509 if (err) {
1510 kfree(xent);
1511 return err;
1512 }
1513
1514 xent->ch.node_type = UBIFS_XENT_NODE;
1515 xent_key_init(c, &xent_key, host->i_ino, nm);
1516 key_write(c, &xent_key, xent->key);
1517 xent->inum = 0;
1518 xent->type = get_dent_type(inode->i_mode);
1519	xent->nlen = cpu_to_le16(fname_len(nm));
1520 memcpy(xent->name, fname_name(nm), fname_len(nm));
1521 xent->name[fname_len(nm)] = '\0';
1522	zero_dent_node_unused(xent);
1523 ubifs_prep_grp_node(c, xent, xlen, 0);
1524
1525 ino = (void *)xent + aligned_xlen;
1526	pack_inode(c, ino, inode, 0);
1527	ino = (void *)ino + UBIFS_INO_NODE_SZ;
1528	pack_inode(c, ino, host, 1);
1529
1530 err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
1531 if (!sync && !err)
1532 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
1533 release_head(c, BASEHD);
1534 kfree(xent);
1535 if (err)
1536 goto out_ro;
1537
1538 /* Remove the extended attribute entry from TNC */
1539 err = ubifs_tnc_remove_nm(c, &xent_key, nm);
1540 if (err)
1541 goto out_ro;
1542 err = ubifs_add_dirt(c, lnum, xlen);
1543 if (err)
1544 goto out_ro;
1545
1546 /*
1547 * Remove all nodes belonging to the extended attribute inode from TNC.
1548 * Well, there actually must be only one node - the inode itself.
1549 */
1550 lowest_ino_key(c, &key1, inode->i_ino);
1551 highest_ino_key(c, &key2, inode->i_ino);
1552 err = ubifs_tnc_remove_range(c, &key1, &key2);
1553 if (err)
1554 goto out_ro;
1555 err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
1556 if (err)
1557 goto out_ro;
1558
1559 /* And update TNC with the new host inode position */
1560 ino_key_init(c, &key1, host->i_ino);
1561 err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
1562 if (err)
1563 goto out_ro;
1564
1565 finish_reservation(c);
1566 spin_lock(&host_ui->ui_lock);
1567 host_ui->synced_i_size = host_ui->ui_size;
1568 spin_unlock(&host_ui->ui_lock);
1569 mark_inode_clean(c, host_ui);
1570 return 0;
1571
1572out_ro:
1573 ubifs_ro_mode(c, err);
1574 finish_reservation(c);
1575 return err;
1576}
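/*
 * Illustrative sketch only, compiled out: a possible removal path for an
 * extended attribute. The helper name is hypothetical; the real caller in
 * the xattr code also adjusts the host inode's xattr accounting and
 * releases budgets.
 */
#if 0
static int example_remove_xattr(struct ubifs_info *c, struct inode *host,
				struct inode *xattr_inode,
				const struct fscrypt_name *nm)
{
	struct ubifs_inode *host_ui = ubifs_inode(host);
	int err;

	mutex_lock(&host_ui->ui_mutex);
	/* The xattr inode must look deleted before it is journalled */
	clear_nlink(xattr_inode);
	err = ubifs_jnl_delete_xattr(c, host, xattr_inode, nm);
	mutex_unlock(&host_ui->ui_mutex);
	return err;
}
#endif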
1577
1578/**
1579 * ubifs_jnl_change_xattr - change an extended attribute.
1580 * @c: UBIFS file-system description object
1581 * @inode: extended attribute inode
1582 * @host: host inode
1583 *
1584 * This function writes the updated version of an extended attribute inode and
1585 * the host inode to the journal (to the base head). The host inode is written
1586 * after the extended attribute inode in order to guarantee that the extended
1587 * attribute will be flushed when the inode is synchronized by 'fsync()' and
1588 * consequently, the write-buffer is synchronized. This function returns zero
1589 * in case of success and a negative error code in case of failure.
1590 */
1591int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
1592 const struct inode *host)
1593{
1594 int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
1595	struct ubifs_inode *host_ui = ubifs_inode(host);
1596	struct ubifs_ino_node *ino;
1597 union ubifs_key key;
1598 int sync = IS_DIRSYNC(host);
1599
1600 dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
1601 ubifs_assert(host->i_nlink > 0);
1602 ubifs_assert(inode->i_nlink > 0);
1603 ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
1604
1605 len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
1606 len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
1607 aligned_len1 = ALIGN(len1, 8);
1608 aligned_len = aligned_len1 + ALIGN(len2, 8);
1609
1610 ino = kmalloc(aligned_len, GFP_NOFS);
1611 if (!ino)
1612 return -ENOMEM;
1613
1614 /* Make reservation before allocating sequence numbers */
1615 err = make_reservation(c, BASEHD, aligned_len);
1616 if (err)
1617 goto out_free;
1618
1619	pack_inode(c, ino, host, 0);
1620 pack_inode(c, (void *)ino + aligned_len1, inode, 1);
1621
1622 err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
1623 if (!sync && !err) {
1624 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1625
1626 ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
1627 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
1628 }
1629 release_head(c, BASEHD);
1630 if (err)
1631 goto out_ro;
1632
1633 ino_key_init(c, &key, host->i_ino);
1634 err = ubifs_tnc_add(c, &key, lnum, offs, len1);
1635 if (err)
1636 goto out_ro;
1637
1638 ino_key_init(c, &key, inode->i_ino);
1639 err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
1640 if (err)
1641 goto out_ro;
1642
1643 finish_reservation(c);
1644 spin_lock(&host_ui->ui_lock);
1645 host_ui->synced_i_size = host_ui->ui_size;
1646 spin_unlock(&host_ui->ui_lock);
1647 mark_inode_clean(c, host_ui);
1648 kfree(ino);
1649 return 0;
1650
1651out_ro:
1652 ubifs_ro_mode(c, err);
1653 finish_reservation(c);
1654out_free:
1655 kfree(ino);
1656 return err;
1657}
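/*
 * Illustrative sketch only, compiled out: committing an updated attribute
 * value with ubifs_jnl_change_xattr(). The helper name is hypothetical and
 * budgeting/error handling are reduced to the bare minimum.
 */
#if 0
static int example_update_xattr(struct ubifs_info *c, struct inode *host,
				struct inode *xattr_inode)
{
	struct ubifs_inode *host_ui = ubifs_inode(host);
	int err;

	/* Both inodes are journalled in one group under the host ui_mutex */
	mutex_lock(&host_ui->ui_mutex);
	err = ubifs_jnl_change_xattr(c, xattr_inode, host);
	mutex_unlock(&host_ui->ui_mutex);
	return err;
}
#endif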
1658