// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements the UBIFS journal.
 *
 * The journal consists of two parts - the log and bud LEBs. The log has a
 * fixed length and position, while a bud logical eraseblock is any LEB in the
 * main area. Buds contain file system data - data nodes, inode nodes, etc.
 * The log contains only references to buds and some other stuff like the
 * commit start node. The idea is that when we commit the journal, we do not
 * copy the data, the buds just become indexed. Since after the commit the
 * nodes in bud eraseblocks become leaf nodes of the file system index tree,
 * we use the term "bud". The analogy is obvious: bud eraseblocks contain
 * nodes which will become leaves in the future.
 *
 * The journal is multi-headed because we want to write data to the journal as
 * optimally as possible. It is nice to have nodes belonging to the same inode
 * in one LEB, so we may write data owned by different inodes to different
 * journal heads, although at present only one data head is used.
 *
 * For recovery reasons, the base head contains all inode nodes, all directory
 * entry nodes and all truncate nodes. This means that the other heads contain
 * only data nodes.
 *
 * Bud LEBs may be half-indexed. For example, if the bud was not full at the
 * time of commit, the bud is retained to continue to be used in the journal,
 * even though the "front" of the LEB is now indexed. In that case, the log
 * reference contains the offset where the bud starts for the purposes of the
 * journal.
 *
 * The journal size has to be limited, because the larger the journal is, the
 * longer it takes to mount UBIFS (scanning the journal) and the more memory
 * it takes (indexing in the TNC).
 *
 * All the journal write operations like 'ubifs_jnl_update()' here, which write
 * multiple UBIFS nodes to the journal at one go, are atomic with respect to
 * unclean reboots. Should an unclean reboot happen, the recovery code drops
 * all the nodes.
 */

#include "ubifs.h"

/**
 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
 * @ino: the inode to zero out
 */
static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
{
	memset(ino->padding1, 0, 4);
	memset(ino->padding2, 0, 26);
}

/**
 * zero_dent_node_unused - zero out unused fields of an on-flash directory
 * entry node.
 * @dent: the directory entry to zero out
 */
static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
{
	dent->padding1 = 0;
}

/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
 * node.
 * @trun: the truncation node to zero out
 */
static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
{
	memset(trun->padding, 0, 12);
}

static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum)
{
	if (ubifs_authenticated(c))
		ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c));
}

/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @jhead. If the reservation
 * succeeded, the journal head stays locked and later has to be unlocked using
 * 'release_head()'. Returns zero in case of success, %-EAGAIN if commit has to
 * be done, and other negative error codes in case of other failures.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, the base head has smaller nodes written to it, so it is
	 * better to try to allocate space at the ends of eraseblocks. This is
	 * what the squeeze parameter does.
	 */
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		return 0;

	/*
	 * The write-buffer wasn't seek'ed or there is not enough space - look
	 * for an LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space, we have to run the garbage collector to make
	 * some. But the write-buffer mutex has to be unlocked because
	 * GC also takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may
		 * have allocated a new bud for this journal head,
		 * because we dropped @wbuf->io_mutex, so try once
		 * again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process is
		 * trying to write to the same journal head at the same time.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Make sure we synchronize the write-buffer before we add the new bud
	 * to the log. Otherwise we may have a power cut after the log
	 * reference node for the last bud (@lnum) is written but before the
	 * write-buffer data are written to the next-to-last bud
	 * (@wbuf->lnum). And the effect would be that the recovery would see
	 * that there is corruption in the next-to-last bud.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(c, err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * Return the original error code only if it is not %-EAGAIN,
		 * which is not really an error. Otherwise, return the error
		 * code of 'ubifs_return_leb()'.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}

static int ubifs_hash_nodes(struct ubifs_info *c, void *node,
			    int len, struct shash_desc *hash)
{
	int auth_node_size = ubifs_auth_node_sz(c);
	int err;

	while (1) {
		const struct ubifs_ch *ch = node;
		int nodelen = le32_to_cpu(ch->len);

		ubifs_assert(c, len >= auth_node_size);

		if (len == auth_node_size)
			break;

		ubifs_assert(c, len > nodelen);
		ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC));

		err = ubifs_shash_update(c, hash, (void *)node, nodelen);
		if (err)
			return err;

		node += ALIGN(nodelen, 8);
		len -= ALIGN(nodelen, 8);
	}

	return ubifs_prepare_auth_node(c, node, hash);
}

/**
 * write_head - write data to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @buf: buffer to write
 * @len: length to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function writes data to the reserved space of journal head @jhead.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
		      int *lnum, int *offs, int sync)
{
	int err;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(c, jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);

	if (ubifs_authenticated(c)) {
		err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash);
		if (err)
			return err;
	}

	err = ubifs_wbuf_write_nolock(wbuf, buf, len);
	if (err)
		return err;
	if (sync)
		err = ubifs_wbuf_sync_nolock(wbuf);
	return err;
}

/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes space reservation in journal head @jhead. The function
 * takes the commit lock and locks the journal head, and the caller has to
 * unlock the head and finish the reservation with 'finish_reservation()'.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note, the journal head may be unlocked as soon as the data is written, while
 * the commit lock has to be released after the data has been added to the
 * TNC.
 */
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		/* c->commit_sem will get released via finish_reservation(). */
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. We should try to commit
		 * once because it could make some dirty space and GC would
		 * make progress, so set the error to -EAGAIN so that the
		 * code below commits and re-tries.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}

		/*
		 * Otherwise this means that the budgeting is incorrect. We
		 * always have to be able to write to the media, because all
		 * operations are budgeted. Deletions are not budgeted,
		 * though, but we reserve an extra LEB for them.
		 */
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants to do one commit. Do this and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* These are budgeting problems, so print useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}

/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * the 'make_reservation()' function. It has to be called after each successful
 * 'make_reservation()' invocation.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}

/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes journal space reservation. It must be called after
 * 'make_reservation()'.
 */
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}

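/*
 * A minimal sketch of the reservation sequence that the journal write
 * functions below follow, assuming @buf holds an already prepared node of
 * @len bytes and @key/@hash describe it (error handling, orphan handling and
 * write-buffer inode tracking are omitted here):
 *
 *	err = make_reservation(c, BASEHD, len);
 *	if (err)
 *		return err;
 *	err = write_head(c, BASEHD, buf, len, &lnum, &offs, sync);
 *	release_head(c, BASEHD);
 *	err = ubifs_tnc_add(c, &key, lnum, offs, len, hash);
 *	finish_reservation(c);
 */
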
/**
 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
 * @mode: inode mode
 */
static int get_dent_type(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
		return UBIFS_ITYPE_REG;
	case S_IFDIR:
		return UBIFS_ITYPE_DIR;
	case S_IFLNK:
		return UBIFS_ITYPE_LNK;
	case S_IFBLK:
		return UBIFS_ITYPE_BLK;
	case S_IFCHR:
		return UBIFS_ITYPE_CHR;
	case S_IFIFO:
		return UBIFS_ITYPE_FIFO;
	case S_IFSOCK:
		return UBIFS_ITYPE_SOCK;
	default:
		BUG();
	}
	return 0;
}

/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode, the data is not
	 * needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}

/**
 * mark_inode_clean - mark UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper function marks UBIFS inode @ui as clean by clearing the
 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the
 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would
 * just do nothing.
 */
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}

static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
	if (c->double_hash)
		dent->cookie = prandom_u32();
	else
		dent->cookie = 0;
}

/**
 * ubifs_jnl_update - update inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal.
 *
 * The function writes the host inode @dir last, which is important in case of
 * extended attributes. Indeed, then we guarantee that if the host inode gets
 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed,
 * the extended attribute inode gets flushed too. And this is exactly what the
 * user expects - synchronizing the host inode synchronizes its extended
 * attributes. Similarly, this guarantees that if @dir is synchronized, its
 * directory entry corresponding to @nm gets synchronized too.
 *
 * If the inode (@inode) or the parent directory (@dir) are synchronous, this
 * function synchronizes the write-buffer.
 *
 * This function marks the @dir and @inode inodes as clean and returns zero on
 * success. In case of failure, a negative error code is returned.
 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct fscrypt_name *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *host_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;
	u8 hash_dent[UBIFS_HASH_ARR_SZ];
	u8 hash_ino[UBIFS_HASH_ARR_SZ];
	u8 hash_ino_host[UBIFS_HASH_ARR_SZ];

	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, then there is
	 * no need to attach and write inode data, it is being deleted anyway.
	 * And if the inode is being deleted, no need to synchronize
	 * write-buffer even if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);

	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	/* Make sure to also account for extended attributes */
	if (ubifs_authenticated(c))
		len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c);
	else
		len += host_ui->data_len;

	dent = kzalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		if (nm->hash)
			dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
		else
			dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(dent->name, fname_name(nm), fname_len(nm));
	dent->name[fname_len(nm)] = '\0';
	set_dent_cookie(c, dent);

	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);
	err = ubifs_node_calc_hash(c, dent, hash_dent);
	if (err)
		goto out_release;

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	err = ubifs_node_calc_hash(c, ino, hash_ino);
	if (err)
		goto out_release;

	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);
	err = ubifs_node_calc_hash(c, ino, hash_ino_host);
	if (err)
		goto out_release;

	if (last_reference) {
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);
	ubifs_add_auth_dirt(c, lnum);

	if (deletion) {
		if (nm->hash)
			err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
		else
			err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen,
				       hash_dent, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, we do not remove the inode from TNC even if the last reference
	 * to it has just been deleted, because the inode may still be opened.
	 * Instead, the inode has been added to orphan lists and the orphan
	 * subsystem will take further care about it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
			    UBIFS_INO_NODE_SZ + host_ui->data_len, hash_ino_host);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	if (xent) {
		spin_lock(&host_ui->ui_lock);
		host_ui->synced_i_size = host_ui->ui_size;
		spin_unlock(&host_ui->ui_lock);
	}
	mark_inode_clean(c, ui);
	mark_inode_clean(c, host_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}

/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal. Returns %0 if the data node
 * was successfully written, and a negative error code in case of failure.
 */
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len, compr_len, auth_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	int write_len;
	struct ubifs_inode *ui = ubifs_inode(inode);
	bool encrypted = ubifs_crypt_is_encrypted(inode);
	u8 hash[UBIFS_HASH_ARR_SZ];

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		 (unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(c, len <= UBIFS_BLOCK_SIZE);

	if (encrypted)
		dlen += UBIFS_CIPHER_BLOCK_SIZE;

	auth_len = ubifs_auth_node_sz(c);

	data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN);
	if (!data) {
		/*
		 * Fall-back to the write reserve buffer. Note, we might be
		 * currently on the memory reclaim path, when the kernel is
		 * trying to free some memory by writing out dirty pages. The
		 * write reserve buffer helps us to guarantee that we are
		 * always able to write the data.
		 */
		allocated = 0;
		mutex_lock(&c->write_reserve_mutex);
		data = c->write_reserve_buf;
	}

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
	ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);

	if (encrypted) {
		err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
		if (err)
			goto out_free;

	} else {
		data->compr_size = 0;
		out_len = compr_len;
	}

	dlen = UBIFS_DATA_NODE_SZ + out_len;
	if (ubifs_authenticated(c))
		write_len = ALIGN(dlen, 8) + auth_len;
	else
		write_len = dlen;

	data->compr_type = cpu_to_le16(compr_type);

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, DATAHD, write_len);
	if (err)
		goto out_free;

	ubifs_prepare_node(c, data, dlen, 0);
	err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0);
	if (err)
		goto out_release;

	err = ubifs_node_calc_hash(c, data, hash);
	if (err)
		goto out_release;

	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	ubifs_add_auth_dirt(c, lnum);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
}

/**
 * ubifs_jnl_write_inode - flush inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal. If the inode is
 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino, *ino_start;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, write_len = 0, ilen = UBIFS_INO_NODE_SZ;
	int last_reference = !inode->i_nlink;
	int kill_xattrs = ui->xattr_cnt && last_reference;
	u8 hash[UBIFS_HASH_ARR_SZ];

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, do not write the attached data. No
	 * need to synchronize the write-buffer either.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync = IS_SYNC(inode);
	} else if (kill_xattrs) {
		write_len += UBIFS_INO_NODE_SZ * ui->xattr_cnt;
	}

	if (ubifs_authenticated(c))
		write_len += ALIGN(ilen, 8) + ubifs_auth_node_sz(c);
	else
		write_len += ilen;

	ino_start = ino = kmalloc(write_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, write_len);
	if (err)
		goto out_free;

	if (kill_xattrs) {
		union ubifs_key key;
		struct fscrypt_name nm = {0};
		struct inode *xino;
		struct ubifs_dent_node *xent, *pxent = NULL;

		if (ui->xattr_cnt >= ubifs_xattr_max_cnt(c)) {
			ubifs_err(c, "Cannot delete inode, it has too many xattrs!");
			goto out_release;
		}

		lowest_xent_key(c, &key, inode->i_ino);
		while (1) {
			xent = ubifs_tnc_next_ent(c, &key, &nm);
			if (IS_ERR(xent)) {
				err = PTR_ERR(xent);
				if (err == -ENOENT)
					break;

				goto out_release;
			}

			fname_name(&nm) = xent->name;
			fname_len(&nm) = le16_to_cpu(xent->nlen);

			xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
			if (IS_ERR(xino)) {
				err = PTR_ERR(xino);
				ubifs_err(c, "dead directory entry '%s', error %d",
					  xent->name, err);
				ubifs_ro_mode(c, err);
				goto out_release;
			}
			ubifs_assert(c, ubifs_inode(xino)->xattr);

			clear_nlink(xino);
			pack_inode(c, ino, xino, 0);
			ino = (void *)ino + UBIFS_INO_NODE_SZ;
			iput(xino);

			kfree(pxent);
			pxent = xent;
			key_read(c, &xent->key, &key);
		}
		kfree(pxent);
	}

	pack_inode(c, ino, inode, 1);
	err = ubifs_node_calc_hash(c, ino, hash);
	if (err)
		goto out_release;

	err = write_head(c, BASEHD, ino_start, write_len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	ubifs_add_auth_dirt(c, lnum);

	if (last_reference) {
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, write_len);
	} else {
		union ubifs_key key;

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino_start);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino_start);
	return err;
}

/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode which includes removing it from orphans,
 * deleting it from TNC and, in some cases, writing a deletion inode to the
 * journal.
 *
 * When regular file inodes are unlinked or a directory inode is removed, the
 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and
 * direntry to the media, and adds the inode to orphans. After this, when the
 * last reference to this inode has been dropped, this function is called. In
 * general, it has to write one more deletion inode to the media, because if
 * a commit happened between 'ubifs_jnl_update()' and
 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal
 * anymore, and in fact it might not be on the flash anymore, because it might
 * have been garbage-collected already. And for optimization reasons UBIFS does
 * not read the orphan area if it has been unmounted cleanly, so it would have
 * no indication in the journal that there is a deleted inode which has to be
 * removed from TNC.
 *
 * However, if there was no commit between 'ubifs_jnl_update()' and
 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
 * inode to the media for the second time. And this is quite a typical case.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(c, inode->i_nlink == 0);

	if (ui->xattr_cnt || ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure or inode hosts xattrs */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check commit number again, because the first test has been done
	 * without @c->commit_sem, so a commit might have happened.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}

/**
 * ubifs_jnl_xrename - cross rename two directory entries.
 * @c: UBIFS file-system description object
 * @fst_dir: parent inode of 1st directory entry to exchange
 * @fst_inode: 1st inode to exchange
 * @fst_nm: name of 1st inode to exchange
 * @snd_dir: parent inode of 2nd directory entry to exchange
 * @snd_inode: 2nd inode to exchange
 * @snd_nm: name of 2nd inode to exchange
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the cross rename operation which may involve
 * writing 2 inodes and 2 directory entries. It marks the written inodes as
 * clean and returns zero on success. In case of failure, a negative error
 * code is returned.
 */
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
		      const struct inode *fst_inode,
		      const struct fscrypt_name *fst_nm,
		      const struct inode *snd_dir,
		      const struct inode *snd_inode,
		      const struct fscrypt_name *snd_nm, int sync)
{
	union ubifs_key key;
	struct ubifs_dent_node *dent1, *dent2;
	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
	int aligned_dlen1, aligned_dlen2;
	int twoparents = (fst_dir != snd_dir);
	void *p;
	u8 hash_dent1[UBIFS_HASH_ARR_SZ];
	u8 hash_dent2[UBIFS_HASH_ARR_SZ];
	u8 hash_p1[UBIFS_HASH_ARR_SZ];
	u8 hash_p2[UBIFS_HASH_ARR_SZ];

	ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0);
	ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0);
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);

	len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
	if (twoparents)
		len += plen;

	len += ubifs_auth_node_sz(c);

	dent1 = kzalloc(len, GFP_NOFS);
	if (!dent1)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent for 1st entry */
	dent1->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
	dent1->inum = cpu_to_le64(fst_inode->i_ino);
	dent1->type = get_dent_type(fst_inode->i_mode);
	dent1->nlen = cpu_to_le16(fname_len(snd_nm));
	memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
	dent1->name[fname_len(snd_nm)] = '\0';
	set_dent_cookie(c, dent1);
	zero_dent_node_unused(dent1);
	ubifs_prep_grp_node(c, dent1, dlen1, 0);
	err = ubifs_node_calc_hash(c, dent1, hash_dent1);
	if (err)
		goto out_release;

	/* Make new dent for 2nd entry */
	dent2 = (void *)dent1 + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
	dent2->inum = cpu_to_le64(snd_inode->i_ino);
	dent2->type = get_dent_type(snd_inode->i_mode);
	dent2->nlen = cpu_to_le16(fname_len(fst_nm));
	memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
	dent2->name[fname_len(fst_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);
	err = ubifs_node_calc_hash(c, dent2, hash_dent2);
	if (err)
		goto out_release;

	p = (void *)dent2 + aligned_dlen2;
	if (!twoparents) {
		pack_inode(c, p, fst_dir, 1);
		err = ubifs_node_calc_hash(c, p, hash_p1);
		if (err)
			goto out_release;
	} else {
		pack_inode(c, p, fst_dir, 0);
		err = ubifs_node_calc_hash(c, p, hash_p1);
		if (err)
			goto out_release;
		p += ALIGN(plen, 8);
		pack_inode(c, p, snd_dir, 1);
		err = ubifs_node_calc_hash(c, p, hash_p2);
		if (err)
			goto out_release;
	}

	err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
	}
	release_head(c, BASEHD);

	ubifs_add_auth_dirt(c, lnum);

	dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen2;

	ino_key_init(c, &key, fst_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1);
	if (err)
		goto out_ro;

	if (twoparents) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, snd_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);

	mark_inode_clean(c, ubifs_inode(fst_dir));
	if (twoparents)
		mark_inode_clean(c, ubifs_inode(snd_dir));
	kfree(dent1);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(dent1);
	return err;
}

/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of the directory entry to rename
 * @old_inode: inode of the directory entry to rename
 * @old_nm: name of the old directory entry to rename
 * @new_dir: parent inode of the new directory entry
 * @new_inode: inode of the directory entry to replace (if any)
 * @new_nm: new directory entry name
 * @whiteout: whiteout inode (if any)
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the rename operation which may involve writing up
 * to 4 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
 * returned.
 */
1209int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001210 const struct inode *old_inode,
1211 const struct fscrypt_name *old_nm,
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001212 const struct inode *new_dir,
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001213 const struct inode *new_inode,
1214 const struct fscrypt_name *new_nm,
Richard Weinberger9e0a1ff2016-09-14 22:28:50 +02001215 const struct inode *whiteout, int sync)
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001216{
1217 void *p;
1218 union ubifs_key key;
1219 struct ubifs_dent_node *dent, *dent2;
1220 int err, dlen1, dlen2, ilen, lnum, offs, len;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001221 int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
1222 int last_reference = !!(new_inode && new_inode->i_nlink == 0);
1223 int move = (old_dir != new_dir);
1224 struct ubifs_inode *uninitialized_var(new_ui);
Sascha Hauer823838a2018-09-07 14:36:34 +02001225 u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
1226 u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
1227 u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
1228 u8 hash_dent1[UBIFS_HASH_ARR_SZ];
1229 u8 hash_dent2[UBIFS_HASH_ARR_SZ];
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001230
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001231 ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0);
1232 ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0);
1233 ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
1234 ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001235
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001236 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
1237 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001238 if (new_inode) {
1239 new_ui = ubifs_inode(new_inode);
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001240 ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex));
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001241 ilen = UBIFS_INO_NODE_SZ;
1242 if (!last_reference)
1243 ilen += new_ui->data_len;
1244 } else
1245 ilen = 0;
1246
1247 aligned_dlen1 = ALIGN(dlen1, 8);
1248 aligned_dlen2 = ALIGN(dlen2, 8);
1249 len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
Richard Weinberger1e039532016-09-14 22:28:52 +02001250 if (move)
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001251 len += plen;
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001252
1253 len += ubifs_auth_node_sz(c);
1254
Richard Weinberger4acadda2017-06-16 16:21:44 +02001255 dent = kzalloc(len, GFP_NOFS);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001256 if (!dent)
1257 return -ENOMEM;
1258
1259 /* Make reservation before allocating sequence numbers */
1260 err = make_reservation(c, BASEHD, len);
1261 if (err)
1262 goto out_free;
1263
1264 /* Make new dent */
1265 dent->ch.node_type = UBIFS_DENT_NODE;
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001266 dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001267 dent->inum = cpu_to_le64(old_inode->i_ino);
1268 dent->type = get_dent_type(old_inode->i_mode);
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001269 dent->nlen = cpu_to_le16(fname_len(new_nm));
1270 memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
1271 dent->name[fname_len(new_nm)] = '\0';
Richard Weinbergerd63d61c2016-10-19 15:59:12 +02001272 set_dent_cookie(c, dent);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001273 zero_dent_node_unused(dent);
1274 ubifs_prep_grp_node(c, dent, dlen1, 0);
Sascha Hauer823838a2018-09-07 14:36:34 +02001275 err = ubifs_node_calc_hash(c, dent, hash_dent1);
1276 if (err)
1277 goto out_release;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001278
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001279 dent2 = (void *)dent + aligned_dlen1;
1280 dent2->ch.node_type = UBIFS_DENT_NODE;
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001281 dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);
Richard Weinberger9e0a1ff2016-09-14 22:28:50 +02001282
1283 if (whiteout) {
1284 dent2->inum = cpu_to_le64(whiteout->i_ino);
1285 dent2->type = get_dent_type(whiteout->i_mode);
1286 } else {
1287 /* Make deletion dent */
1288 dent2->inum = 0;
1289 dent2->type = DT_UNKNOWN;
1290 }
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001291 dent2->nlen = cpu_to_le16(fname_len(old_nm));
1292 memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
1293 dent2->name[fname_len(old_nm)] = '\0';
Richard Weinbergerd63d61c2016-10-19 15:59:12 +02001294 set_dent_cookie(c, dent2);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001295 zero_dent_node_unused(dent2);
1296 ubifs_prep_grp_node(c, dent2, dlen2, 0);
Sascha Hauer823838a2018-09-07 14:36:34 +02001297 err = ubifs_node_calc_hash(c, dent2, hash_dent2);
1298 if (err)
1299 goto out_release;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001300
1301 p = (void *)dent2 + aligned_dlen2;
1302 if (new_inode) {
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001303 pack_inode(c, p, new_inode, 0);
Sascha Hauer823838a2018-09-07 14:36:34 +02001304 err = ubifs_node_calc_hash(c, p, hash_new_inode);
1305 if (err)
1306 goto out_release;
1307
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001308 p += ALIGN(ilen, 8);
1309 }
1310
Sascha Hauer823838a2018-09-07 14:36:34 +02001311 if (!move) {
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001312 pack_inode(c, p, old_dir, 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001313 err = ubifs_node_calc_hash(c, p, hash_old_dir);
1314 if (err)
1315 goto out_release;
1316 } else {
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001317 pack_inode(c, p, old_dir, 0);
Sascha Hauer823838a2018-09-07 14:36:34 +02001318 err = ubifs_node_calc_hash(c, p, hash_old_dir);
1319 if (err)
1320 goto out_release;
1321
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001322 p += ALIGN(plen, 8);
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001323 pack_inode(c, p, new_dir, 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001324 err = ubifs_node_calc_hash(c, p, hash_new_dir);
1325 if (err)
1326 goto out_release;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001327 }
1328
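	/*
	 * If this rename drops the last link to the replaced inode, record it
	 * as an orphan so that it is removed should power be cut before the
	 * deletion is committed.
	 */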
1329 if (last_reference) {
1330 err = ubifs_add_orphan(c, new_inode->i_ino);
1331 if (err) {
1332 release_head(c, BASEHD);
1333 goto out_finish;
1334 }
Artem Bityutskiyde94eb52008-07-22 13:06:20 +03001335 new_ui->del_cmtno = c->cmt_no;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001336 }
1337
1338 err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
1339 if (err)
1340 goto out_release;
1341 if (!sync) {
1342 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1343
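		/*
		 * Remember which inodes the write-buffer holds data for, so
		 * that fsync() on any of them synchronizes this write-buffer.
		 */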
1344 ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
1345 ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
1346 if (new_inode)
1347 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
1348 new_inode->i_ino);
1349 }
1350 release_head(c, BASEHD);
1351
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001352 ubifs_add_auth_dirt(c, lnum);
1353
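	/*
	 * Update the TNC; the offsets below follow the order in which the
	 * nodes were packed into the group above.
	 */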
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001354 dent_key_init(c, &key, new_dir->i_ino, new_nm);
Sascha Hauer823838a2018-09-07 14:36:34 +02001355 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001356 if (err)
1357 goto out_ro;
1358
Richard Weinberger9e0a1ff2016-09-14 22:28:50 +02001359 offs += aligned_dlen1;
1360 if (whiteout) {
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001361 dent_key_init(c, &key, old_dir->i_ino, old_nm);
Sascha Hauer823838a2018-09-07 14:36:34 +02001362 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
Richard Weinberger9e0a1ff2016-09-14 22:28:50 +02001363 if (err)
1364 goto out_ro;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001365
Richard Weinberger9e0a1ff2016-09-14 22:28:50 +02001366 ubifs_delete_orphan(c, whiteout->i_ino);
1367 } else {
1368 err = ubifs_add_dirt(c, lnum, dlen2);
1369 if (err)
1370 goto out_ro;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001371
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001372 dent_key_init(c, &key, old_dir->i_ino, old_nm);
1373 err = ubifs_tnc_remove_nm(c, &key, old_nm);
Richard Weinberger9e0a1ff2016-09-14 22:28:50 +02001374 if (err)
1375 goto out_ro;
1376 }
1377
1378 offs += aligned_dlen2;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001379 if (new_inode) {
1380 ino_key_init(c, &key, new_inode->i_ino);
Sascha Hauer823838a2018-09-07 14:36:34 +02001381 err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001382 if (err)
1383 goto out_ro;
1384 offs += ALIGN(ilen, 8);
1385 }
1386
1387 ino_key_init(c, &key, old_dir->i_ino);
Sascha Hauer823838a2018-09-07 14:36:34 +02001388 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001389 if (err)
1390 goto out_ro;
1391
Richard Weinberger1e039532016-09-14 22:28:52 +02001392 if (move) {
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001393 offs += ALIGN(plen, 8);
1394 ino_key_init(c, &key, new_dir->i_ino);
Sascha Hauer823838a2018-09-07 14:36:34 +02001395 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001396 if (err)
1397 goto out_ro;
1398 }
1399
1400 finish_reservation(c);
1401 if (new_inode) {
1402 mark_inode_clean(c, new_ui);
1403 spin_lock(&new_ui->ui_lock);
1404 new_ui->synced_i_size = new_ui->ui_size;
1405 spin_unlock(&new_ui->ui_lock);
1406 }
1407 mark_inode_clean(c, ubifs_inode(old_dir));
1408 if (move)
1409 mark_inode_clean(c, ubifs_inode(new_dir));
1410 kfree(dent);
1411 return 0;
1412
1413out_release:
1414 release_head(c, BASEHD);
1415out_ro:
1416 ubifs_ro_mode(c, err);
1417 if (last_reference)
1418 ubifs_delete_orphan(c, new_inode->i_ino);
1419out_finish:
1420 finish_reservation(c);
1421out_free:
1422 kfree(dent);
1423 return err;
1424}
1425
1426/**
Richard Weinberger77999532016-09-29 22:20:19 +02001427 * truncate_data_node - re-compress/encrypt a truncated data node.
1428 * @c: UBIFS file-system description object
	1429	 * @inode: inode which refers to the data node
1430 * @block: data block number
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001431 * @dn: data node to re-compress
1432 * @new_len: new length
1433 *
1434 * This function is used when an inode is truncated and the last data node of
Richard Weinberger77999532016-09-29 22:20:19 +02001435 * the inode has to be re-compressed/encrypted and re-written.
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001436 */
Richard Weinberger77999532016-09-29 22:20:19 +02001437static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
1438 unsigned int block, struct ubifs_data_node *dn,
1439 int *new_len)
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001440{
1441 void *buf;
Richard Weinberger08acbdd2018-07-01 23:20:50 +02001442 int err, dlen, compr_type, out_len, old_dlen;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001443
1444 out_len = le32_to_cpu(dn->size);
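	/* Scratch buffer which receives the decompressed block contents */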
Richard Weinbergera3d21822018-07-02 23:47:13 +02001445 buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001446 if (!buf)
1447 return -ENOMEM;
1448
Richard Weinberger77999532016-09-29 22:20:19 +02001449 dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001450 compr_type = le16_to_cpu(dn->compr_type);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001451
Richard Weinberger77999532016-09-29 22:20:19 +02001452 if (ubifs_crypt_is_encrypted(inode)) {
1453 err = ubifs_decrypt(inode, dn, &dlen, block);
1454 if (err)
1455 goto out;
1456 }
1457
David Oberhollenzer59a74992017-05-17 10:36:45 +02001458 if (compr_type == UBIFS_COMPR_NONE) {
1459 out_len = *new_len;
1460 } else {
Richard Weinberger77999532016-09-29 22:20:19 +02001461 err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
1462 if (err)
1463 goto out;
1464
1465 ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
1466 }
1467
1468 if (ubifs_crypt_is_encrypted(inode)) {
1469 err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
1470 if (err)
1471 goto out;
1472
1473 out_len = old_dlen;
1474 } else {
1475 dn->compr_size = 0;
1476 }
1477
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001478 ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001479 dn->compr_type = cpu_to_le16(compr_type);
1480 dn->size = cpu_to_le32(*new_len);
1481 *new_len = UBIFS_DATA_NODE_SZ + out_len;
Colin Ian Kinge8f19742016-12-16 13:32:39 +00001482 err = 0;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001483out:
1484 kfree(buf);
1485 return err;
1486}
1487
1488/**
1489 * ubifs_jnl_truncate - update the journal for a truncation.
1490 * @c: UBIFS file-system description object
1491 * @inode: inode to truncate
1492 * @old_size: old size
1493 * @new_size: new size
1494 *
1495 * When the size of a file decreases due to truncation, a truncation node is
1496 * written, the journal tree is updated, and the last data block is re-written
1497 * if it has been affected. The inode is also updated in order to synchronize
1498 * the new inode size.
1499 *
1500 * This function marks the inode as clean and returns zero on success. In case
1501 * of failure, a negative error code is returned.
1502 */
1503int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1504 loff_t old_size, loff_t new_size)
1505{
1506 union ubifs_key key, to_key;
1507 struct ubifs_ino_node *ino;
1508 struct ubifs_trun_node *trun;
1509 struct ubifs_data_node *uninitialized_var(dn);
1510 int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
1511 struct ubifs_inode *ui = ubifs_inode(inode);
1512 ino_t inum = inode->i_ino;
1513 unsigned int blk;
Sascha Hauer823838a2018-09-07 14:36:34 +02001514 u8 hash_ino[UBIFS_HASH_ARR_SZ];
1515 u8 hash_dn[UBIFS_HASH_ARR_SZ];
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001516
Artem Bityutskiye84461a2008-10-29 12:08:43 +02001517 dbg_jnl("ino %lu, size %lld -> %lld",
1518 (unsigned long)inum, old_size, new_size);
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001519 ubifs_assert(c, !ui->data_len);
1520 ubifs_assert(c, S_ISREG(inode->i_mode));
1521 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001522
1523 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
1524 UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001525
1526 sz += ubifs_auth_node_sz(c);
1527
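	/*
	 * The buffer must fit the inode node, the truncation node, a
	 * worst-case last data node and the authentication node.
	 */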
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001528 ino = kmalloc(sz, GFP_NOFS);
1529 if (!ino)
1530 return -ENOMEM;
1531
1532 trun = (void *)ino + UBIFS_INO_NODE_SZ;
1533 trun->ch.node_type = UBIFS_TRUN_NODE;
1534 trun->inum = cpu_to_le32(inum);
1535 trun->old_size = cpu_to_le64(old_size);
1536 trun->new_size = cpu_to_le64(new_size);
1537 zero_trun_node_unused(trun);
1538
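	/*
	 * 'dlen' is the number of bytes left in the last, now partial, block;
	 * if the new size is block-aligned, no data node needs re-writing.
	 */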
1539 dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
1540 if (dlen) {
1541 /* Get last data block so it can be truncated */
1542 dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
1543 blk = new_size >> UBIFS_BLOCK_SHIFT;
1544 data_key_init(c, &key, inum, blk);
Artem Bityutskiy515315a2012-01-13 12:33:53 +02001545 dbg_jnlk(&key, "last block key ");
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001546 err = ubifs_tnc_lookup(c, &key, dn);
1547 if (err == -ENOENT)
1548 dlen = 0; /* Not found (so it is a hole) */
1549 else if (err)
1550 goto out_free;
1551 else {
Richard Weinberger95a22d22018-07-01 23:20:51 +02001552 int dn_len = le32_to_cpu(dn->size);
1553
1554 if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
1555 ubifs_err(c, "bad data node (block %u, inode %lu)",
1556 blk, inode->i_ino);
	1557				ubifs_dump_node(c, dn);
					err = -EUCLEAN;
	1558				goto out_free;
1559 }
1560
1561 if (dn_len <= dlen)
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001562 dlen = 0; /* Nothing to do */
1563 else {
Richard Weinberger77999532016-09-29 22:20:19 +02001564 err = truncate_data_node(c, inode, blk, dn, &dlen);
1565 if (err)
1566 goto out_free;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001567 }
1568 }
1569 }
1570
1571 /* Must make reservation before allocating sequence numbers */
1572 len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001573
1574 if (ubifs_authenticated(c))
1575 len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c);
1576 else
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001577 len += dlen;
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001578
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001579 err = make_reservation(c, BASEHD, len);
1580 if (err)
1581 goto out_free;
1582
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001583 pack_inode(c, ino, inode, 0);
Sascha Hauer823838a2018-09-07 14:36:34 +02001584 err = ubifs_node_calc_hash(c, ino, hash_ino);
1585 if (err)
1586 goto out_release;
1587
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001588 ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001589 if (dlen) {
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001590 ubifs_prep_grp_node(c, dn, dlen, 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001591 err = ubifs_node_calc_hash(c, dn, hash_dn);
1592 if (err)
1593 goto out_release;
1594 }
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001595
1596 err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
1597 if (err)
1598 goto out_release;
1599 if (!sync)
1600 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
1601 release_head(c, BASEHD);
1602
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001603 ubifs_add_auth_dirt(c, lnum);
1604
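	/*
	 * Update the TNC: the re-written last data node (if any) first, then
	 * the inode node. The truncation node is never looked up again, so it
	 * only contributes dirty space.
	 */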
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001605 if (dlen) {
1606 sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
Sascha Hauer823838a2018-09-07 14:36:34 +02001607 err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001608 if (err)
1609 goto out_ro;
1610 }
1611
1612 ino_key_init(c, &key, inum);
Sascha Hauer823838a2018-09-07 14:36:34 +02001613 err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001614 if (err)
1615 goto out_ro;
1616
1617 err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
1618 if (err)
1619 goto out_ro;
1620
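	/*
	 * Remove from the TNC all data nodes beyond the new size: from the
	 * first block past the new end of the file up to the last block of
	 * the old size.
	 */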
1621 bit = new_size & (UBIFS_BLOCK_SIZE - 1);
1622 blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
1623 data_key_init(c, &key, inum, blk);
1624
1625 bit = old_size & (UBIFS_BLOCK_SIZE - 1);
Artem Bityutskiyf92b9822008-12-28 11:34:26 +02001626 blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001627 data_key_init(c, &to_key, inum, blk);
1628
1629 err = ubifs_tnc_remove_range(c, &key, &to_key);
1630 if (err)
1631 goto out_ro;
1632
1633 finish_reservation(c);
1634 spin_lock(&ui->ui_lock);
1635 ui->synced_i_size = ui->ui_size;
1636 spin_unlock(&ui->ui_lock);
1637 mark_inode_clean(c, ui);
1638 kfree(ino);
1639 return 0;
1640
1641out_release:
1642 release_head(c, BASEHD);
1643out_ro:
1644 ubifs_ro_mode(c, err);
1645 finish_reservation(c);
1646out_free:
1647 kfree(ino);
1648 return err;
1649}
1650
1652/**
1653 * ubifs_jnl_delete_xattr - delete an extended attribute.
1654 * @c: UBIFS file-system description object
1655 * @host: host inode
1656 * @inode: extended attribute inode
1657 * @nm: extended attribute entry name
1658 *
	1659	 * This function deletes an extended attribute, which is very similar to
1660 * un-linking regular files - it writes a deletion xentry, a deletion inode and
1661 * updates the target inode. Returns zero in case of success and a negative
1662 * error code in case of failure.
1663 */
1664int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001665 const struct inode *inode,
1666 const struct fscrypt_name *nm)
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001667{
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001668 int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen, write_len;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001669 struct ubifs_dent_node *xent;
1670 struct ubifs_ino_node *ino;
1671 union ubifs_key xent_key, key1, key2;
1672 int sync = IS_DIRSYNC(host);
1673 struct ubifs_inode *host_ui = ubifs_inode(host);
Sascha Hauer823838a2018-09-07 14:36:34 +02001674 u8 hash[UBIFS_HASH_ARR_SZ];
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001675
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001676 ubifs_assert(c, inode->i_nlink == 0);
1677 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001678
1679 /*
1680 * Since we are deleting the inode, we do not bother to attach any data
1681 * to it and assume its length is %UBIFS_INO_NODE_SZ.
1682 */
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001683 xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001684 aligned_xlen = ALIGN(xlen, 8);
1685 hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
1686 len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
1687
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001688 write_len = len + ubifs_auth_node_sz(c);
1689
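	/*
	 * The group consists of the deletion xentry, the xattr inode, the
	 * host inode and, when authentication is enabled, the auth node.
	 */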
1690 xent = kzalloc(write_len, GFP_NOFS);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001691 if (!xent)
1692 return -ENOMEM;
1693
1694 /* Make reservation before allocating sequence numbers */
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001695 err = make_reservation(c, BASEHD, write_len);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001696 if (err) {
1697 kfree(xent);
1698 return err;
1699 }
1700
1701 xent->ch.node_type = UBIFS_XENT_NODE;
1702 xent_key_init(c, &xent_key, host->i_ino, nm);
1703 key_write(c, &xent_key, xent->key);
1704 xent->inum = 0;
1705 xent->type = get_dent_type(inode->i_mode);
Richard Weinbergerf4f61d22016-11-11 22:50:29 +01001706 xent->nlen = cpu_to_le16(fname_len(nm));
1707 memcpy(xent->name, fname_name(nm), fname_len(nm));
1708 xent->name[fname_len(nm)] = '\0';
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001709 zero_dent_node_unused(xent);
1710 ubifs_prep_grp_node(c, xent, xlen, 0);
1711
1712 ino = (void *)xent + aligned_xlen;
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001713 pack_inode(c, ino, inode, 0);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001714 ino = (void *)ino + UBIFS_INO_NODE_SZ;
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001715 pack_inode(c, ino, host, 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001716 err = ubifs_node_calc_hash(c, ino, hash);
1717 if (err)
1718 goto out_release;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001719
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001720 err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001721 if (!sync && !err)
1722 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
1723 release_head(c, BASEHD);
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001724
1725 ubifs_add_auth_dirt(c, lnum);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001726 kfree(xent);
1727 if (err)
1728 goto out_ro;
1729
1730 /* Remove the extended attribute entry from TNC */
1731 err = ubifs_tnc_remove_nm(c, &xent_key, nm);
1732 if (err)
1733 goto out_ro;
1734 err = ubifs_add_dirt(c, lnum, xlen);
1735 if (err)
1736 goto out_ro;
1737
1738 /*
1739 * Remove all nodes belonging to the extended attribute inode from TNC.
1740 * Well, there actually must be only one node - the inode itself.
1741 */
1742 lowest_ino_key(c, &key1, inode->i_ino);
1743 highest_ino_key(c, &key2, inode->i_ino);
1744 err = ubifs_tnc_remove_range(c, &key1, &key2);
1745 if (err)
1746 goto out_ro;
1747 err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
1748 if (err)
1749 goto out_ro;
1750
1751 /* And update TNC with the new host inode position */
1752 ino_key_init(c, &key1, host->i_ino);
Sascha Hauer823838a2018-09-07 14:36:34 +02001753 err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001754 if (err)
1755 goto out_ro;
1756
1757 finish_reservation(c);
1758 spin_lock(&host_ui->ui_lock);
1759 host_ui->synced_i_size = host_ui->ui_size;
1760 spin_unlock(&host_ui->ui_lock);
1761 mark_inode_clean(c, host_ui);
1762 return 0;
1763
Sascha Hauer823838a2018-09-07 14:36:34 +02001764out_release:
1765 kfree(xent);
1766 release_head(c, BASEHD);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001767out_ro:
1768 ubifs_ro_mode(c, err);
1769 finish_reservation(c);
1770 return err;
1771}
1772
1773/**
1774 * ubifs_jnl_change_xattr - change an extended attribute.
1775 * @c: UBIFS file-system description object
1776 * @inode: extended attribute inode
1777 * @host: host inode
1778 *
1779 * This function writes the updated version of an extended attribute inode and
Artem Bityutskiy7d4e9cc2009-03-20 19:11:12 +02001780 * the host inode to the journal (to the base head). The host inode is written
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001781 * after the extended attribute inode in order to guarantee that the extended
1782 * attribute will be flushed when the inode is synchronized by 'fsync()' and
1783 * consequently, the write-buffer is synchronized. This function returns zero
1784 * in case of success and a negative error code in case of failure.
1785 */
1786int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
1787 const struct inode *host)
1788{
1789 int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
Artem Bityutskiyc78c7e32008-08-12 16:30:12 +03001790 struct ubifs_inode *host_ui = ubifs_inode(host);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001791 struct ubifs_ino_node *ino;
1792 union ubifs_key key;
1793 int sync = IS_DIRSYNC(host);
Sascha Hauer823838a2018-09-07 14:36:34 +02001794 u8 hash_host[UBIFS_HASH_ARR_SZ];
1795 u8 hash[UBIFS_HASH_ARR_SZ];
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001796
1797 dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001798 ubifs_assert(c, host->i_nlink > 0);
1799 ubifs_assert(c, inode->i_nlink > 0);
1800 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001801
1802 len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
1803 len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
1804 aligned_len1 = ALIGN(len1, 8);
1805 aligned_len = aligned_len1 + ALIGN(len2, 8);
1806
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001807 aligned_len += ubifs_auth_node_sz(c);
1808
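	/*
	 * One buffer holds both inode nodes - the host inode followed by the
	 * xattr inode - plus room for the authentication node.
	 */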
Richard Weinberger4acadda2017-06-16 16:21:44 +02001809 ino = kzalloc(aligned_len, GFP_NOFS);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001810 if (!ino)
1811 return -ENOMEM;
1812
1813 /* Make reservation before allocating sequence numbers */
1814 err = make_reservation(c, BASEHD, aligned_len);
1815 if (err)
1816 goto out_free;
1817
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001818 pack_inode(c, ino, host, 0);
Sascha Hauer823838a2018-09-07 14:36:34 +02001819 err = ubifs_node_calc_hash(c, ino, hash_host);
1820 if (err)
1821 goto out_release;
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001822 pack_inode(c, (void *)ino + aligned_len1, inode, 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001823 err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash);
1824 if (err)
1825 goto out_release;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001826
1827 err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
1828 if (!sync && !err) {
1829 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1830
1831 ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
1832 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
1833 }
1834 release_head(c, BASEHD);
1835 if (err)
1836 goto out_ro;
1837
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001838 ubifs_add_auth_dirt(c, lnum);
1839
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001840 ino_key_init(c, &key, host->i_ino);
Sascha Hauer823838a2018-09-07 14:36:34 +02001841 err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001842 if (err)
1843 goto out_ro;
1844
1845 ino_key_init(c, &key, inode->i_ino);
Sascha Hauer823838a2018-09-07 14:36:34 +02001846 err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001847 if (err)
1848 goto out_ro;
1849
1850 finish_reservation(c);
1851 spin_lock(&host_ui->ui_lock);
1852 host_ui->synced_i_size = host_ui->ui_size;
1853 spin_unlock(&host_ui->ui_lock);
1854 mark_inode_clean(c, host_ui);
1855 kfree(ino);
1856 return 0;
1857
Sascha Hauer823838a2018-09-07 14:36:34 +02001858out_release:
1859 release_head(c, BASEHD);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001860out_ro:
1861 ubifs_ro_mode(c, err);
1862 finish_reservation(c);
1863out_free:
1864 kfree(ino);
1865 return err;
1866}
1867