/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
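
/*
 * Worked example, for illustration only: on a 64-bit build the bitmap
 * bytes {0x80, 0x00, ...} are packed MSB-first, so __reverse_ulong()
 * yields 0x8000000000000000UL and bit 0 of the f2fs bitmap lands in the
 * MSB of the word, matching the reversed bit numbering used below.
 */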

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
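
/*
 * Example, for illustration: __reverse_ffs(0x8000000000000000UL) == 0 and
 * __reverse_ffs(0x0080000000000000UL) == 8, i.e. positions are counted
 * from the MSB downwards. Each if/else step above halves the remaining
 * width, a binary search mirroring __ffs() on the reversed bit order.
 */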

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
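
/*
 * Worked example, for illustration: after f2fs_set_bit(3, bitmap) the
 * first byte is 0001 0000, __reverse_ulong() gives 0x1000000000000000UL,
 * and __find_rev_next_bit(bitmap, 64, 0) returns 3. A search from
 * offset 4 first masks off bits 0-3 via "tmp &= ~0UL >> offset" and then
 * walks word by word until a set bit or @size is reached.
 */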

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
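
/*
 * Lifecycle note: a page registered here stays pinned (get_page) until
 * commit_inmem_pages() writes it back on atomic commit, or until
 * drop_inmem_pages()/drop_inmem_page() throws it away, e.g. on abort.
 * The ATOMIC_WRITTEN_PAGE tag in page private is what
 * IS_ATOMIC_WRITTEN_PAGE() tests elsewhere to recognize such pages.
 */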

static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_HOT_DATA);
	stat_dec_atomic_write(inode);
}

void drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, !cur || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	pgoff_t last_idx = ULONG_MAX;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			last_idx = page->index;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}

int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * Try to revoke all committed pages; this could still fail
		 * due to lack of memory or some other reason. If that
		 * happens, -EAGAIN is returned, meaning the transaction has
		 * already lost its integrity, and the caller should use a
		 * journal to recover, or rewrite and commit the last
		 * transaction. For any other error number, the revoking was
		 * done by the filesystem itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}
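
/*
 * Caller sketch (assumed pattern, not shown in this file): the atomic
 * write ioctl paths would drive the helpers above roughly as
 *
 *	register_inmem_page(inode, page);	// for each dirtied page
 *	...
 *	err = commit_inmem_pages(inode);	// on commit request
 *	if (err)				// e.g. -EAGAIN
 *		drop_inmem_pages(inode);	// abort: toss cached pages
 *
 * so either every page reaches disk or the revoke path restores the old
 * block addresses recorded in cur->old_addr.
 */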

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}
#endif

	/* balance_fs_bg may remain pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC, or end up with a checkpoint, if there are too
	 * many dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false, false);

	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
	int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
	int i;

	if (!sbi->s_ndevs || ret)
		return ret;

	for (i = 1; i < sbi->s_ndevs; i++) {
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	sb_start_intwrite(sbi->sb);

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	sb_end_intwrite(sbi->sb);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->issing_flush) == 1) {
		ret = submit_flush_wait(sbi);
		atomic_dec(&fcc->issing_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->issing_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->issing_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->issing_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}
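
/*
 * Usage sketch, for illustration: fsync-style paths are expected to call
 *
 *	err = f2fs_issue_flush(sbi);
 *
 * after their data/node writes. With FLUSH_MERGE, concurrent callers
 * queue flush_cmds on issue_list and a single PREFLUSH bio submitted by
 * issue_flush_thread() completes all of them, trading one device cache
 * flush for many waiters.
 */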

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->issing_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0)
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * No error such as -ENOMEM should occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be
 * added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color(&dc->rb_node, &dcc->root);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_dec(&dcc->issing_discard);

	list_del(&dc->list);
	rb_erase(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_msg(sbi->sb, KERN_INFO,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

	dc->error = blk_status_to_errno(bio->bi_status);
	dc->state = D_DONE;
	complete_all(&dc->wait);
	bio_put(bio);
}

void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct bio *bio = NULL;

	if (dc->state != D_PREP)
		return;

	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);

	dc->error = __blkdev_issue_discard(dc->bdev,
				SECTOR_FROM_BLOCK(dc->start),
				SECTOR_FROM_BLOCK(dc->len),
				GFP_NOFS, 0, &bio);
	if (!dc->error) {
		/* must be set before submission: the endio may mark D_DONE right away */
		dc->state = D_SUBMIT;
		atomic_inc(&dcc->issued_discard);
		atomic_inc(&dcc->issing_discard);
		if (bio) {
			bio->bi_private = dc;
			bio->bi_end_io = f2fs_submit_discard_endio;
			bio->bi_opf |= REQ_SYNC;
			submit_bio(bio);
			list_move_tail(&dc->list, &dcc->wait_list);
			__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);

			f2fs_update_iostat(sbi, FS_DISCARD, 1);
		}
	} else {
		__remove_discard_cmd(sbi, dc);
	}
}
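
/*
 * Discard command states, as implemented above and in the endio/wait
 * paths:
 *
 *	D_PREP   --__submit_discard_cmd()-------> D_SUBMIT
 *	D_SUBMIT --f2fs_submit_discard_endio()--> D_DONE
 *	D_DONE   --__remove_discard_cmd()-------> freed
 *
 * Only a command still in D_PREP may be punched or merged in place
 * instead of being issued.
 */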

static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_node;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
	if (!dc)
		return NULL;

	return dc;
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}
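
/*
 * Worked example, for illustration: punching blkaddr 14 out of a D_PREP
 * command covering lstart 10, len 10 trims it to blocks [10, 14) and
 * inserts a new command for [15, 20), so only the overwritten block
 * drops out of the pending discard range.
 */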

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	block_t end = lstart + len;

	mutex_lock(&dcc->cmd_lock);

	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
 next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	mutex_unlock(&dcc->cmd_lock);
}
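
/*
 * Merge example, for illustration: with D_PREP commands pending for
 * [100, 110) and [120, 130) on the same bdev, queueing [110, 120) is
 * back-merged into the first command and then front-merged with the
 * second, leaving a single command for [100, 130) and one fewer rb-tree
 * node.
 */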

static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (sbi->s_ndevs) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	return 0;
}

static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int iter = 0, issued = 0;
	int i;

	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi,
		!__check_rb_tree_consistence(sbi, &dcc->root));
	blk_start_plug(&plug);
	for (i = MAX_PLIST_NUM - 1;
			i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			/* Hurry up to finish fstrim */
			if (dcc->pend_list_tag[i] & P_TRIM) {
				__submit_discard_cmd(sbi, dc);
				issued++;
				continue;
			}

			if (!issue_cond || is_idle(sbi)) {
				issued++;
				__submit_discard_cmd(sbi, dc);
			}
			if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
				goto out;
		}
		if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM)
			dcc->pend_list_tag[i] &= (~P_TRIM);
	}
out:
	blk_finish_plug(&plug);
	mutex_unlock(&dcc->cmd_lock);

	return issued;
}

static void __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
		}
	}
	mutex_unlock(&dcc->cmd_lock);
}

static void __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref)
		__remove_discard_cmd(sbi, dc);
	mutex_unlock(&dcc->cmd_lock);
}

static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
			wait_for_completion_io(&dc->wait);
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		__wait_one_discard_bio(sbi, dc);
		goto next;
	}
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/* This comes from f2fs_put_super */
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
	__issue_discard_cmd(sbi, false);
	__drop_discard_cmd(sbi);
	__wait_discard_cmd(sbi, false);
}

static void mark_discard_range_all(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	int i;

	mutex_lock(&dcc->cmd_lock);
	for (i = 0; i < MAX_PLIST_NUM; i++)
		dcc->pend_list_tag[i] |= P_TRIM;
	mutex_unlock(&dcc->cmd_lock);
}

static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));
		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			return 0;

		if (dcc->discard_wake) {
			dcc->discard_wake = 0;
			if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
				mark_discard_range_all(sbi);
		}

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, true);
		if (issued) {
			__wait_discard_cmd(sbi, true);
			wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
		} else {
			wait_ms = DEF_MAX_DISCARD_ISSUE_TIME;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
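
/*
 * Timing note: the thread polls with an adaptive period, staying at
 * DEF_MIN_DISCARD_ISSUE_TIME while commands are being issued and backing
 * off to DEF_MAX_DISCARD_ISSUE_TIME when the pend lists run dry;
 * dcc->discard_wake, set by other paths, cuts the wait short and, under
 * urgent GC, promotes every pend list to P_TRIM.
 */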

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (sbi->s_ndevs) {
		devi = f2fs_target_device_index(sbi, blkstart);
		blkstart -= FDEV(devi).start_blk;
	}

	/*
	 * We need to know the type of the zone: for conventional zones,
	 * use regular discard if the drive supports it. For sequential
	 * zones, reset the zone write pointer.
	 */
	switch (get_blkz_type(sbi, bdev, blkstart)) {

	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return 0;
		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"(%d) %s: Unaligned discard attempted (block %x + %x)",
				devi, sbi->s_ndevs ? FDEV(devi).path : "",
				blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_reset_zones(bdev, sector,
					nr_sects, GFP_NOFS);
	default:
		/* Unknown zone type: broken device ? */
		return -EIO;
	}
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
					f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}
1338
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08001339static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1340 bool check_only)
Jaegeuk Kimadf49832014-10-28 22:27:59 -07001341{
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001342 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1343 int max_blocks = sbi->blocks_per_seg;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001344 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001345 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1346 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001347 unsigned long *discard_map = (unsigned long *)se->discard_map;
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08001348 unsigned long *dmap = SIT_I(sbi)->tmp_map;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001349 unsigned int start = 0, end = -1;
Chao Yuc473f1a2017-04-27 20:40:39 +08001350 bool force = (cpc->reason & CP_DISCARD);
Chao Yua7eeb8232017-03-28 18:18:50 +08001351 struct discard_entry *de = NULL;
Chao Yu46f84c22017-04-15 14:09:36 +08001352 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001353 int i;
1354
Jaegeuk Kim3e025742016-08-02 10:56:40 -07001355 if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08001356 return false;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001357
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001358 if (!force) {
1359 if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001360 SM_I(sbi)->dcc_info->nr_discards >=
1361 SM_I(sbi)->dcc_info->max_discards)
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08001362 return false;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001363 }
1364
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001365	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1366 for (i = 0; i < entries; i++)
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001367 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
Jaegeuk Kimd7bc2482014-12-12 13:53:41 -08001368 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001369
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001370 while (force || SM_I(sbi)->dcc_info->nr_discards <=
1371 SM_I(sbi)->dcc_info->max_discards) {
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001372 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1373 if (start >= max_blocks)
1374 break;
1375
1376 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
Yunlei Hec7b41e12016-07-07 12:13:33 +08001377 if (force && start && end != max_blocks
1378 && (end - start) < cpc->trim_minlen)
1379 continue;
1380
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08001381 if (check_only)
1382 return true;
1383
Chao Yua7eeb8232017-03-28 18:18:50 +08001384 if (!de) {
1385 de = f2fs_kmem_cache_alloc(discard_entry_slab,
1386 GFP_F2FS_ZERO);
1387 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1388 list_add_tail(&de->list, head);
1389 }
1390
1391 for (i = start; i < end; i++)
1392 __set_bit_le(i, (void *)de->discard_map);
1393
1394 SM_I(sbi)->dcc_info->nr_discards += end - start;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001395 }
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08001396 return false;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001397}
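/*
 * The dmap built above selects discard candidates per policy.  An
 * illustrative 8-bit example (LSB = block 0 of the segment):
 *
 *	cur_map  = 00001111	blocks valid now
 *	ckpt_map = 00111100	blocks valid at the last checkpoint
 *	(cur_map ^ ckpt_map) & ckpt_map = 00110000
 *		-> blocks freed since the checkpoint, safe to discard
 *		   once this checkpoint completes
 *
 * In the CP_DISCARD (FITRIM) case, ~ckpt_map & ~discard_map instead
 * picks every block that is invalid on disk and not yet discarded.
 */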
1398
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001399void release_discard_addrs(struct f2fs_sb_info *sbi)
1400{
Chao Yu46f84c22017-04-15 14:09:36 +08001401 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001402 struct discard_entry *entry, *this;
1403
1404 /* drop caches */
1405 list_for_each_entry_safe(entry, this, head, list) {
1406 list_del(&entry->list);
1407 kmem_cache_free(discard_entry_slab, entry);
1408 }
1409}
1410
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001411/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001412 * Should call clear_prefree_segments after checkpoint is done.
1413 */
1414static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1415{
1416 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Chao Yub65ee142014-08-04 10:10:07 +08001417 unsigned int segno;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001418
1419 mutex_lock(&dirty_i->seglist_lock);
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001420 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001421 __set_test_and_free(sbi, segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001422 mutex_unlock(&dirty_i->seglist_lock);
1423}
1424
Jaegeuk Kim836b5a62015-04-30 22:50:06 -07001425void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001426{
Chao Yu969d1b12017-08-07 23:09:56 +08001427 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1428 struct list_head *head = &dcc->entry_list;
Chao Yu2d7b8222014-03-29 11:33:17 +08001429 struct discard_entry *entry, *this;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001430 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Changman Lee29e59c12013-11-11 09:24:37 +09001431 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
Changman Lee29e59c12013-11-11 09:24:37 +09001432 unsigned int start = 0, end = -1;
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001433 unsigned int secno, start_segno;
Chao Yuc473f1a2017-04-27 20:40:39 +08001434 bool force = (cpc->reason & CP_DISCARD);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001435
1436 mutex_lock(&dirty_i->seglist_lock);
Changman Lee29e59c12013-11-11 09:24:37 +09001437
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001438 while (1) {
Changman Lee29e59c12013-11-11 09:24:37 +09001439 int i;
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001440 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
1441 if (start >= MAIN_SEGS(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001442 break;
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001443 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
1444 start + 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001445
Changman Lee29e59c12013-11-11 09:24:37 +09001446 for (i = start; i < end; i++)
1447 clear_bit(i, prefree_map);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001448
Changman Lee29e59c12013-11-11 09:24:37 +09001449 dirty_i->nr_dirty[PRE] -= end - start;
1450
Yunlei He650d3c42016-12-22 11:46:24 +08001451 if (!test_opt(sbi, DISCARD))
Changman Lee29e59c12013-11-11 09:24:37 +09001452 continue;
1453
Yunlei He650d3c42016-12-22 11:46:24 +08001454 if (force && start >= cpc->trim_start &&
1455 (end - 1) <= cpc->trim_end)
1456 continue;
1457
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001458 if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
1459 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
Jaegeuk Kim37208872013-11-12 16:55:17 +09001460 (end - start) << sbi->log_blocks_per_seg);
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001461 continue;
1462 }
1463next:
Jaegeuk Kim4ddb1a42017-04-07 15:08:17 -07001464 secno = GET_SEC_FROM_SEG(sbi, start);
1465 start_segno = GET_SEG_FROM_SEC(sbi, secno);
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001466 if (!IS_CURSEC(sbi, secno) &&
Jaegeuk Kim302bd342017-04-07 14:33:22 -07001467 !get_valid_blocks(sbi, start, true))
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001468 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
1469 sbi->segs_per_sec << sbi->log_blocks_per_seg);
1470
1471 start = start_segno + sbi->segs_per_sec;
1472 if (start < end)
1473 goto next;
Jaegeuk Kim8b107f52017-02-27 11:57:11 -08001474 else
1475 end = start - 1;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001476 }
1477 mutex_unlock(&dirty_i->seglist_lock);
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001478
1479 /* send small discards */
Chao Yu2d7b8222014-03-29 11:33:17 +08001480 list_for_each_entry_safe(entry, this, head, list) {
Chao Yua7eeb8232017-03-28 18:18:50 +08001481 unsigned int cur_pos = 0, next_pos, len, total_len = 0;
1482 bool is_valid = test_bit_le(0, entry->discard_map);
1483
1484find_next:
1485 if (is_valid) {
1486 next_pos = find_next_zero_bit_le(entry->discard_map,
1487 sbi->blocks_per_seg, cur_pos);
1488 len = next_pos - cur_pos;
1489
Damien Le Moalacfd28102017-05-26 17:04:40 +09001490 if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
1491 (force && len < cpc->trim_minlen))
Chao Yua7eeb8232017-03-28 18:18:50 +08001492 goto skip;
1493
1494 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
1495 len);
1496 cpc->trimmed += len;
1497 total_len += len;
1498 } else {
1499 next_pos = find_next_bit_le(entry->discard_map,
1500 sbi->blocks_per_seg, cur_pos);
1501 }
Jaegeuk Kim836b5a62015-04-30 22:50:06 -07001502skip:
Chao Yua7eeb8232017-03-28 18:18:50 +08001503 cur_pos = next_pos;
1504 is_valid = !is_valid;
1505
1506 if (cur_pos < sbi->blocks_per_seg)
1507 goto find_next;
1508
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001509 list_del(&entry->list);
Chao Yu969d1b12017-08-07 23:09:56 +08001510 dcc->nr_discards -= total_len;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001511 kmem_cache_free(discard_entry_slab, entry);
1512 }
Chao Yu34e159d2017-04-25 00:21:34 +08001513
Jaegeuk Kim01983c72017-08-22 21:15:43 -07001514 wake_up_discard_thread(sbi, false);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001515}
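/*
 * The find_next loop above walks each entry->discard_map as alternating
 * runs of set and clear bits.  For an illustrative map 111001100...
 * (LSB first):
 *
 *	cur_pos=0, is_valid=1 -> run [0,3) issued (len 3)
 *	cur_pos=3, is_valid=0 -> run [3,5) skipped
 *	cur_pos=5, is_valid=1 -> run [5,7) issued (len 2)
 *
 * subject to the trim_minlen filter, and skipped entirely on blkzoned
 * mounts, where space is reclaimed by zone reset instead.
 */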
1516
Jaegeuk Kim8ed59742017-01-29 14:27:02 +09001517static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001518{
Jaegeuk Kim15469962017-01-09 20:32:07 -08001519 dev_t dev = sbi->sb->s_bdev->bd_dev;
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001520 struct discard_cmd_control *dcc;
Chao Yuba48a332017-04-15 14:09:37 +08001521 int err = 0, i;
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001522
1523 if (SM_I(sbi)->dcc_info) {
1524 dcc = SM_I(sbi)->dcc_info;
1525 goto init_thread;
1526 }
1527
1528 dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
1529 if (!dcc)
1530 return -ENOMEM;
1531
Chao Yu969d1b12017-08-07 23:09:56 +08001532 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
Chao Yu46f84c22017-04-15 14:09:36 +08001533 INIT_LIST_HEAD(&dcc->entry_list);
Chao Yu969d1b12017-08-07 23:09:56 +08001534 for (i = 0; i < MAX_PLIST_NUM; i++) {
Chao Yuba48a332017-04-15 14:09:37 +08001535 INIT_LIST_HEAD(&dcc->pend_list[i]);
Chao Yu969d1b12017-08-07 23:09:56 +08001536 if (i >= dcc->discard_granularity - 1)
1537 dcc->pend_list_tag[i] |= P_ACTIVE;
1538 }
Chao Yu46f84c22017-04-15 14:09:36 +08001539 INIT_LIST_HEAD(&dcc->wait_list);
Jaegeuk Kim15469962017-01-09 20:32:07 -08001540 mutex_init(&dcc->cmd_lock);
Chao Yu8b8dd652017-03-25 17:19:58 +08001541 atomic_set(&dcc->issued_discard, 0);
1542 atomic_set(&dcc->issing_discard, 0);
Chao Yu5f323662017-03-25 17:19:59 +08001543 atomic_set(&dcc->discard_cmd_cnt, 0);
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001544 dcc->nr_discards = 0;
Chao Yud618eba2017-04-25 00:21:35 +08001545 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
Chao Yud84d1cb2017-04-18 19:27:39 +08001546 dcc->undiscard_blks = 0;
Chao Yu004b6862017-04-14 23:24:55 +08001547 dcc->root = RB_ROOT;
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001548
Jaegeuk Kim15469962017-01-09 20:32:07 -08001549 init_waitqueue_head(&dcc->discard_wait_queue);
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001550 SM_I(sbi)->dcc_info = dcc;
1551init_thread:
Jaegeuk Kim15469962017-01-09 20:32:07 -08001552 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
1553 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
1554 if (IS_ERR(dcc->f2fs_issue_discard)) {
1555 err = PTR_ERR(dcc->f2fs_issue_discard);
1556 kfree(dcc);
1557 SM_I(sbi)->dcc_info = NULL;
1558 return err;
1559 }
1560
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001561 return err;
1562}
1563
Chao Yuf0994052017-03-27 18:14:04 +08001564static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001565{
1566 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1567
Chao Yuf0994052017-03-27 18:14:04 +08001568 if (!dcc)
1569 return;
1570
Chao Yucce13252017-06-29 23:17:45 +08001571 stop_discard_thread(sbi);
Chao Yuf0994052017-03-27 18:14:04 +08001572
1573 kfree(dcc);
1574 SM_I(sbi)->dcc_info = NULL;
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08001575}
1576
Chao Yu184a5cd2014-09-04 18:13:01 +08001577static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001578{
1579 struct sit_info *sit_i = SIT_I(sbi);
Chao Yu184a5cd2014-09-04 18:13:01 +08001580
1581 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001582 sit_i->dirty_sentries++;
Chao Yu184a5cd2014-09-04 18:13:01 +08001583 return false;
1584 }
1585
1586 return true;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001587}
1588
1589static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
1590 unsigned int segno, int modified)
1591{
1592 struct seg_entry *se = get_seg_entry(sbi, segno);
1593 se->type = type;
1594 if (modified)
1595 __mark_sit_entry_dirty(sbi, segno);
1596}
1597
1598static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
1599{
1600 struct seg_entry *se;
1601 unsigned int segno, offset;
1602 long int new_vblocks;
Yunlong Song6415fed2017-08-02 21:20:13 +08001603 bool exist;
1604#ifdef CONFIG_F2FS_CHECK_FS
1605 bool mir_exist;
1606#endif
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001607
1608 segno = GET_SEGNO(sbi, blkaddr);
1609
1610 se = get_seg_entry(sbi, segno);
1611 new_vblocks = se->valid_blocks + del;
Jaegeuk Kim491c0852014-02-04 13:01:10 +09001612 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001613
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07001614 f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001615 (new_vblocks > sbi->blocks_per_seg)));
1616
1617 se->valid_blocks = new_vblocks;
1618 se->mtime = get_mtime(sbi);
1619 SIT_I(sbi)->max_mtime = se->mtime;
1620
1621 /* Update valid block bitmap */
1622 if (del > 0) {
Yunlong Song6415fed2017-08-02 21:20:13 +08001623 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
Chao Yu355e7892017-01-07 18:51:01 +08001624#ifdef CONFIG_F2FS_CHECK_FS
Yunlong Song6415fed2017-08-02 21:20:13 +08001625 mir_exist = f2fs_test_and_set_bit(offset,
1626 se->cur_valid_map_mir);
1627 if (unlikely(exist != mir_exist)) {
1628 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1629 "when setting bitmap, blk:%u, old bit:%d",
1630 blkaddr, exist);
Jaegeuk Kim05796762014-09-02 16:05:00 -07001631 f2fs_bug_on(sbi, 1);
Chao Yu355e7892017-01-07 18:51:01 +08001632 }
Yunlong Song6415fed2017-08-02 21:20:13 +08001633#endif
1634 if (unlikely(exist)) {
1635 f2fs_msg(sbi->sb, KERN_ERR,
1636 "Bitmap was wrongly set, blk:%u", blkaddr);
1637 f2fs_bug_on(sbi, 1);
Yunlong Song35ee82c2017-08-02 22:16:54 +08001638 se->valid_blocks--;
1639 del = 0;
Yunlong Song6415fed2017-08-02 21:20:13 +08001640 }
1641
Jaegeuk Kim3e025742016-08-02 10:56:40 -07001642 if (f2fs_discard_en(sbi) &&
1643 !f2fs_test_and_set_bit(offset, se->discard_map))
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001644 sbi->discard_blks--;
Jaegeuk Kim720037f2017-03-06 11:59:56 -08001645
1646 /* don't overwrite by SSR to keep node chain */
1647 if (se->type == CURSEG_WARM_NODE) {
1648 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
1649 se->ckpt_valid_blocks++;
1650 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001651 } else {
Yunlong Song6415fed2017-08-02 21:20:13 +08001652 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
Chao Yu355e7892017-01-07 18:51:01 +08001653#ifdef CONFIG_F2FS_CHECK_FS
Yunlong Song6415fed2017-08-02 21:20:13 +08001654 mir_exist = f2fs_test_and_clear_bit(offset,
1655 se->cur_valid_map_mir);
1656 if (unlikely(exist != mir_exist)) {
1657 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1658 "when clearing bitmap, blk:%u, old bit:%d",
1659 blkaddr, exist);
Jaegeuk Kim05796762014-09-02 16:05:00 -07001660 f2fs_bug_on(sbi, 1);
Chao Yu355e7892017-01-07 18:51:01 +08001661 }
Yunlong Song6415fed2017-08-02 21:20:13 +08001662#endif
1663 if (unlikely(!exist)) {
1664 f2fs_msg(sbi->sb, KERN_ERR,
1665 "Bitmap was wrongly cleared, blk:%u", blkaddr);
1666 f2fs_bug_on(sbi, 1);
Yunlong Song35ee82c2017-08-02 22:16:54 +08001667 se->valid_blocks++;
1668 del = 0;
Yunlong Song6415fed2017-08-02 21:20:13 +08001669 }
1670
Jaegeuk Kim3e025742016-08-02 10:56:40 -07001671 if (f2fs_discard_en(sbi) &&
1672 f2fs_test_and_clear_bit(offset, se->discard_map))
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001673 sbi->discard_blks++;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001674 }
1675 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
1676 se->ckpt_valid_blocks += del;
1677
1678 __mark_sit_entry_dirty(sbi, segno);
1679
1680 /* update total number of valid blocks to be written in ckpt area */
1681 SIT_I(sbi)->written_valid_blocks += del;
1682
1683 if (sbi->segs_per_sec > 1)
1684 get_sec_entry(sbi, segno)->valid_blocks += del;
1685}
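/*
 * update_sit_entry() call convention, as used throughout this file:
 *
 *	update_sit_entry(sbi, blkaddr, 1);	block newly written
 *	update_sit_entry(sbi, blkaddr, -1);	block invalidated
 *
 * Besides cur_valid_map (and its CONFIG_F2FS_CHECK_FS mirror), it keeps
 * discard_map, ckpt_valid_blocks and the dirty-sentry bitmap coherent,
 * so callers only pass the block address and the +/-1 delta.
 */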
1686
Jaegeuk Kim5e443812014-01-28 12:22:14 +09001687void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001688{
Jaegeuk Kim5e443812014-01-28 12:22:14 +09001689 update_sit_entry(sbi, new, 1);
1690 if (GET_SEGNO(sbi, old) != NULL_SEGNO)
1691 update_sit_entry(sbi, old, -1);
1692
1693 locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
1694 locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001695}
1696
1697void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
1698{
1699 unsigned int segno = GET_SEGNO(sbi, addr);
1700 struct sit_info *sit_i = SIT_I(sbi);
1701
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07001702 f2fs_bug_on(sbi, addr == NULL_ADDR);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001703 if (addr == NEW_ADDR)
1704 return;
1705
1706 /* add it into sit main buffer */
1707 mutex_lock(&sit_i->sentry_lock);
1708
1709 update_sit_entry(sbi, addr, -1);
1710
1711 /* add it into dirty seglist */
1712 locate_dirty_segment(sbi, segno);
1713
1714 mutex_unlock(&sit_i->sentry_lock);
1715}
1716
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001717bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
1718{
1719 struct sit_info *sit_i = SIT_I(sbi);
1720 unsigned int segno, offset;
1721 struct seg_entry *se;
1722 bool is_cp = false;
1723
1724 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
1725 return true;
1726
1727 mutex_lock(&sit_i->sentry_lock);
1728
1729 segno = GET_SEGNO(sbi, blkaddr);
1730 se = get_seg_entry(sbi, segno);
1731 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1732
1733 if (f2fs_test_bit(offset, se->ckpt_valid_map))
1734 is_cp = true;
1735
1736 mutex_unlock(&sit_i->sentry_lock);
1737
1738 return is_cp;
1739}
1740
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001741/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001742 * This function must be called with curseg_mutex held
1743 */
1744static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
Haicheng Lie79efe32013-06-13 16:59:27 +08001745 struct f2fs_summary *sum)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001746{
1747 struct curseg_info *curseg = CURSEG_I(sbi, type);
1748 void *addr = curseg->sum_blk;
Haicheng Lie79efe32013-06-13 16:59:27 +08001749 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001750 memcpy(addr, sum, sizeof(struct f2fs_summary));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001751}
1752
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001753/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001754 * Calculate the number of current summary pages for writing
1755 */
Chao Yu3fa06d72014-12-09 14:21:46 +08001756int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001757{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001758 int valid_sum_count = 0;
Fan Li9a479382013-10-29 16:21:47 +08001759 int i, sum_in_page;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001760
1761 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1762 if (sbi->ckpt->alloc_type[i] == SSR)
1763 valid_sum_count += sbi->blocks_per_seg;
Chao Yu3fa06d72014-12-09 14:21:46 +08001764 else {
1765 if (for_ra)
1766 valid_sum_count += le16_to_cpu(
1767 F2FS_CKPT(sbi)->cur_data_blkoff[i]);
1768 else
1769 valid_sum_count += curseg_blkoff(sbi, i);
1770 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001771 }
1772
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001773 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
Fan Li9a479382013-10-29 16:21:47 +08001774 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
1775 if (valid_sum_count <= sum_in_page)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001776 return 1;
Fan Li9a479382013-10-29 16:21:47 +08001777 else if ((valid_sum_count - sum_in_page) <=
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001778 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001779 return 2;
1780 return 3;
1781}
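/*
 * A worked example of the estimate above, assuming the usual 4KB block
 * (SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5, SUM_JOURNAL_SIZE = 507):
 *
 *	sum_in_page = (4096 - 2*507 - 5) / 7 = 439	first page
 *	later pages = (4096 - 5) / 7         = 584	each
 *
 * so up to 439 summaries fit in one page, up to 1023 in two, and the
 * worst case of three SSR data logs (3 * 512 = 1536) still fits in
 * three.
 */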
1782
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001783/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001784 * Caller should put this summary page
1785 */
1786struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
1787{
1788 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
1789}
1790
Chao Yu381722d2015-05-19 17:40:04 +08001791void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
1792{
1793 struct page *page = grab_meta_page(sbi, blk_addr);
1794 void *dst = page_address(page);
1795
1796 if (src)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001797 memcpy(dst, src, PAGE_SIZE);
Chao Yu381722d2015-05-19 17:40:04 +08001798 else
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001799 memset(dst, 0, PAGE_SIZE);
Chao Yu381722d2015-05-19 17:40:04 +08001800 set_page_dirty(page);
1801 f2fs_put_page(page, 1);
1802}
1803
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001804static void write_sum_page(struct f2fs_sb_info *sbi,
1805 struct f2fs_summary_block *sum_blk, block_t blk_addr)
1806{
Chao Yu381722d2015-05-19 17:40:04 +08001807 update_meta_page(sbi, (void *)sum_blk, blk_addr);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001808}
1809
Chao Yub7ad7512016-02-19 18:08:46 +08001810static void write_current_sum_page(struct f2fs_sb_info *sbi,
1811 int type, block_t blk_addr)
1812{
1813 struct curseg_info *curseg = CURSEG_I(sbi, type);
1814 struct page *page = grab_meta_page(sbi, blk_addr);
1815 struct f2fs_summary_block *src = curseg->sum_blk;
1816 struct f2fs_summary_block *dst;
1817
1818 dst = (struct f2fs_summary_block *)page_address(page);
1819
1820 mutex_lock(&curseg->curseg_mutex);
1821
1822 down_read(&curseg->journal_rwsem);
1823 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
1824 up_read(&curseg->journal_rwsem);
1825
1826 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
1827 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
1828
1829 mutex_unlock(&curseg->curseg_mutex);
1830
1831 set_page_dirty(page);
1832 f2fs_put_page(page, 1);
1833}
1834
Jaegeuk Kima7881892017-04-20 13:51:57 -07001835static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
1836{
1837 struct curseg_info *curseg = CURSEG_I(sbi, type);
1838 unsigned int segno = curseg->segno + 1;
1839 struct free_segmap_info *free_i = FREE_I(sbi);
1840
1841 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
1842 return !test_bit(segno, free_i->free_segmap);
1843 return 0;
1844}
1845
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001846/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001847 * Find a new segment from the free segments bitmap, in the right order.
1848 * This function must succeed; otherwise BUG.
1849 */
1850static void get_new_segment(struct f2fs_sb_info *sbi,
1851 unsigned int *newseg, bool new_sec, int dir)
1852{
1853 struct free_segmap_info *free_i = FREE_I(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001854 unsigned int segno, secno, zoneno;
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001855 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
Jaegeuk Kim4ddb1a42017-04-07 15:08:17 -07001856 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
1857 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001858 unsigned int left_start = hint;
1859 bool init = true;
1860 int go_left = 0;
1861 int i;
1862
Chao Yu1a118cc2015-02-11 18:20:38 +08001863 spin_lock(&free_i->segmap_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001864
1865 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
1866 segno = find_next_zero_bit(free_i->free_segmap,
Jaegeuk Kim4ddb1a42017-04-07 15:08:17 -07001867 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
1868 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001869 goto got_it;
1870 }
1871find_other_zone:
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001872 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
1873 if (secno >= MAIN_SECS(sbi)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001874 if (dir == ALLOC_RIGHT) {
1875 secno = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001876 MAIN_SECS(sbi), 0);
1877 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001878 } else {
1879 go_left = 1;
1880 left_start = hint - 1;
1881 }
1882 }
1883 if (go_left == 0)
1884 goto skip_left;
1885
1886 while (test_bit(left_start, free_i->free_secmap)) {
1887 if (left_start > 0) {
1888 left_start--;
1889 continue;
1890 }
1891 left_start = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001892 MAIN_SECS(sbi), 0);
1893 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001894 break;
1895 }
1896 secno = left_start;
1897skip_left:
1898 hint = secno;
Jaegeuk Kim4ddb1a42017-04-07 15:08:17 -07001899 segno = GET_SEG_FROM_SEC(sbi, secno);
1900 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001901
1902 /* give up on finding another zone */
1903 if (!init)
1904 goto got_it;
1905 if (sbi->secs_per_zone == 1)
1906 goto got_it;
1907 if (zoneno == old_zoneno)
1908 goto got_it;
1909 if (dir == ALLOC_LEFT) {
1910 if (!go_left && zoneno + 1 >= total_zones)
1911 goto got_it;
1912 if (go_left && zoneno == 0)
1913 goto got_it;
1914 }
1915 for (i = 0; i < NR_CURSEG_TYPE; i++)
1916 if (CURSEG_I(sbi, i)->zone == zoneno)
1917 break;
1918
1919 if (i < NR_CURSEG_TYPE) {
1920		/* zone is in use, try another */
1921 if (go_left)
1922 hint = zoneno * sbi->secs_per_zone - 1;
1923 else if (zoneno + 1 >= total_zones)
1924 hint = 0;
1925 else
1926 hint = (zoneno + 1) * sbi->secs_per_zone;
1927 init = false;
1928 goto find_other_zone;
1929 }
1930got_it:
1931 /* set it as dirty segment in free segmap */
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07001932 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001933 __set_inuse(sbi, segno);
1934 *newseg = segno;
Chao Yu1a118cc2015-02-11 18:20:38 +08001935 spin_unlock(&free_i->segmap_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001936}
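/*
 * In short, get_new_segment() prefers, in order (a sketch of the logic
 * above, not an exhaustive state machine):
 *
 *	1. the next free segment inside the current section;
 *	2. the next free section to the right of the hint (or to the
 *	   left, once go_left is taken);
 *	3. any zone not already occupied by another curseg, retrying
 *	   via find_other_zone until init is cleared.
 */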
1937
1938static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
1939{
1940 struct curseg_info *curseg = CURSEG_I(sbi, type);
1941 struct summary_footer *sum_footer;
1942
1943 curseg->segno = curseg->next_segno;
Jaegeuk Kim4ddb1a42017-04-07 15:08:17 -07001944 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001945 curseg->next_blkoff = 0;
1946 curseg->next_segno = NULL_SEGNO;
1947
1948 sum_footer = &(curseg->sum_blk->footer);
1949 memset(sum_footer, 0, sizeof(struct summary_footer));
1950 if (IS_DATASEG(type))
1951 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
1952 if (IS_NODESEG(type))
1953 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
1954 __set_sit_entry_type(sbi, type, curseg->segno, modified);
1955}
1956
Jaegeuk Kim7a20b8a2017-03-24 20:41:45 -04001957static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
1958{
Jaegeuk Kima7881892017-04-20 13:51:57 -07001959	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
1960 if (sbi->segs_per_sec != 1)
1961 return CURSEG_I(sbi, type)->segno;
1962
Jaegeuk Kim7a20b8a2017-03-24 20:41:45 -04001963 if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
1964 return 0;
1965
Jaegeuk Kime066b832017-04-13 15:17:00 -07001966 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
1967 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
Jaegeuk Kim7a20b8a2017-03-24 20:41:45 -04001968 return CURSEG_I(sbi, type)->segno;
1969}
1970
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001971/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001972 * Allocate a current working segment.
1973 * This function always allocates a free segment in LFS manner.
1974 */
1975static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
1976{
1977 struct curseg_info *curseg = CURSEG_I(sbi, type);
1978 unsigned int segno = curseg->segno;
1979 int dir = ALLOC_LEFT;
1980
1981 write_sum_page(sbi, curseg->sum_blk,
Haicheng Li81fb5e82013-05-14 18:20:28 +08001982 GET_SUM_BLOCK(sbi, segno));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001983 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
1984 dir = ALLOC_RIGHT;
1985
1986 if (test_opt(sbi, NOHEAP))
1987 dir = ALLOC_RIGHT;
1988
Jaegeuk Kim7a20b8a2017-03-24 20:41:45 -04001989 segno = __get_next_segno(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001990 get_new_segment(sbi, &segno, new_sec, dir);
1991 curseg->next_segno = segno;
1992 reset_curseg(sbi, type, 1);
1993 curseg->alloc_type = LFS;
1994}
1995
1996static void __next_free_blkoff(struct f2fs_sb_info *sbi,
1997 struct curseg_info *seg, block_t start)
1998{
1999 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
Changman Leee81c93c2013-11-15 13:21:16 +09002000 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08002001 unsigned long *target_map = SIT_I(sbi)->tmp_map;
Changman Leee81c93c2013-11-15 13:21:16 +09002002 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2003 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2004 int i, pos;
2005
2006 for (i = 0; i < entries; i++)
2007 target_map[i] = ckpt_map[i] | cur_map[i];
2008
2009 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2010
2011 seg->next_blkoff = pos;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002012}
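/*
 * An illustrative 8-bit example of the search above (LSB = offset 0):
 *
 *	cur_map    = 00000101	blocks valid now
 *	ckpt_map   = 00000011	blocks valid at the last checkpoint
 *	target_map = 00000111	union: blocks SSR must not overwrite
 *
 * __find_rev_next_zero_bit(target_map, ..., 0) then yields offset 3 as
 * the next writable block in this SSR segment.
 */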
2013
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002014/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002015 * If a segment is written in LFS manner, the next block offset is simply
2016 * obtained by increasing the current block offset. However, if a segment is
2017 * written in SSR manner, the next block offset is obtained via __next_free_blkoff().
2018 */
2019static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2020 struct curseg_info *seg)
2021{
2022 if (seg->alloc_type == SSR)
2023 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2024 else
2025 seg->next_blkoff++;
2026}
2027
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002028/*
arter97e1c42042014-08-06 23:22:50 +09002029 * This function always allocates a used segment (from the dirty seglist) in SSR
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002030 * manner, so it must recover the existing segment information of valid blocks.
2031 */
Chao Yu025d63a2017-08-30 18:04:48 +08002032static void change_curseg(struct f2fs_sb_info *sbi, int type)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002033{
2034 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2035 struct curseg_info *curseg = CURSEG_I(sbi, type);
2036 unsigned int new_segno = curseg->next_segno;
2037 struct f2fs_summary_block *sum_node;
2038 struct page *sum_page;
2039
2040 write_sum_page(sbi, curseg->sum_blk,
2041 GET_SUM_BLOCK(sbi, curseg->segno));
2042 __set_test_and_inuse(sbi, new_segno);
2043
2044 mutex_lock(&dirty_i->seglist_lock);
2045 __remove_dirty_segment(sbi, new_segno, PRE);
2046 __remove_dirty_segment(sbi, new_segno, DIRTY);
2047 mutex_unlock(&dirty_i->seglist_lock);
2048
2049 reset_curseg(sbi, type, 1);
2050 curseg->alloc_type = SSR;
2051 __next_free_blkoff(sbi, curseg, 0);
2052
Chao Yu025d63a2017-08-30 18:04:48 +08002053 sum_page = get_sum_page(sbi, new_segno);
2054 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2055 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2056 f2fs_put_page(sum_page, 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002057}
2058
Jaegeuk Kim43727522013-02-04 15:11:17 +09002059static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
2060{
2061 struct curseg_info *curseg = CURSEG_I(sbi, type);
2062 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
Jaegeuk Kime066b832017-04-13 15:17:00 -07002063 unsigned segno = NULL_SEGNO;
Chao Yud27c3d82017-02-24 18:46:00 +08002064 int i, cnt;
2065 bool reversed = false;
Jaegeuk Kimc192f7a2017-02-22 17:10:18 -08002066
2067 /* need_SSR() already forces to do this */
Jaegeuk Kime066b832017-04-13 15:17:00 -07002068 if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
2069 curseg->next_segno = segno;
Jaegeuk Kimc192f7a2017-02-22 17:10:18 -08002070 return 1;
Jaegeuk Kime066b832017-04-13 15:17:00 -07002071 }
Jaegeuk Kim43727522013-02-04 15:11:17 +09002072
Jaegeuk Kim70d625c2017-02-22 17:02:32 -08002073 /* For node segments, let's do SSR more intensively */
2074 if (IS_NODESEG(type)) {
Chao Yud27c3d82017-02-24 18:46:00 +08002075 if (type >= CURSEG_WARM_NODE) {
2076 reversed = true;
2077 i = CURSEG_COLD_NODE;
2078 } else {
2079 i = CURSEG_HOT_NODE;
2080 }
2081 cnt = NR_CURSEG_NODE_TYPE;
Jaegeuk Kim70d625c2017-02-22 17:02:32 -08002082 } else {
Chao Yud27c3d82017-02-24 18:46:00 +08002083 if (type >= CURSEG_WARM_DATA) {
2084 reversed = true;
2085 i = CURSEG_COLD_DATA;
2086 } else {
2087 i = CURSEG_HOT_DATA;
2088 }
2089 cnt = NR_CURSEG_DATA_TYPE;
Jaegeuk Kim70d625c2017-02-22 17:02:32 -08002090 }
Jaegeuk Kim43727522013-02-04 15:11:17 +09002091
Chao Yud27c3d82017-02-24 18:46:00 +08002092 for (; cnt-- > 0; reversed ? i-- : i++) {
Jaegeuk Kimc192f7a2017-02-22 17:10:18 -08002093 if (i == type)
2094 continue;
Jaegeuk Kime066b832017-04-13 15:17:00 -07002095 if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
2096 curseg->next_segno = segno;
Jaegeuk Kim43727522013-02-04 15:11:17 +09002097 return 1;
Jaegeuk Kime066b832017-04-13 15:17:00 -07002098 }
Jaegeuk Kimc192f7a2017-02-22 17:10:18 -08002099 }
Jaegeuk Kim43727522013-02-04 15:11:17 +09002100 return 0;
2101}
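/*
 * The SSR probe order above, by way of example: for type ==
 * CURSEG_WARM_DATA the victim search first tries WARM_DATA itself, then
 * runs reversed from CURSEG_COLD_DATA, so it tries COLD_DATA next,
 * skips WARM_DATA, and finally tries HOT_DATA.  Node logs behave
 * symmetrically over the three node types.
 */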
2102
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002103/*
2104 * Flush out the current segment and replace it with a new segment.
2105 * This function must succeed; otherwise BUG.
2106 */
2107static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2108 int type, bool force)
2109{
Jaegeuk Kima7881892017-04-20 13:51:57 -07002110 struct curseg_info *curseg = CURSEG_I(sbi, type);
2111
Gu Zheng7b405272013-08-19 09:41:15 +08002112 if (force)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002113 new_curseg(sbi, type, true);
Jaegeuk Kim5b6c6be2017-02-14 19:32:51 -08002114 else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2115 type == CURSEG_WARM_NODE)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002116 new_curseg(sbi, type, false);
Jaegeuk Kima7881892017-04-20 13:51:57 -07002117 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
2118 new_curseg(sbi, type, false);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002119 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
Chao Yu025d63a2017-08-30 18:04:48 +08002120 change_curseg(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002121 else
2122 new_curseg(sbi, type, false);
Jaegeuk Kimdcdfff62013-10-22 20:56:10 +09002123
Jaegeuk Kima7881892017-04-20 13:51:57 -07002124 stat_inc_seg_type(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002125}
2126
2127void allocate_new_segments(struct f2fs_sb_info *sbi)
2128{
Jaegeuk Kim6ae1be12016-11-11 12:31:40 -08002129 struct curseg_info *curseg;
2130 unsigned int old_segno;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002131 int i;
2132
Jaegeuk Kim6ae1be12016-11-11 12:31:40 -08002133 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2134 curseg = CURSEG_I(sbi, i);
2135 old_segno = curseg->segno;
2136 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
2137 locate_dirty_segment(sbi, old_segno);
2138 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002139}
2140
2141static const struct segment_allocation default_salloc_ops = {
2142 .allocate_segment = allocate_segment_by_default,
2143};
2144
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08002145bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2146{
2147 __u64 trim_start = cpc->trim_start;
2148 bool has_candidate = false;
2149
2150 mutex_lock(&SIT_I(sbi)->sentry_lock);
2151 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2152 if (add_discard_addrs(sbi, cpc, true)) {
2153 has_candidate = true;
2154 break;
2155 }
2156 }
2157 mutex_unlock(&SIT_I(sbi)->sentry_lock);
2158
2159 cpc->trim_start = trim_start;
2160 return has_candidate;
2161}
2162
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002163int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2164{
Jaegeuk Kimf7ef9b82015-02-09 12:02:44 -08002165 __u64 start = F2FS_BYTES_TO_BLK(range->start);
2166 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002167 unsigned int start_segno, end_segno;
2168 struct cp_control cpc;
Chao Yuc34f42e2015-12-23 17:50:30 +08002169 int err = 0;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002170
Jaegeuk Kim836b5a62015-04-30 22:50:06 -07002171 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002172 return -EINVAL;
2173
Jan Kara9bd27ae2014-10-21 14:07:33 +02002174 cpc.trimmed = 0;
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002175 if (end <= MAIN_BLKADDR(sbi))
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002176 goto out;
2177
Yunlei Heed214a12016-09-01 10:14:39 +08002178 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2179 f2fs_msg(sbi->sb, KERN_WARNING,
2180 "Found FS corruption, run fsck to fix.");
2181 goto out;
2182 }
2183
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002184 /* start/end segment number in main_area */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002185 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2186 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2187 GET_SEGNO(sbi, end);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002188 cpc.reason = CP_DISCARD;
Jaegeuk Kim836b5a62015-04-30 22:50:06 -07002189 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002190
2191 /* do checkpoint to issue discard commands safely */
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002192 for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
2193 cpc.trim_start = start_segno;
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07002194
2195 if (sbi->discard_blks == 0)
2196 break;
2197 else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
2198 cpc.trim_end = end_segno;
2199 else
2200 cpc.trim_end = min_t(unsigned int,
2201 rounddown(start_segno +
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002202 BATCHED_TRIM_SEGMENTS(sbi),
2203 sbi->segs_per_sec) - 1, end_segno);
2204
2205 mutex_lock(&sbi->gc_mutex);
Chao Yuc34f42e2015-12-23 17:50:30 +08002206 err = write_checkpoint(sbi, &cpc);
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002207 mutex_unlock(&sbi->gc_mutex);
Chao Yue9328352016-08-21 23:21:29 +08002208 if (err)
2209 break;
Chao Yu74fa5f32016-08-21 23:21:30 +08002210
2211 schedule();
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002212 }
Chao Yu969d1b12017-08-07 23:09:56 +08002213	/* It's time to issue all the queued discards */
2214 mark_discard_range_all(sbi);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002215out:
Jaegeuk Kimf7ef9b82015-02-09 12:02:44 -08002216 range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
Chao Yuc34f42e2015-12-23 17:50:30 +08002217 return err;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002218}
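/*
 * Sketch of a userspace caller, with hypothetical values; FITRIM is the
 * standard ioctl that reaches this function through f2fs_ioctl():
 *
 *	struct fstrim_range range = {
 *		.start  = 0,
 *		.len    = (__u64)-1,	trim the whole filesystem
 *		.minlen = 4096,		ignore extents shorter than 4KB
 *	};
 *	ioctl(fd, FITRIM, &range);
 *
 * On success, range.len is rewritten with the number of bytes trimmed,
 * i.e. F2FS_BLK_TO_BYTES(cpc.trimmed) from above.
 */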
2219
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002220static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2221{
2222 struct curseg_info *curseg = CURSEG_I(sbi, type);
2223 if (curseg->next_blkoff < sbi->blocks_per_seg)
2224 return true;
2225 return false;
2226}
2227
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002228static int __get_segment_type_2(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002229{
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002230 if (fio->type == DATA)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002231 return CURSEG_HOT_DATA;
2232 else
2233 return CURSEG_HOT_NODE;
2234}
2235
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002236static int __get_segment_type_4(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002237{
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002238 if (fio->type == DATA) {
2239 struct inode *inode = fio->page->mapping->host;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002240
2241 if (S_ISDIR(inode->i_mode))
2242 return CURSEG_HOT_DATA;
2243 else
2244 return CURSEG_COLD_DATA;
2245 } else {
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002246 if (IS_DNODE(fio->page) && is_cold_node(fio->page))
Jaegeuk Kima344b9f2014-11-05 20:05:53 -08002247 return CURSEG_WARM_NODE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002248 else
2249 return CURSEG_COLD_NODE;
2250 }
2251}
2252
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002253static int __get_segment_type_6(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002254{
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002255 if (fio->type == DATA) {
2256 struct inode *inode = fio->page->mapping->host;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002257
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002258 if (is_cold_data(fio->page) || file_is_cold(inode))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002259 return CURSEG_COLD_DATA;
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002260 if (is_inode_flag_set(inode, FI_HOT_DATA))
2261 return CURSEG_HOT_DATA;
2262 return CURSEG_WARM_DATA;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002263 } else {
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002264 if (IS_DNODE(fio->page))
2265 return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002266 CURSEG_HOT_NODE;
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002267 return CURSEG_COLD_NODE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002268 }
2269}
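/*
 * The six-log temperature mapping implemented above, summarized:
 *
 *	DATA: cold data or cold file	-> CURSEG_COLD_DATA
 *	      FI_HOT_DATA		-> CURSEG_HOT_DATA
 *	      otherwise			-> CURSEG_WARM_DATA
 *	NODE: direct node, cold file	-> CURSEG_WARM_NODE
 *	      direct node, otherwise	-> CURSEG_HOT_NODE
 *	      indirect node		-> CURSEG_COLD_NODE
 */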
2270
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002271static int __get_segment_type(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002272{
Jaegeuk Kima912b542017-05-10 11:18:25 -07002273 int type = 0;
2274
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002275 switch (fio->sbi->active_logs) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002276 case 2:
Jaegeuk Kima912b542017-05-10 11:18:25 -07002277 type = __get_segment_type_2(fio);
2278 break;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002279 case 4:
Jaegeuk Kima912b542017-05-10 11:18:25 -07002280 type = __get_segment_type_4(fio);
2281 break;
2282 case 6:
2283 type = __get_segment_type_6(fio);
2284 break;
2285 default:
2286 f2fs_bug_on(fio->sbi, true);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002287 }
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002288
Jaegeuk Kima912b542017-05-10 11:18:25 -07002289 if (IS_HOT(type))
2290 fio->temp = HOT;
2291 else if (IS_WARM(type))
2292 fio->temp = WARM;
2293 else
2294 fio->temp = COLD;
2295 return type;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002296}
2297
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002298void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
2299 block_t old_blkaddr, block_t *new_blkaddr,
Chao Yufb830fc2017-05-19 23:37:01 +08002300 struct f2fs_summary *sum, int type,
2301 struct f2fs_io_info *fio, bool add_list)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002302{
2303 struct sit_info *sit_i = SIT_I(sbi);
Jaegeuk Kim6ae1be12016-11-11 12:31:40 -08002304 struct curseg_info *curseg = CURSEG_I(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002305
2306 mutex_lock(&curseg->curseg_mutex);
Jaegeuk Kim21cb1d92015-03-11 13:42:48 -04002307 mutex_lock(&sit_i->sentry_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002308
2309 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002310
Jaegeuk Kim4e6a8d92016-12-29 14:07:53 -08002311 f2fs_wait_discard_bio(sbi, *new_blkaddr);
2312
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002313 /*
2314	 * __add_sum_entry must be called with curseg_mutex held,
2315	 * because this function updates a summary entry in the
2316 * current summary block.
2317 */
Haicheng Lie79efe32013-06-13 16:59:27 +08002318 __add_sum_entry(sbi, type, sum);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002319
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002320 __refresh_next_blkoff(sbi, curseg);
Jaegeuk Kimdcdfff62013-10-22 20:56:10 +09002321
2322 stat_inc_block_count(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002323
Yunlong Song3436c4b2017-02-21 16:59:26 +08002324 if (!__has_curseg_space(sbi, type))
2325 sit_i->s_ops->allocate_segment(sbi, type, false);
Jaegeuk Kimc6f82fe92017-04-04 16:45:30 -07002326 /*
2327 * SIT information should be updated after segment allocation,
2328 * since we need to keep dirty segments precisely under SSR.
2329 */
2330 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
Yunlong Song3436c4b2017-02-21 16:59:26 +08002331
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002332 mutex_unlock(&sit_i->sentry_lock);
2333
Chao Yu704956e2017-07-31 20:19:09 +08002334 if (page && IS_NODESEG(type)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002335 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
2336
Chao Yu704956e2017-07-31 20:19:09 +08002337 f2fs_inode_chksum_set(sbi, page);
2338 }
2339
Chao Yufb830fc2017-05-19 23:37:01 +08002340 if (add_list) {
2341 struct f2fs_bio_info *io;
2342
2343 INIT_LIST_HEAD(&fio->list);
2344 fio->in_list = true;
2345 io = sbi->write_io[fio->type] + fio->temp;
2346 spin_lock(&io->io_lock);
2347 list_add_tail(&fio->list, &io->io_list);
2348 spin_unlock(&io->io_lock);
2349 }
2350
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002351 mutex_unlock(&curseg->curseg_mutex);
2352}
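/*
 * allocate_data_block() in sequence, for reference when reading its
 * callers below:
 *
 *	1. take curseg_mutex and sentry_lock;
 *	2. reserve NEXT_FREE_BLKADDR() and wait out any pending discard
 *	   on that address;
 *	3. record the summary entry and advance next_blkoff;
 *	4. open a new segment if the current one is full;
 *	5. refresh the SIT entries (new +1, old -1) and, with add_list,
 *	   queue the fio on the per-temperature write list.
 */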
2353
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002354static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002355{
Jaegeuk Kim81377bd2017-05-10 14:19:54 -07002356 int type = __get_segment_type(fio);
Jaegeuk Kim0a595eb2016-12-14 10:12:56 -08002357 int err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002358
Jaegeuk Kim0a595eb2016-12-14 10:12:56 -08002359reallocate:
Chao Yu7a9d7542016-02-22 18:36:38 +08002360 allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
Chao Yufb830fc2017-05-19 23:37:01 +08002361 &fio->new_blkaddr, sum, type, fio, true);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002362
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002363 /* writeout dirty page into bdev */
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07002364 err = f2fs_submit_page_write(fio);
Jaegeuk Kim0a595eb2016-12-14 10:12:56 -08002365 if (err == -EAGAIN) {
2366 fio->old_blkaddr = fio->new_blkaddr;
2367 goto reallocate;
2368 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002369}
2370
Chao Yub0af6d42017-08-02 23:21:48 +08002371void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
2372 enum iostat_type io_type)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002373{
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002374 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002375 .sbi = sbi,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002376 .type = META,
Mike Christie04d328d2016-06-05 14:31:55 -05002377 .op = REQ_OP_WRITE,
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002378 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
Chao Yu7a9d7542016-02-22 18:36:38 +08002379 .old_blkaddr = page->index,
2380 .new_blkaddr = page->index,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002381 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07002382 .encrypted_page = NULL,
Chao Yufb830fc2017-05-19 23:37:01 +08002383 .in_list = false,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002384 };
2385
Chao Yu2b947002015-10-12 17:04:21 +08002386 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
Mike Christie04d328d2016-06-05 14:31:55 -05002387 fio.op_flags &= ~REQ_META;
Chao Yu2b947002015-10-12 17:04:21 +08002388
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002389 set_page_writeback(page);
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07002390 f2fs_submit_page_write(&fio);
Chao Yub0af6d42017-08-02 23:21:48 +08002391
2392 f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002393}
2394
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002395void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002396{
2397 struct f2fs_summary sum;
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002398
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002399 set_summary(&sum, nid, 0, 0);
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002400 do_write_page(&sum, fio);
Chao Yub0af6d42017-08-02 23:21:48 +08002401
2402 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002403}
2404
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002405void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002406{
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002407 struct f2fs_sb_info *sbi = fio->sbi;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002408 struct f2fs_summary sum;
2409 struct node_info ni;
2410
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07002411 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002412 get_node_info(sbi, dn->nid, &ni);
2413 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002414 do_write_page(&sum, fio);
Chao Yuf28b3432016-02-24 17:16:47 +08002415 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
Chao Yub0af6d42017-08-02 23:21:48 +08002416
2417 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002418}
2419
Jaegeuk Kimd1b3e722017-03-30 21:02:46 -07002420int rewrite_data_page(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002421{
Chao Yub0af6d42017-08-02 23:21:48 +08002422 int err;
2423
Chao Yu7a9d7542016-02-22 18:36:38 +08002424 fio->new_blkaddr = fio->old_blkaddr;
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002425 stat_inc_inplace_blocks(fio->sbi);
Chao Yub0af6d42017-08-02 23:21:48 +08002426
2427 err = f2fs_submit_page_bio(fio);
2428
2429 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2430
2431 return err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002432}
2433
Chao Yu4356e482016-02-23 17:52:43 +08002434void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
Chao Yu19f106b2015-05-06 13:08:06 +08002435 block_t old_blkaddr, block_t new_blkaddr,
Chao Yu28bc1062016-02-06 14:40:34 +08002436 bool recover_curseg, bool recover_newaddr)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002437{
2438 struct sit_info *sit_i = SIT_I(sbi);
2439 struct curseg_info *curseg;
2440 unsigned int segno, old_cursegno;
2441 struct seg_entry *se;
2442 int type;
Chao Yu19f106b2015-05-06 13:08:06 +08002443 unsigned short old_blkoff;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002444
2445 segno = GET_SEGNO(sbi, new_blkaddr);
2446 se = get_seg_entry(sbi, segno);
2447 type = se->type;
2448
Chao Yu19f106b2015-05-06 13:08:06 +08002449 if (!recover_curseg) {
2450 /* for recovery flow */
2451 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
2452 if (old_blkaddr == NULL_ADDR)
2453 type = CURSEG_COLD_DATA;
2454 else
2455 type = CURSEG_WARM_DATA;
2456 }
2457 } else {
2458 if (!IS_CURSEG(sbi, segno))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002459 type = CURSEG_WARM_DATA;
2460 }
Chao Yu19f106b2015-05-06 13:08:06 +08002461
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002462 curseg = CURSEG_I(sbi, type);
2463
2464 mutex_lock(&curseg->curseg_mutex);
2465 mutex_lock(&sit_i->sentry_lock);
2466
2467 old_cursegno = curseg->segno;
Chao Yu19f106b2015-05-06 13:08:06 +08002468 old_blkoff = curseg->next_blkoff;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002469
2470 /* change the current segment */
2471 if (segno != curseg->segno) {
2472 curseg->next_segno = segno;
Chao Yu025d63a2017-08-30 18:04:48 +08002473 change_curseg(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002474 }
2475
Jaegeuk Kim491c0852014-02-04 13:01:10 +09002476 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
Haicheng Lie79efe32013-06-13 16:59:27 +08002477 __add_sum_entry(sbi, type, sum);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002478
Chao Yu28bc1062016-02-06 14:40:34 +08002479 if (!recover_curseg || recover_newaddr)
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07002480 update_sit_entry(sbi, new_blkaddr, 1);
2481 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2482 update_sit_entry(sbi, old_blkaddr, -1);
2483
2484 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2485 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
2486
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002487 locate_dirty_segment(sbi, old_cursegno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002488
Chao Yu19f106b2015-05-06 13:08:06 +08002489 if (recover_curseg) {
2490 if (old_cursegno != curseg->segno) {
2491 curseg->next_segno = old_cursegno;
Chao Yu025d63a2017-08-30 18:04:48 +08002492 change_curseg(sbi, type);
Chao Yu19f106b2015-05-06 13:08:06 +08002493 }
2494 curseg->next_blkoff = old_blkoff;
2495 }
2496
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002497 mutex_unlock(&sit_i->sentry_lock);
2498 mutex_unlock(&curseg->curseg_mutex);
2499}
2500
Chao Yu528e3452015-05-28 19:15:35 +08002501void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
2502 block_t old_addr, block_t new_addr,
Chao Yu28bc1062016-02-06 14:40:34 +08002503 unsigned char version, bool recover_curseg,
2504 bool recover_newaddr)
Chao Yu528e3452015-05-28 19:15:35 +08002505{
2506 struct f2fs_summary sum;
2507
2508 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
2509
Chao Yu28bc1062016-02-06 14:40:34 +08002510 __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
2511 recover_curseg, recover_newaddr);
Chao Yu528e3452015-05-28 19:15:35 +08002512
Chao Yuf28b3432016-02-24 17:16:47 +08002513 f2fs_update_data_blkaddr(dn, new_addr);
Chao Yu528e3452015-05-28 19:15:35 +08002514}
2515
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002516void f2fs_wait_on_page_writeback(struct page *page,
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002517 enum page_type type, bool ordered)
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002518{
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002519 if (PageWriteback(page)) {
Jaegeuk Kim40813632014-09-02 15:31:18 -07002520 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
2521
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07002522 f2fs_submit_merged_write_cond(sbi, page->mapping->host,
2523 0, page->index, type);
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002524 if (ordered)
2525 wait_on_page_writeback(page);
2526 else
2527 wait_for_stable_page(page);
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002528 }
2529}
2530
Jaegeuk Kimd4c759e2017-09-05 17:04:35 -07002531void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
Chao Yu08b39fb2015-10-08 13:27:34 +08002532{
2533 struct page *cpage;
2534
Yunlei He5d4c0af2016-09-18 08:16:56 +08002535 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
Chao Yu08b39fb2015-10-08 13:27:34 +08002536 return;
2537
Chao Yu08b39fb2015-10-08 13:27:34 +08002538 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
2539 if (cpage) {
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002540 f2fs_wait_on_page_writeback(cpage, DATA, true);
Chao Yu08b39fb2015-10-08 13:27:34 +08002541 f2fs_put_page(cpage, 1);
2542 }
2543}
2544
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002545static int read_compacted_summaries(struct f2fs_sb_info *sbi)
2546{
2547 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2548 struct curseg_info *seg_i;
2549 unsigned char *kaddr;
2550 struct page *page;
2551 block_t start;
2552 int i, j, offset;
2553
2554 start = start_sum_block(sbi);
2555
2556 page = get_meta_page(sbi, start++);
2557 kaddr = (unsigned char *)page_address(page);
2558
2559 /* Step 1: restore nat cache */
2560 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002561 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002562
2563 /* Step 2: restore sit cache */
2564 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002565 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002566 offset = 2 * SUM_JOURNAL_SIZE;
2567
2568 /* Step 3: restore summary entries */
2569 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2570 unsigned short blk_off;
2571 unsigned int segno;
2572
2573 seg_i = CURSEG_I(sbi, i);
2574 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2575 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2576 seg_i->next_segno = segno;
2577 reset_curseg(sbi, i, 0);
2578 seg_i->alloc_type = ckpt->alloc_type[i];
2579 seg_i->next_blkoff = blk_off;
2580
2581 if (seg_i->alloc_type == SSR)
2582 blk_off = sbi->blocks_per_seg;
2583
2584 for (j = 0; j < blk_off; j++) {
2585 struct f2fs_summary *s;
2586 s = (struct f2fs_summary *)(kaddr + offset);
2587 seg_i->sum_blk->entries[j] = *s;
2588 offset += SUMMARY_SIZE;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002589 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002590 SUM_FOOTER_SIZE)
2591 continue;
2592
2593 f2fs_put_page(page, 1);
2594 page = NULL;
2595
2596 page = get_meta_page(sbi, start++);
2597 kaddr = (unsigned char *)page_address(page);
2598 offset = 0;
2599 }
2600 }
2601 f2fs_put_page(page, 1);
2602 return 0;
2603}
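/*
 * The on-disk layout read back above (assuming 4KB blocks):
 *
 *	block 0:  [NAT journal 507B][SIT journal 507B][summaries, 7B each]
 *	block 1+: [summaries, 7B each, up to the 5B footer area]
 *
 * which is the counterpart of the 1/2/3-page estimate made by
 * npages_for_summary_flush().
 */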
2604
2605static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
2606{
2607 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2608 struct f2fs_summary_block *sum;
2609 struct curseg_info *curseg;
2610 struct page *new;
2611 unsigned short blk_off;
2612 unsigned int segno = 0;
2613 block_t blk_addr = 0;
2614
2615 /* get segment number and block addr */
2616 if (IS_DATASEG(type)) {
2617 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
2618 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
2619 CURSEG_HOT_DATA]);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002620 if (__exist_node_summaries(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002621 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
2622 else
2623 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
2624 } else {
2625 segno = le32_to_cpu(ckpt->cur_node_segno[type -
2626 CURSEG_HOT_NODE]);
2627 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
2628 CURSEG_HOT_NODE]);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002629 if (__exist_node_summaries(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002630 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
2631 type - CURSEG_HOT_NODE);
2632 else
2633 blk_addr = GET_SUM_BLOCK(sbi, segno);
2634 }
2635
2636 new = get_meta_page(sbi, blk_addr);
2637 sum = (struct f2fs_summary_block *)page_address(new);
2638
2639 if (IS_NODESEG(type)) {
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002640 if (__exist_node_summaries(sbi)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002641 struct f2fs_summary *ns = &sum->entries[0];
2642 int i;
2643 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
2644 ns->version = 0;
2645 ns->ofs_in_node = 0;
2646 }
2647 } else {
Gu Zhengd6537882014-03-07 18:43:36 +08002648 int err;
2649
2650 err = restore_node_summary(sbi, segno, sum);
2651 if (err) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002652 f2fs_put_page(new, 1);
Gu Zhengd6537882014-03-07 18:43:36 +08002653 return err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002654 }
2655 }
2656 }
2657
2658 /* set uncompleted segment to curseg */
2659 curseg = CURSEG_I(sbi, type);
2660 mutex_lock(&curseg->curseg_mutex);
Chao Yub7ad7512016-02-19 18:08:46 +08002661
2662 /* update journal info */
2663 down_write(&curseg->journal_rwsem);
2664 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
2665 up_write(&curseg->journal_rwsem);
2666
2667 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
2668 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002669 curseg->next_segno = segno;
2670 reset_curseg(sbi, type, 0);
2671 curseg->alloc_type = ckpt->alloc_type[type];
2672 curseg->next_blkoff = blk_off;
2673 mutex_unlock(&curseg->curseg_mutex);
2674 f2fs_put_page(new, 1);
2675 return 0;
2676}
2677
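/*
 * Mount-time entry point: restore every curseg.  Compacted data
 * summaries (CP_COMPACT_SUM_FLAG) are parsed first, the remaining
 * types go through read_normal_summaries(), and the journal counts are
 * sanity checked so a corrupted checkpoint cannot overflow the
 * in-memory journals.
 */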
2678static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
2679{
Jin Qian21d3f8e2017-06-01 11:18:30 -07002680 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
2681 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002682 int type = CURSEG_HOT_DATA;
Chao Yue4fc5fb2014-03-17 16:36:24 +08002683 int err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002684
Chao Yuaaec2b12016-09-20 11:04:18 +08002685 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
Chao Yu3fa06d72014-12-09 14:21:46 +08002686 int npages = npages_for_summary_flush(sbi, true);
2687
2688 if (npages >= 2)
2689 ra_meta_pages(sbi, start_sum_block(sbi), npages,
Chao Yu26879fb2015-10-12 17:05:59 +08002690 META_CP, true);
Chao Yu3fa06d72014-12-09 14:21:46 +08002691
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002692 /* restore for compacted data summary */
2693 if (read_compacted_summaries(sbi))
2694 return -EINVAL;
2695 type = CURSEG_HOT_NODE;
2696 }
2697
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002698 if (__exist_node_summaries(sbi))
Chao Yu3fa06d72014-12-09 14:21:46 +08002699 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
Chao Yu26879fb2015-10-12 17:05:59 +08002700 NR_CURSEG_TYPE - type, META_CP, true);
Chao Yu3fa06d72014-12-09 14:21:46 +08002701
Chao Yue4fc5fb2014-03-17 16:36:24 +08002702 for (; type <= CURSEG_COLD_NODE; type++) {
2703 err = read_normal_summaries(sbi, type);
2704 if (err)
2705 return err;
2706 }
2707
Jin Qian21d3f8e2017-06-01 11:18:30 -07002708 /* sanity check for summary blocks */
2709 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
2710 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
2711 return -EINVAL;
2712
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002713 return 0;
2714}
2715
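/*
 * Write-side mirror of read_compacted_summaries(): pack the NAT and
 * SIT journals plus all data summaries into as few meta pages as
 * possible, starting at @blkaddr.
 */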
2716static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
2717{
2718 struct page *page;
2719 unsigned char *kaddr;
2720 struct f2fs_summary *summary;
2721 struct curseg_info *seg_i;
2722 int written_size = 0;
2723 int i, j;
2724
2725 page = grab_meta_page(sbi, blkaddr++);
2726 kaddr = (unsigned char *)page_address(page);
2727
2728 /* Step 1: write nat cache */
2729 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002730 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002731 written_size += SUM_JOURNAL_SIZE;
2732
2733 /* Step 2: write sit cache */
2734 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002735 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002736 written_size += SUM_JOURNAL_SIZE;
2737
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002738 /* Step 3: write summary entries */
2739 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2740 unsigned short blkoff;
2741 seg_i = CURSEG_I(sbi, i);
2742 if (sbi->ckpt->alloc_type[i] == SSR)
2743 blkoff = sbi->blocks_per_seg;
2744 else
2745 blkoff = curseg_blkoff(sbi, i);
2746
2747 for (j = 0; j < blkoff; j++) {
2748 if (!page) {
2749 page = grab_meta_page(sbi, blkaddr++);
2750 kaddr = (unsigned char *)page_address(page);
2751 written_size = 0;
2752 }
2753 summary = (struct f2fs_summary *)(kaddr + written_size);
2754 *summary = seg_i->sum_blk->entries[j];
2755 written_size += SUMMARY_SIZE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002756
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002757 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002758 SUM_FOOTER_SIZE)
2759 continue;
2760
Chao Yue8d61a72013-10-24 15:08:28 +08002761 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002762 f2fs_put_page(page, 1);
2763 page = NULL;
2764 }
2765 }
Chao Yue8d61a72013-10-24 15:08:28 +08002766 if (page) {
2767 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002768 f2fs_put_page(page, 1);
Chao Yue8d61a72013-10-24 15:08:28 +08002769 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002770}
2771
2772static void write_normal_summaries(struct f2fs_sb_info *sbi,
2773 block_t blkaddr, int type)
2774{
2775 int i, end;
2776 if (IS_DATASEG(type))
2777 end = type + NR_CURSEG_DATA_TYPE;
2778 else
2779 end = type + NR_CURSEG_NODE_TYPE;
2780
Chao Yub7ad7512016-02-19 18:08:46 +08002781 for (i = type; i < end; i++)
2782 write_current_sum_page(sbi, i, blkaddr + (i - type));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002783}
2784
2785void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2786{
Chao Yuaaec2b12016-09-20 11:04:18 +08002787 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002788 write_compacted_summaries(sbi, start_blk);
2789 else
2790 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
2791}
2792
2793void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2794{
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002795 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002796}
2797
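/*
 * Find the journal slot caching NAT entry @val (keyed by nid) or SIT
 * entry @val (keyed by segno).  With @alloc, a fresh slot is claimed
 * when none is found and space remains; returns the slot index or -1.
 * A typical call, as in flush_sit_entries():
 *
 *	offset = lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
 */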
Chao Yudfc08a12016-02-14 18:50:40 +08002798int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002799 unsigned int val, int alloc)
2800{
2801 int i;
2802
2803 if (type == NAT_JOURNAL) {
Chao Yudfc08a12016-02-14 18:50:40 +08002804 for (i = 0; i < nats_in_cursum(journal); i++) {
2805 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002806 return i;
2807 }
Chao Yudfc08a12016-02-14 18:50:40 +08002808 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
2809 return update_nats_in_cursum(journal, 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002810 } else if (type == SIT_JOURNAL) {
Chao Yudfc08a12016-02-14 18:50:40 +08002811 for (i = 0; i < sits_in_cursum(journal); i++)
2812 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002813 return i;
Chao Yudfc08a12016-02-14 18:50:40 +08002814 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
2815 return update_sits_in_cursum(journal, 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002816 }
2817 return -1;
2818}
2819
2820static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
2821 unsigned int segno)
2822{
Gu Zheng2cc22182014-10-20 17:45:49 +08002823 return get_meta_page(sbi, current_sit_addr(sbi, segno));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002824}
2825
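/*
 * SIT blocks are double buffered on disk: current_sit_addr() points at
 * the valid copy and next_sit_addr() at its sibling.  Copy the current
 * block into the sibling, dirty it, and flip the tracking bit via
 * set_to_next_sit() so the next checkpoint reads the new copy.
 */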
2826static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
2827 unsigned int start)
2828{
2829 struct sit_info *sit_i = SIT_I(sbi);
2830 struct page *src_page, *dst_page;
2831 pgoff_t src_off, dst_off;
2832 void *src_addr, *dst_addr;
2833
2834 src_off = current_sit_addr(sbi, start);
2835 dst_off = next_sit_addr(sbi, src_off);
2836
2837 /* get current sit block page without lock */
2838 src_page = get_meta_page(sbi, src_off);
2839 dst_page = grab_meta_page(sbi, dst_off);
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07002840 f2fs_bug_on(sbi, PageDirty(src_page));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002841
2842 src_addr = page_address(src_page);
2843 dst_addr = page_address(dst_page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002844 memcpy(dst_addr, src_addr, PAGE_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002845
2846 set_page_dirty(dst_page);
2847 f2fs_put_page(src_page, 1);
2848
2849 set_to_next_sit(sit_i, start);
2850
2851 return dst_page;
2852}
2853
Chao Yu184a5cd2014-09-04 18:13:01 +08002854static struct sit_entry_set *grab_sit_entry_set(void)
2855{
2856 struct sit_entry_set *ses =
Jaegeuk Kim80c54502015-08-20 08:51:56 -07002857 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
Chao Yu184a5cd2014-09-04 18:13:01 +08002858
2859 ses->entry_cnt = 0;
2860 INIT_LIST_HEAD(&ses->set_list);
2861 return ses;
2862}
2863
2864static void release_sit_entry_set(struct sit_entry_set *ses)
2865{
2866 list_del(&ses->set_list);
2867 kmem_cache_free(sit_entry_set_slab, ses);
2868}
2869
2870static void adjust_sit_entry_set(struct sit_entry_set *ses,
2871 struct list_head *head)
2872{
2873 struct sit_entry_set *next = ses;
2874
2875 if (list_is_last(&ses->set_list, head))
2876 return;
2877
2878 list_for_each_entry_continue(next, head, set_list)
2879 if (ses->entry_cnt <= next->entry_cnt)
2880 break;
2881
2882 list_move_tail(&ses->set_list, &next->set_list);
2883}
2884
2885static void add_sit_entry(unsigned int segno, struct list_head *head)
2886{
2887 struct sit_entry_set *ses;
2888 unsigned int start_segno = START_SEGNO(segno);
2889
2890 list_for_each_entry(ses, head, set_list) {
2891 if (ses->start_segno == start_segno) {
2892 ses->entry_cnt++;
2893 adjust_sit_entry_set(ses, head);
2894 return;
2895 }
2896 }
2897
2898 ses = grab_sit_entry_set();
2899
2900 ses->start_segno = start_segno;
2901 ses->entry_cnt++;
2902 list_add(&ses->set_list, head);
2903}
2904
2905static void add_sits_in_set(struct f2fs_sb_info *sbi)
2906{
2907 struct f2fs_sm_info *sm_info = SM_I(sbi);
2908 struct list_head *set_list = &sm_info->sit_entry_set;
2909 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
Chao Yu184a5cd2014-09-04 18:13:01 +08002910 unsigned int segno;
2911
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002912 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
Chao Yu184a5cd2014-09-04 18:13:01 +08002913 add_sit_entry(segno, set_list);
2914}
2915
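/*
 * Drain the SIT journal before flushing: mark each journalled segment
 * dirty, make sure it is accounted in a sit_entry_set, and reset the
 * journal's entry count to zero.
 */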
2916static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002917{
2918 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002919 struct f2fs_journal *journal = curseg->journal;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002920 int i;
2921
Chao Yub7ad7512016-02-19 18:08:46 +08002922 down_write(&curseg->journal_rwsem);
Chao Yudfc08a12016-02-14 18:50:40 +08002923 for (i = 0; i < sits_in_cursum(journal); i++) {
Chao Yu184a5cd2014-09-04 18:13:01 +08002924 unsigned int segno;
2925 bool dirtied;
2926
Chao Yudfc08a12016-02-14 18:50:40 +08002927 segno = le32_to_cpu(segno_in_journal(journal, i));
Chao Yu184a5cd2014-09-04 18:13:01 +08002928 dirtied = __mark_sit_entry_dirty(sbi, segno);
2929
2930 if (!dirtied)
2931 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002932 }
Chao Yudfc08a12016-02-14 18:50:40 +08002933 update_sits_in_cursum(journal, -i);
Chao Yub7ad7512016-02-19 18:08:46 +08002934 up_write(&curseg->journal_rwsem);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002935}
2936
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002937/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002938 * CP calls this function, which flushes SIT entries including sit_journal,
2939 * and moves prefree segs to free segs.
2940 */
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002941void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002942{
2943 struct sit_info *sit_i = SIT_I(sbi);
2944 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
2945 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002946 struct f2fs_journal *journal = curseg->journal;
Chao Yu184a5cd2014-09-04 18:13:01 +08002947 struct sit_entry_set *ses, *tmp;
2948 struct list_head *head = &SM_I(sbi)->sit_entry_set;
Chao Yu184a5cd2014-09-04 18:13:01 +08002949 bool to_journal = true;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002950 struct seg_entry *se;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002951
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002952 mutex_lock(&sit_i->sentry_lock);
2953
Wanpeng Li2b11a742015-02-27 16:52:50 +08002954 if (!sit_i->dirty_sentries)
2955 goto out;
2956
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002957 /*
Chao Yu184a5cd2014-09-04 18:13:01 +08002958	 * add and account the sit entries of the dirty bitmap in the sit
2959	 * entry set temporarily
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002960 */
Chao Yu184a5cd2014-09-04 18:13:01 +08002961 add_sits_in_set(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002962
Chao Yu184a5cd2014-09-04 18:13:01 +08002963 /*
2964	 * if there is not enough space in the journal to store dirty sit
2965	 * entries, remove all entries from the journal and add and account
2966	 * them in the sit entry set.
2967 */
Chao Yudfc08a12016-02-14 18:50:40 +08002968 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
Chao Yu184a5cd2014-09-04 18:13:01 +08002969 remove_sits_in_journal(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002970
Chao Yu184a5cd2014-09-04 18:13:01 +08002971 /*
2972 * there are two steps to flush sit entries:
2973 * #1, flush sit entries to journal in current cold data summary block.
2974 * #2, flush sit entries to sit page.
2975 */
2976 list_for_each_entry_safe(ses, tmp, head, set_list) {
Jaegeuk Kim4a257ed2014-10-16 11:43:30 -07002977 struct page *page = NULL;
Chao Yu184a5cd2014-09-04 18:13:01 +08002978 struct f2fs_sit_block *raw_sit = NULL;
2979 unsigned int start_segno = ses->start_segno;
2980 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002981 (unsigned long)MAIN_SEGS(sbi));
Chao Yu184a5cd2014-09-04 18:13:01 +08002982 unsigned int segno = start_segno;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09002983
Chao Yu184a5cd2014-09-04 18:13:01 +08002984 if (to_journal &&
Chao Yudfc08a12016-02-14 18:50:40 +08002985 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
Chao Yu184a5cd2014-09-04 18:13:01 +08002986 to_journal = false;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002987
Chao Yub7ad7512016-02-19 18:08:46 +08002988 if (to_journal) {
2989 down_write(&curseg->journal_rwsem);
2990 } else {
Chao Yu184a5cd2014-09-04 18:13:01 +08002991 page = get_next_sit_page(sbi, start_segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002992 raw_sit = page_address(page);
2993 }
2994
Chao Yu184a5cd2014-09-04 18:13:01 +08002995 /* flush dirty sit entries in region of current sit set */
2996 for_each_set_bit_from(segno, bitmap, end) {
2997 int offset, sit_offset;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002998
2999 se = get_seg_entry(sbi, segno);
Chao Yu184a5cd2014-09-04 18:13:01 +08003000
3001 /* add discard candidates */
Chao Yuc473f1a2017-04-27 20:40:39 +08003002 if (!(cpc->reason & CP_DISCARD)) {
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003003 cpc->trim_start = segno;
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08003004 add_discard_addrs(sbi, cpc, false);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003005 }
Chao Yu184a5cd2014-09-04 18:13:01 +08003006
3007 if (to_journal) {
Chao Yudfc08a12016-02-14 18:50:40 +08003008 offset = lookup_journal_in_cursum(journal,
Chao Yu184a5cd2014-09-04 18:13:01 +08003009 SIT_JOURNAL, segno, 1);
3010 f2fs_bug_on(sbi, offset < 0);
Chao Yudfc08a12016-02-14 18:50:40 +08003011 segno_in_journal(journal, offset) =
Chao Yu184a5cd2014-09-04 18:13:01 +08003012 cpu_to_le32(segno);
3013 seg_info_to_raw_sit(se,
Chao Yudfc08a12016-02-14 18:50:40 +08003014 &sit_in_journal(journal, offset));
Chao Yu184a5cd2014-09-04 18:13:01 +08003015 } else {
3016 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3017 seg_info_to_raw_sit(se,
3018 &raw_sit->entries[sit_offset]);
3019 }
3020
3021 __clear_bit(segno, bitmap);
3022 sit_i->dirty_sentries--;
3023 ses->entry_cnt--;
3024 }
3025
Chao Yub7ad7512016-02-19 18:08:46 +08003026 if (to_journal)
3027 up_write(&curseg->journal_rwsem);
3028 else
Chao Yu184a5cd2014-09-04 18:13:01 +08003029 f2fs_put_page(page, 1);
3030
3031 f2fs_bug_on(sbi, ses->entry_cnt);
3032 release_sit_entry_set(ses);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003033 }
Chao Yu184a5cd2014-09-04 18:13:01 +08003034
3035 f2fs_bug_on(sbi, !list_empty(head));
3036 f2fs_bug_on(sbi, sit_i->dirty_sentries);
Chao Yu184a5cd2014-09-04 18:13:01 +08003037out:
Chao Yuc473f1a2017-04-27 20:40:39 +08003038 if (cpc->reason & CP_DISCARD) {
Yunlei He650d3c42016-12-22 11:46:24 +08003039 __u64 trim_start = cpc->trim_start;
3040
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003041 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08003042 add_discard_addrs(sbi, cpc, false);
Yunlei He650d3c42016-12-22 11:46:24 +08003043
3044 cpc->trim_start = trim_start;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003045 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003046 mutex_unlock(&sit_i->sentry_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003047
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003048 set_prefree_as_free_segments(sbi);
3049}
3050
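/*
 * Allocate the in-memory SIT: one seg_entry per segment (plus
 * per-section entries when a section spans several segments), the
 * dirty-sentry bitmap, and a private copy of the checkpoint's SIT
 * bitmap.  Mirror copies are kept under CONFIG_F2FS_CHECK_FS to catch
 * bitmap corruption.
 */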
3051static int build_sit_info(struct f2fs_sb_info *sbi)
3052{
3053 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003054 struct sit_info *sit_i;
3055 unsigned int sit_segs, start;
Chao Yuae27d622017-01-07 18:52:34 +08003056 char *src_bitmap;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003057 unsigned int bitmap_size;
3058
3059 /* allocate memory for SIT information */
3060 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
3061 if (!sit_i)
3062 return -ENOMEM;
3063
3064 SM_I(sbi)->sit_info = sit_i;
3065
Michal Hockoa7c3e902017-05-08 15:57:09 -07003066 sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003067 sizeof(struct seg_entry), GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003068 if (!sit_i->sentries)
3069 return -ENOMEM;
3070
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003071 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Michal Hockoa7c3e902017-05-08 15:57:09 -07003072 sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003073 if (!sit_i->dirty_sentries_bitmap)
3074 return -ENOMEM;
3075
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003076 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003077 sit_i->sentries[start].cur_valid_map
3078 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3079 sit_i->sentries[start].ckpt_valid_map
3080 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003081 if (!sit_i->sentries[start].cur_valid_map ||
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003082 !sit_i->sentries[start].ckpt_valid_map)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003083 return -ENOMEM;
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003084
Chao Yu355e7892017-01-07 18:51:01 +08003085#ifdef CONFIG_F2FS_CHECK_FS
3086 sit_i->sentries[start].cur_valid_map_mir
3087 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3088 if (!sit_i->sentries[start].cur_valid_map_mir)
3089 return -ENOMEM;
3090#endif
3091
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003092 if (f2fs_discard_en(sbi)) {
3093 sit_i->sentries[start].discard_map
3094 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3095 if (!sit_i->sentries[start].discard_map)
3096 return -ENOMEM;
3097 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003098 }
3099
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08003100 sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3101 if (!sit_i->tmp_map)
3102 return -ENOMEM;
3103
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003104 if (sbi->segs_per_sec > 1) {
Michal Hockoa7c3e902017-05-08 15:57:09 -07003105 sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003106 sizeof(struct sec_entry), GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003107 if (!sit_i->sec_entries)
3108 return -ENOMEM;
3109 }
3110
3111	/* get information related to SIT */
3112 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
3113
3114	/* set up SIT bitmap from checkpoint pack */
3115 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
3116 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
3117
Chao Yuae27d622017-01-07 18:52:34 +08003118 sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3119 if (!sit_i->sit_bitmap)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003120 return -ENOMEM;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003121
Chao Yuae27d622017-01-07 18:52:34 +08003122#ifdef CONFIG_F2FS_CHECK_FS
3123 sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3124 if (!sit_i->sit_bitmap_mir)
3125 return -ENOMEM;
3126#endif
3127
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003128 /* init SIT information */
3129 sit_i->s_ops = &default_salloc_ops;
3130
3131 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
3132 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
Jaegeuk Kimc79b7ff2016-11-14 18:20:10 -08003133 sit_i->written_valid_blocks = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003134 sit_i->bitmap_size = bitmap_size;
3135 sit_i->dirty_sentries = 0;
3136 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
3137 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
Deepa Dinamani48fbfe52017-05-08 15:59:10 -07003138 sit_i->mounted_time = ktime_get_real_seconds();
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003139 mutex_init(&sit_i->sentry_lock);
3140 return 0;
3141}
3142
3143static int build_free_segmap(struct f2fs_sb_info *sbi)
3144{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003145 struct free_segmap_info *free_i;
3146 unsigned int bitmap_size, sec_bitmap_size;
3147
3148 /* allocate memory for free segmap information */
3149 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
3150 if (!free_i)
3151 return -ENOMEM;
3152
3153 SM_I(sbi)->free_info = free_i;
3154
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003155 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Michal Hockoa7c3e902017-05-08 15:57:09 -07003156 free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003157 if (!free_i->free_segmap)
3158 return -ENOMEM;
3159
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003160 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
Michal Hockoa7c3e902017-05-08 15:57:09 -07003161 free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003162 if (!free_i->free_secmap)
3163 return -ENOMEM;
3164
3165 /* set all segments as dirty temporarily */
3166 memset(free_i->free_segmap, 0xff, bitmap_size);
3167 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
3168
3169 /* init free segmap information */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003170 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003171 free_i->free_segments = 0;
3172 free_i->free_sections = 0;
Chao Yu1a118cc2015-02-11 18:20:38 +08003173 spin_lock_init(&free_i->segmap_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003174 return 0;
3175}
3176
3177static int build_curseg(struct f2fs_sb_info *sbi)
3178{
Namjae Jeon1042d602012-12-01 10:56:13 +09003179 struct curseg_info *array;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003180 int i;
3181
Fabian Frederickb434bab2014-06-23 18:39:15 +02003182 array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003183 if (!array)
3184 return -ENOMEM;
3185
3186 SM_I(sbi)->curseg_array = array;
3187
3188 for (i = 0; i < NR_CURSEG_TYPE; i++) {
3189 mutex_init(&array[i].curseg_mutex);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003190 array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003191 if (!array[i].sum_blk)
3192 return -ENOMEM;
Chao Yub7ad7512016-02-19 18:08:46 +08003193 init_rwsem(&array[i].journal_rwsem);
3194 array[i].journal = kzalloc(sizeof(struct f2fs_journal),
3195 GFP_KERNEL);
3196 if (!array[i].journal)
3197 return -ENOMEM;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003198 array[i].segno = NULL_SEGNO;
3199 array[i].next_blkoff = 0;
3200 }
3201 return restore_curseg_summaries(sbi);
3202}
3203
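/*
 * Populate seg_entries from disk in two passes: bulk-read SIT blocks
 * with readahead, then overlay the newer entries still held in the SIT
 * journal.  Discard maps are seeded here too; under CP_TRIMMED_FLAG
 * every block is treated as already discarded.
 */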
3204static void build_sit_entries(struct f2fs_sb_info *sbi)
3205{
3206 struct sit_info *sit_i = SIT_I(sbi);
3207 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08003208 struct f2fs_journal *journal = curseg->journal;
Yunlei He9c094042016-09-24 12:29:18 +08003209 struct seg_entry *se;
3210 struct f2fs_sit_entry sit;
Chao Yu74de5932013-11-22 09:09:59 +08003211 int sit_blk_cnt = SIT_BLK_CNT(sbi);
3212 unsigned int i, start, end;
3213 unsigned int readed, start_blk = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003214
Chao Yu74de5932013-11-22 09:09:59 +08003215 do {
Jaegeuk Kim664ba972016-10-18 11:07:45 -07003216 readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
3217 META_SIT, true);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003218
Chao Yu74de5932013-11-22 09:09:59 +08003219 start = start_blk * sit_i->sents_per_block;
3220 end = (start_blk + readed) * sit_i->sents_per_block;
3221
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003222 for (; start < end && start < MAIN_SEGS(sbi); start++) {
Chao Yu74de5932013-11-22 09:09:59 +08003223 struct f2fs_sit_block *sit_blk;
Chao Yu74de5932013-11-22 09:09:59 +08003224 struct page *page;
3225
Yunlei He9c094042016-09-24 12:29:18 +08003226 se = &sit_i->sentries[start];
Chao Yu74de5932013-11-22 09:09:59 +08003227 page = get_current_sit_page(sbi, start);
3228 sit_blk = (struct f2fs_sit_block *)page_address(page);
3229 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
3230 f2fs_put_page(page, 1);
Chao Yud600af232016-08-19 23:13:47 +08003231
Chao Yu74de5932013-11-22 09:09:59 +08003232 check_block_count(sbi, start, &sit);
3233 seg_info_from_raw_sit(se, &sit);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003234
3235			/* build discard map only once */
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003236 if (f2fs_discard_en(sbi)) {
Chao Yu1f43e2a2017-04-28 13:56:08 +08003237 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3238 memset(se->discard_map, 0xff,
3239 SIT_VBLOCK_MAP_SIZE);
3240 } else {
3241 memcpy(se->discard_map,
3242 se->cur_valid_map,
3243 SIT_VBLOCK_MAP_SIZE);
3244 sbi->discard_blks +=
3245 sbi->blocks_per_seg -
3246 se->valid_blocks;
3247 }
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003248 }
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003249
Chao Yud600af232016-08-19 23:13:47 +08003250 if (sbi->segs_per_sec > 1)
3251 get_sec_entry(sbi, start)->valid_blocks +=
3252 se->valid_blocks;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003253 }
Chao Yu74de5932013-11-22 09:09:59 +08003254 start_blk += readed;
3255 } while (start_blk < sit_blk_cnt);
Chao Yud600af232016-08-19 23:13:47 +08003256
3257 down_read(&curseg->journal_rwsem);
3258 for (i = 0; i < sits_in_cursum(journal); i++) {
Chao Yud600af232016-08-19 23:13:47 +08003259 unsigned int old_valid_blocks;
3260
3261 start = le32_to_cpu(segno_in_journal(journal, i));
3262 se = &sit_i->sentries[start];
3263 sit = sit_in_journal(journal, i);
3264
3265 old_valid_blocks = se->valid_blocks;
3266
3267 check_block_count(sbi, start, &sit);
3268 seg_info_from_raw_sit(se, &sit);
3269
3270 if (f2fs_discard_en(sbi)) {
Chao Yu1f43e2a2017-04-28 13:56:08 +08003271 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3272 memset(se->discard_map, 0xff,
3273 SIT_VBLOCK_MAP_SIZE);
3274 } else {
3275 memcpy(se->discard_map, se->cur_valid_map,
3276 SIT_VBLOCK_MAP_SIZE);
3277 sbi->discard_blks += old_valid_blocks -
3278 se->valid_blocks;
3279 }
Chao Yud600af232016-08-19 23:13:47 +08003280 }
3281
3282 if (sbi->segs_per_sec > 1)
3283 get_sec_entry(sbi, start)->valid_blocks +=
3284 se->valid_blocks - old_valid_blocks;
3285 }
3286 up_read(&curseg->journal_rwsem);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003287}
3288
3289static void init_free_segmap(struct f2fs_sb_info *sbi)
3290{
3291 unsigned int start;
3292 int type;
3293
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003294 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003295 struct seg_entry *sentry = get_seg_entry(sbi, start);
3296 if (!sentry->valid_blocks)
3297 __set_free(sbi, start);
Jaegeuk Kimc79b7ff2016-11-14 18:20:10 -08003298 else
3299 SIT_I(sbi)->written_valid_blocks +=
3300 sentry->valid_blocks;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003301 }
3302
3303	/* set the current segments in use */
3304 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
3305 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
3306 __set_test_and_inuse(sbi, curseg_t->segno);
3307 }
3308}
3309
3310static void init_dirty_segmap(struct f2fs_sb_info *sbi)
3311{
3312 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3313 struct free_segmap_info *free_i = FREE_I(sbi);
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003314 unsigned int segno = 0, offset = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003315 unsigned short valid_blocks;
3316
Namjae Jeon8736fbf2013-06-16 09:49:11 +09003317 while (1) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003318 /* find dirty segment based on free segmap */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003319 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
3320 if (segno >= MAIN_SEGS(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003321 break;
3322 offset = segno + 1;
Jaegeuk Kim302bd342017-04-07 14:33:22 -07003323 valid_blocks = get_valid_blocks(sbi, segno, false);
Jaegeuk Kimec325b52014-09-02 16:24:11 -07003324 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003325 continue;
Jaegeuk Kimec325b52014-09-02 16:24:11 -07003326 if (valid_blocks > sbi->blocks_per_seg) {
3327 f2fs_bug_on(sbi, 1);
3328 continue;
3329 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003330 mutex_lock(&dirty_i->seglist_lock);
3331 __locate_dirty_segment(sbi, segno, DIRTY);
3332 mutex_unlock(&dirty_i->seglist_lock);
3333 }
3334}
3335
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003336static int init_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003337{
3338 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003339 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003340
Michal Hockoa7c3e902017-05-08 15:57:09 -07003341 dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003342 if (!dirty_i->victim_secmap)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003343 return -ENOMEM;
3344 return 0;
3345}
3346
3347static int build_dirty_segmap(struct f2fs_sb_info *sbi)
3348{
3349 struct dirty_seglist_info *dirty_i;
3350 unsigned int bitmap_size, i;
3351
3352 /* allocate memory for dirty segments list information */
3353 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
3354 if (!dirty_i)
3355 return -ENOMEM;
3356
3357 SM_I(sbi)->dirty_info = dirty_i;
3358 mutex_init(&dirty_i->seglist_lock);
3359
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003360 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003361
3362 for (i = 0; i < NR_DIRTY_TYPE; i++) {
Michal Hockoa7c3e902017-05-08 15:57:09 -07003363 dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003364 if (!dirty_i->dirty_segmap[i])
3365 return -ENOMEM;
3366 }
3367
3368 init_dirty_segmap(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003369 return init_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003370}
3371
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09003372/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003373 * Update min, max modified time for cost-benefit GC algorithm
3374 */
3375static void init_min_max_mtime(struct f2fs_sb_info *sbi)
3376{
3377 struct sit_info *sit_i = SIT_I(sbi);
3378 unsigned int segno;
3379
3380 mutex_lock(&sit_i->sentry_lock);
3381
3382 sit_i->min_mtime = LLONG_MAX;
3383
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003384 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003385 unsigned int i;
3386 unsigned long long mtime = 0;
3387
3388 for (i = 0; i < sbi->segs_per_sec; i++)
3389 mtime += get_seg_entry(sbi, segno + i)->mtime;
3390
3391 mtime = div_u64(mtime, sbi->segs_per_sec);
3392
3393 if (sit_i->min_mtime > mtime)
3394 sit_i->min_mtime = mtime;
3395 }
3396 sit_i->max_mtime = get_mtime(sbi);
3397 mutex_unlock(&sit_i->sentry_lock);
3398}
3399
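/*
 * Build the segment manager at mount time: tunables first, then the
 * flush and discard control structures, then SIT info, free segmap and
 * cursegs, and finally the free/dirty maps re-derived from the SIT
 * entries.
 */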
3400int build_segment_manager(struct f2fs_sb_info *sbi)
3401{
3402 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3403 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Namjae Jeon1042d602012-12-01 10:56:13 +09003404 struct f2fs_sm_info *sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003405 int err;
3406
3407 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
3408 if (!sm_info)
3409 return -ENOMEM;
3410
3411 /* init sm info */
3412 sbi->sm_info = sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003413 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3414 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3415 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
3416 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3417 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3418 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
3419 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
Jaegeuk Kim58c41032014-03-19 14:17:21 +09003420 sm_info->rec_prefree_segments = sm_info->main_segments *
3421 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
Jaegeuk Kim44a83492016-07-13 18:23:35 -07003422 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
3423 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
3424
Jaegeuk Kim52763a42016-06-13 09:47:48 -07003425 if (!test_opt(sbi, LFS))
3426 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
Jaegeuk Kim216fbd62013-11-07 13:13:42 +09003427 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
Jaegeuk Kimc1ce1b02014-09-10 16:53:02 -07003428 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
Jaegeuk Kimef095d12017-03-24 20:05:13 -04003429 sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003430
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08003431 sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
3432
Chao Yu184a5cd2014-09-04 18:13:01 +08003433 INIT_LIST_HEAD(&sm_info->sit_entry_set);
3434
Yunlei Hed4fdf8b2017-06-01 16:43:51 +08003435 if (!f2fs_readonly(sbi->sb)) {
Gu Zheng2163d192014-04-27 14:21:33 +08003436 err = create_flush_cmd_control(sbi);
3437 if (err)
Gu Zhenga688b9d9e2014-04-27 14:21:21 +08003438 return err;
Jaegeuk Kim6b4afdd2014-04-02 15:34:36 +09003439 }
3440
Jaegeuk Kim0b54fb82017-01-11 14:40:24 -08003441 err = create_discard_cmd_control(sbi);
3442 if (err)
3443 return err;
3444
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003445 err = build_sit_info(sbi);
3446 if (err)
3447 return err;
3448 err = build_free_segmap(sbi);
3449 if (err)
3450 return err;
3451 err = build_curseg(sbi);
3452 if (err)
3453 return err;
3454
3455 /* reinit free segmap based on SIT */
3456 build_sit_entries(sbi);
3457
3458 init_free_segmap(sbi);
3459 err = build_dirty_segmap(sbi);
3460 if (err)
3461 return err;
3462
3463 init_min_max_mtime(sbi);
3464 return 0;
3465}
3466
3467static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
3468 enum dirty_type dirty_type)
3469{
3470 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3471
3472 mutex_lock(&dirty_i->seglist_lock);
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003473 kvfree(dirty_i->dirty_segmap[dirty_type]);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003474 dirty_i->nr_dirty[dirty_type] = 0;
3475 mutex_unlock(&dirty_i->seglist_lock);
3476}
3477
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003478static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003479{
3480 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003481 kvfree(dirty_i->victim_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003482}
3483
3484static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
3485{
3486 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3487 int i;
3488
3489 if (!dirty_i)
3490 return;
3491
3492 /* discard pre-free/dirty segments list */
3493 for (i = 0; i < NR_DIRTY_TYPE; i++)
3494 discard_dirty_segmap(sbi, i);
3495
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003496 destroy_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003497 SM_I(sbi)->dirty_info = NULL;
3498 kfree(dirty_i);
3499}
3500
3501static void destroy_curseg(struct f2fs_sb_info *sbi)
3502{
3503 struct curseg_info *array = SM_I(sbi)->curseg_array;
3504 int i;
3505
3506 if (!array)
3507 return;
3508 SM_I(sbi)->curseg_array = NULL;
Chao Yub7ad7512016-02-19 18:08:46 +08003509 for (i = 0; i < NR_CURSEG_TYPE; i++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003510 kfree(array[i].sum_blk);
Chao Yub7ad7512016-02-19 18:08:46 +08003511 kfree(array[i].journal);
3512 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003513 kfree(array);
3514}
3515
3516static void destroy_free_segmap(struct f2fs_sb_info *sbi)
3517{
3518 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
3519 if (!free_i)
3520 return;
3521 SM_I(sbi)->free_info = NULL;
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003522 kvfree(free_i->free_segmap);
3523 kvfree(free_i->free_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003524 kfree(free_i);
3525}
3526
3527static void destroy_sit_info(struct f2fs_sb_info *sbi)
3528{
3529 struct sit_info *sit_i = SIT_I(sbi);
3530 unsigned int start;
3531
3532 if (!sit_i)
3533 return;
3534
3535 if (sit_i->sentries) {
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003536 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003537 kfree(sit_i->sentries[start].cur_valid_map);
Chao Yu355e7892017-01-07 18:51:01 +08003538#ifdef CONFIG_F2FS_CHECK_FS
3539 kfree(sit_i->sentries[start].cur_valid_map_mir);
3540#endif
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003541 kfree(sit_i->sentries[start].ckpt_valid_map);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003542 kfree(sit_i->sentries[start].discard_map);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003543 }
3544 }
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08003545 kfree(sit_i->tmp_map);
3546
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003547 kvfree(sit_i->sentries);
3548 kvfree(sit_i->sec_entries);
3549 kvfree(sit_i->dirty_sentries_bitmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003550
3551 SM_I(sbi)->sit_info = NULL;
3552 kfree(sit_i->sit_bitmap);
Chao Yuae27d622017-01-07 18:52:34 +08003553#ifdef CONFIG_F2FS_CHECK_FS
3554 kfree(sit_i->sit_bitmap_mir);
3555#endif
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003556 kfree(sit_i);
3557}
3558
3559void destroy_segment_manager(struct f2fs_sb_info *sbi)
3560{
3561 struct f2fs_sm_info *sm_info = SM_I(sbi);
Gu Zhenga688b9d9e2014-04-27 14:21:21 +08003562
Chao Yu3b03f722013-11-06 09:12:04 +08003563 if (!sm_info)
3564 return;
Jaegeuk Kim5eba8c52016-12-07 16:23:32 -08003565 destroy_flush_cmd_control(sbi, true);
Chao Yuf0994052017-03-27 18:14:04 +08003566 destroy_discard_cmd_control(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003567 destroy_dirty_segmap(sbi);
3568 destroy_curseg(sbi);
3569 destroy_free_segmap(sbi);
3570 destroy_sit_info(sbi);
3571 sbi->sm_info = NULL;
3572 kfree(sm_info);
3573}
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003574
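/*
 * Slab caches shared by all f2fs instances, created once at module
 * init and unwound in reverse order on failure.
 */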
3575int __init create_segment_manager_caches(void)
3576{
3577 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
Gu Zhenge8512d22014-03-07 18:43:28 +08003578 sizeof(struct discard_entry));
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003579 if (!discard_entry_slab)
Chao Yu184a5cd2014-09-04 18:13:01 +08003580 goto fail;
3581
Jaegeuk Kimb01a9202017-01-09 14:13:03 -08003582 discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
3583 sizeof(struct discard_cmd));
3584 if (!discard_cmd_slab)
Chao Yu6ab2a302016-09-05 12:28:26 +08003585 goto destroy_discard_entry;
Chao Yu275b66b2016-08-29 23:58:34 +08003586
Chao Yu184a5cd2014-09-04 18:13:01 +08003587 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
Changman Leec9ee0082014-11-21 15:42:07 +09003588 sizeof(struct sit_entry_set));
Chao Yu184a5cd2014-09-04 18:13:01 +08003589 if (!sit_entry_set_slab)
Jaegeuk Kimb01a9202017-01-09 14:13:03 -08003590 goto destroy_discard_cmd;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07003591
3592 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
3593 sizeof(struct inmem_pages));
3594 if (!inmem_entry_slab)
3595 goto destroy_sit_entry_set;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003596 return 0;
Chao Yu184a5cd2014-09-04 18:13:01 +08003597
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07003598destroy_sit_entry_set:
3599 kmem_cache_destroy(sit_entry_set_slab);
Jaegeuk Kimb01a9202017-01-09 14:13:03 -08003600destroy_discard_cmd:
3601 kmem_cache_destroy(discard_cmd_slab);
Chao Yu6ab2a302016-09-05 12:28:26 +08003602destroy_discard_entry:
Chao Yu184a5cd2014-09-04 18:13:01 +08003603 kmem_cache_destroy(discard_entry_slab);
3604fail:
3605 return -ENOMEM;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003606}
3607
3608void destroy_segment_manager_caches(void)
3609{
Chao Yu184a5cd2014-09-04 18:13:01 +08003610 kmem_cache_destroy(sit_entry_set_slab);
Jaegeuk Kimb01a9202017-01-09 14:13:03 -08003611 kmem_cache_destroy(discard_cmd_slab);
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003612 kmem_cache_destroy(discard_entry_slab);
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07003613 kmem_cache_destroy(inmem_entry_slab);
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003614}