/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

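/*
 * Build an unsigned long from the bytes of @str, placing str[0] in the
 * most significant byte, so the MSB-first bitmaps written by f2fs_set_bit
 * can be scanned with the word-wise helpers below.
 */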
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of unsigned long.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

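/*
 * Counterpart of __find_rev_next_bit(): find the next zero bit at or
 * after @offset in an MSB-first bitmap of @size bits.
 */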
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

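/*
 * Decide whether blocks should be allocated with SSR (slack space
 * recycling): never in LFS mode, always while the GC thread is in urgent
 * mode, and otherwise only when free sections run low compared with the
 * dirty node/dentry/imeta pages plus twice the reserved sections.
 */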
bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (test_opt(sbi, LFS))
		return false;
	if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			2 * reserved_sections(sbi));
}

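/*
 * Register an atomic-write page: tag its private data with
 * ATOMIC_WRITTEN_PAGE, take an extra page reference and link it into
 * the inode's inmem_pages list under fi->inmem_lock.
 */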
void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

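/*
 * Drop or roll back the in-memory pages on @head.  With @recover, the
 * old block address recorded at commit time is restored through
 * f2fs_replace_block(); with @drop, the pages are simply invalidated.
 * The extra reference taken at registration time is released here.
 */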
static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful case */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_HOT_DATA);
	stat_dec_atomic_write(inode);
}

void drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, !cur || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

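/*
 * Write every registered atomic page through do_write_data_page(),
 * remembering each old block address on @revoke_list so that a failed
 * commit can be rolled back by __revoke_inmem_pages().
 */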
static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	pgoff_t last_idx = ULONG_MAX;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			last_idx = page->index;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}

int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * Try to revoke all committed pages.  We could still fail
		 * due to lack of memory or some other reason; in that case
		 * EAGAIN is returned, which means the transaction is no
		 * longer consistent and the caller should use a journal to
		 * recover, or rewrite and commit the last transaction.  For
		 * any other error number, revoking was done by the
		 * filesystem itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}
#endif

	/* balance_fs_bg is able to be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC, or end up with a checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false, false);

	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}

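/* Issue an empty REQ_PREFLUSH write to @bdev and wait for it to complete. */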
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
	int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
	int i;

	if (!sbi->s_ndevs || ret)
		return ret;

	for (i = 1; i < sbi->s_ndevs; i++) {
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

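/*
 * Flush-merge worker: drain fcc->issue_list, issue a single flush for
 * the whole batch and complete every waiter with the shared result.
 */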
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	sb_start_intwrite(sbi->sb);

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	sb_end_intwrite(sbi->sb);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->issing_flush) == 1) {
		ret = submit_flush_wait(sbi);
		atomic_dec(&fcc->issing_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->issing_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->issing_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->issing_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->issing_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

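/*
 * Mark @segno in the dirty segmap of @dirty_type; for DIRTY, the
 * per-type map indexed by the segment's own type is updated as well.
 */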
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0)
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * No error such as -ENOMEM should occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

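/*
 * Allocate a discard command for [lstart, lstart + len) on @bdev and
 * queue it, in D_PREP state, on the pending list selected by its length.
 */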
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color(&dc->rb_node, &dcc->root);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_dec(&dcc->issing_discard);

	list_del(&dc->list);
	rb_erase(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_msg(sbi->sb, KERN_INFO,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

	dc->error = bio->bi_error;
	dc->state = D_DONE;
	complete_all(&dc->wait);
	bio_put(bio);
}

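/*
 * Debug-only (CONFIG_F2FS_CHECK_FS) check that no block in [start, end)
 * is still marked valid in the SIT before it is discarded.
 */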
void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

/* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct bio *bio = NULL;

	if (dc->state != D_PREP)
		return;

	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);

	dc->error = __blkdev_issue_discard(dc->bdev,
				SECTOR_FROM_BLOCK(dc->start),
				SECTOR_FROM_BLOCK(dc->len),
				GFP_NOFS, 0, &bio);
	if (!dc->error) {
		/* should keep before submission to avoid D_DONE right away */
		dc->state = D_SUBMIT;
		atomic_inc(&dcc->issued_discard);
		atomic_inc(&dcc->issing_discard);
		if (bio) {
			bio->bi_private = dc;
			bio->bi_end_io = f2fs_submit_discard_endio;
			bio->bi_opf |= REQ_SYNC;
			submit_bio(bio);
			list_move_tail(&dc->list, &dcc->wait_list);
			__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);

			f2fs_update_iostat(sbi, FS_DISCARD, 1);
		}
	} else {
		__remove_discard_cmd(sbi, dc);
	}
}

static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_node;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
	if (!dc)
		return NULL;

	return dc;
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

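/*
 * Remove @blkaddr from a pending discard command, trimming the command
 * and, if the block falls in the middle, splitting it into two ranges.
 */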
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}

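/*
 * Insert the range [lstart, lstart + len) into the discard rb-tree,
 * merging it with adjacent commands that are still in D_PREP state and
 * back/front mergeable on the same block device.
 */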
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	block_t end = lstart + len;

	mutex_lock(&dcc->cmd_lock);

	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
 next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	mutex_unlock(&dcc->cmd_lock);
}

static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (sbi->s_ndevs) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	return 0;
}

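/*
 * Submit pending discard commands, starting from the largest-length
 * pending lists.  Lists tagged P_TRIM (fstrim) are always drained;
 * otherwise issuing is throttled by is_idle() and DISCARD_ISSUE_RATE
 * when @issue_cond is set.  Returns the number of commands issued.
 */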
static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int iter = 0, issued = 0;
	int i;

	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi,
		!__check_rb_tree_consistence(sbi, &dcc->root));
	blk_start_plug(&plug);
	for (i = MAX_PLIST_NUM - 1;
			i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			/* Hurry up to finish fstrim */
			if (dcc->pend_list_tag[i] & P_TRIM) {
				__submit_discard_cmd(sbi, dc);
				issued++;

				if (fatal_signal_pending(current))
					break;
				continue;
			}

			if (!issue_cond || is_idle(sbi)) {
				issued++;
				__submit_discard_cmd(sbi, dc);
			}
			if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
				goto out;
		}
		if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM)
			dcc->pend_list_tag[i] &= (~P_TRIM);
	}
out:
	blk_finish_plug(&plug);
	mutex_unlock(&dcc->cmd_lock);

	return issued;
}

static void __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
		}
	}
	mutex_unlock(&dcc->cmd_lock);
}

static void __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref)
		__remove_discard_cmd(sbi, dc);
	mutex_unlock(&dcc->cmd_lock);
}

static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
			wait_for_completion_io(&dc->wait);
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		__wait_one_discard_bio(sbi, dc);
		goto next;
	}
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/* This comes from f2fs_put_super and f2fs_trim_fs */
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
	__issue_discard_cmd(sbi, false);
	__drop_discard_cmd(sbi);
	__wait_discard_cmd(sbi, false);
}

static void mark_discard_range_all(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	int i;

	mutex_lock(&dcc->cmd_lock);
	for (i = 0; i < MAX_PLIST_NUM; i++)
		dcc->pend_list_tag[i] |= P_TRIM;
	mutex_unlock(&dcc->cmd_lock);
}

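/*
 * Background discard thread: wake up on dcc->discard_wake or after a
 * timeout, issue pending commands, and back off to the maximum wait
 * time whenever there was nothing to issue.
 */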
static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));
		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			return 0;

		if (dcc->discard_wake) {
			dcc->discard_wake = 0;
			if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
				mark_discard_range_all(sbi);
		}

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, true);
		if (issued) {
			__wait_discard_cmd(sbi, true);
			wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
		} else {
			wait_ms = DEF_MAX_DISCARD_ISSUE_TIME;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (sbi->s_ndevs) {
		devi = f2fs_target_device_index(sbi, blkstart);
		blkstart -= FDEV(devi).start_blk;
	}

	/*
	 * We need to know the type of the zone: for conventional zones,
	 * use regular discard if the drive supports it. For sequential
	 * zones, reset the zone write pointer.
	 */
	switch (get_blkz_type(sbi, bdev, blkstart)) {

	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return 0;
		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"(%d) %s: Unaligned discard attempted (block %x + %x)",
				devi, sbi->s_ndevs ? FDEV(devi).path : "",
				blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_reset_zones(bdev, sector,
					  nr_sects, GFP_NOFS);
	default:
		/* Unknown zone type: broken device ? */
		return -EIO;
	}
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}

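/*
 * Collect discard candidates for the segment at cpc->trim_start by
 * comparing the current and checkpointed validity bitmaps (or, for
 * CP_DISCARD, everything not yet discarded).  In @check_only mode,
 * return true as soon as one candidate range is found.
 */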
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
		return false;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *head = &dcc->entry_list;
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason & CP_DISCARD);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
			continue;

		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SEC_FROM_SEG(sbi, start);
		start_segno = GET_SEG_FROM_SEC(sbi, secno);
		if (!IS_CURSEC(sbi, secno) &&
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001486 !get_valid_blocks(sbi, start, true))
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001487 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
1488 sbi->segs_per_sec << sbi->log_blocks_per_seg);
1489
1490 start = start_segno + sbi->segs_per_sec;
1491 if (start < end)
1492 goto next;
Jaegeuk Kim8c53efc32017-02-27 11:57:11 -08001493 else
1494 end = start - 1;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001495 }
1496 mutex_unlock(&dirty_i->seglist_lock);
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001497
1498 /* send small discards */
Chao Yu2d7b8222014-03-29 11:33:17 +08001499 list_for_each_entry_safe(entry, this, head, list) {
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001500 unsigned int cur_pos = 0, next_pos, len, total_len = 0;
1501 bool is_valid = test_bit_le(0, entry->discard_map);
1502
1503find_next:
1504 if (is_valid) {
1505 next_pos = find_next_zero_bit_le(entry->discard_map,
1506 sbi->blocks_per_seg, cur_pos);
1507 len = next_pos - cur_pos;
1508
Damien Le Moal57a91232017-05-26 17:04:40 +09001509 if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
1510 (force && len < cpc->trim_minlen))
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001511 goto skip;
1512
1513 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
1514 len);
1515 cpc->trimmed += len;
1516 total_len += len;
1517 } else {
1518 next_pos = find_next_bit_le(entry->discard_map,
1519 sbi->blocks_per_seg, cur_pos);
1520 }
Jaegeuk Kim836b5a62015-04-30 22:50:06 -07001521skip:
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001522 cur_pos = next_pos;
1523 is_valid = !is_valid;
1524
1525 if (cur_pos < sbi->blocks_per_seg)
1526 goto find_next;
1527
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001528 list_del(&entry->list);
Chao Yuefe24da2017-08-07 23:09:56 +08001529 dcc->nr_discards -= total_len;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001530 kmem_cache_free(discard_entry_slab, entry);
1531 }
Chao Yu275b66b2016-08-29 23:58:34 +08001532
Jaegeuk Kim200ad892017-08-22 21:15:43 -07001533 wake_up_discard_thread(sbi, false);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001534}
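/*
 * Worked example for the small-discard loop above (illustrative only,
 * assuming a 512-block segment and LSB-first test_bit_le numbering): a
 * discard_map with only bits 2..5 set produces an invalid run [0, 2), a
 * valid run [2, 6) and a final invalid run [6, 512), so a single discard of
 * four blocks is issued at entry->start_blkaddr + 2, provided the device is
 * not zoned and, under FITRIM, the run is at least cpc->trim_minlen long.
 */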
1535
1536static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
1537{
1538 dev_t dev = sbi->sb->s_bdev->bd_dev;
1539 struct discard_cmd_control *dcc;
1540 int err = 0, i;
1541
1542 if (SM_I(sbi)->dcc_info) {
1543 dcc = SM_I(sbi)->dcc_info;
1544 goto init_thread;
1545 }
1546
1547 dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
1548 if (!dcc)
1549 return -ENOMEM;
1550
Chao Yuefe24da2017-08-07 23:09:56 +08001551 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001552 INIT_LIST_HEAD(&dcc->entry_list);
Chao Yuefe24da2017-08-07 23:09:56 +08001553 for (i = 0; i < MAX_PLIST_NUM; i++) {
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001554 INIT_LIST_HEAD(&dcc->pend_list[i]);
Chao Yuefe24da2017-08-07 23:09:56 +08001555 if (i >= dcc->discard_granularity - 1)
1556 dcc->pend_list_tag[i] |= P_ACTIVE;
1557 }
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001558 INIT_LIST_HEAD(&dcc->wait_list);
1559 mutex_init(&dcc->cmd_lock);
1560 atomic_set(&dcc->issued_discard, 0);
1561 atomic_set(&dcc->issing_discard, 0);
1562 atomic_set(&dcc->discard_cmd_cnt, 0);
1563 dcc->nr_discards = 0;
1564 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
1565 dcc->undiscard_blks = 0;
1566 dcc->root = RB_ROOT;
1567
1568 init_waitqueue_head(&dcc->discard_wait_queue);
1569 SM_I(sbi)->dcc_info = dcc;
1570init_thread:
1571 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
1572 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
1573 if (IS_ERR(dcc->f2fs_issue_discard)) {
1574 err = PTR_ERR(dcc->f2fs_issue_discard);
1575 kfree(dcc);
1576 SM_I(sbi)->dcc_info = NULL;
1577 return err;
1578 }
1579
1580 return err;
1581}
1582
1583static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
1584{
1585 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1586
1587 if (!dcc)
1588 return;
1589
Chao Yud9d85cc2017-06-29 23:17:45 +08001590 stop_discard_thread(sbi);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001591
1592 kfree(dcc);
1593 SM_I(sbi)->dcc_info = NULL;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001594}
1595
Chao Yu184a5cd2014-09-04 18:13:01 +08001596static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001597{
1598 struct sit_info *sit_i = SIT_I(sbi);
Chao Yu184a5cd2014-09-04 18:13:01 +08001599
1600 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001601 sit_i->dirty_sentries++;
Chao Yu184a5cd2014-09-04 18:13:01 +08001602 return false;
1603 }
1604
1605 return true;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001606}
1607
1608static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
1609 unsigned int segno, int modified)
1610{
1611 struct seg_entry *se = get_seg_entry(sbi, segno);
1612 se->type = type;
1613 if (modified)
1614 __mark_sit_entry_dirty(sbi, segno);
1615}
1616
1617static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
1618{
1619 struct seg_entry *se;
1620 unsigned int segno, offset;
1621 long int new_vblocks;
Yunlong Song5ccdd092017-08-02 21:20:13 +08001622 bool exist;
1623#ifdef CONFIG_F2FS_CHECK_FS
1624 bool mir_exist;
1625#endif
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001626
1627 segno = GET_SEGNO(sbi, blkaddr);
1628
1629 se = get_seg_entry(sbi, segno);
1630 new_vblocks = se->valid_blocks + del;
Jaegeuk Kim491c0852014-02-04 13:01:10 +09001631 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001632
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07001633 f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001634 (new_vblocks > sbi->blocks_per_seg)));
1635
1636 se->valid_blocks = new_vblocks;
1637 se->mtime = get_mtime(sbi);
1638 SIT_I(sbi)->max_mtime = se->mtime;
1639
1640 /* Update valid block bitmap */
1641 if (del > 0) {
Yunlong Song5ccdd092017-08-02 21:20:13 +08001642 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001643#ifdef CONFIG_F2FS_CHECK_FS
Yunlong Song5ccdd092017-08-02 21:20:13 +08001644 mir_exist = f2fs_test_and_set_bit(offset,
1645 se->cur_valid_map_mir);
1646 if (unlikely(exist != mir_exist)) {
1647 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1648 "when setting bitmap, blk:%u, old bit:%d",
1649 blkaddr, exist);
Jaegeuk Kim05796762014-09-02 16:05:00 -07001650 f2fs_bug_on(sbi, 1);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001651 }
Yunlong Song5ccdd092017-08-02 21:20:13 +08001652#endif
1653 if (unlikely(exist)) {
1654 f2fs_msg(sbi->sb, KERN_ERR,
1655 "Bitmap was wrongly set, blk:%u", blkaddr);
1656 f2fs_bug_on(sbi, 1);
Yunlong Song5fd827b2017-08-02 22:16:54 +08001657 se->valid_blocks--;
1658 del = 0;
Yunlong Song5ccdd092017-08-02 21:20:13 +08001659 }
1660
Jaegeuk Kim3e025742016-08-02 10:56:40 -07001661 if (f2fs_discard_en(sbi) &&
1662 !f2fs_test_and_set_bit(offset, se->discard_map))
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001663 sbi->discard_blks--;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001664
1665 /* don't overwrite by SSR to keep node chain */
1666 if (se->type == CURSEG_WARM_NODE) {
1667 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
1668 se->ckpt_valid_blocks++;
1669 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001670 } else {
Yunlong Song5ccdd092017-08-02 21:20:13 +08001671 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001672#ifdef CONFIG_F2FS_CHECK_FS
Yunlong Song5ccdd092017-08-02 21:20:13 +08001673 mir_exist = f2fs_test_and_clear_bit(offset,
1674 se->cur_valid_map_mir);
1675 if (unlikely(exist != mir_exist)) {
1676 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1677 "when clearing bitmap, blk:%u, old bit:%d",
1678 blkaddr, exist);
Jaegeuk Kim05796762014-09-02 16:05:00 -07001679 f2fs_bug_on(sbi, 1);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001680 }
Yunlong Song5ccdd092017-08-02 21:20:13 +08001681#endif
1682 if (unlikely(!exist)) {
1683 f2fs_msg(sbi->sb, KERN_ERR,
1684 "Bitmap was wrongly cleared, blk:%u", blkaddr);
1685 f2fs_bug_on(sbi, 1);
Yunlong Song5fd827b2017-08-02 22:16:54 +08001686 se->valid_blocks++;
1687 del = 0;
Yunlong Song5ccdd092017-08-02 21:20:13 +08001688 }
1689
Jaegeuk Kim3e025742016-08-02 10:56:40 -07001690 if (f2fs_discard_en(sbi) &&
1691 f2fs_test_and_clear_bit(offset, se->discard_map))
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001692 sbi->discard_blks++;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001693 }
1694 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
1695 se->ckpt_valid_blocks += del;
1696
1697 __mark_sit_entry_dirty(sbi, segno);
1698
1699 /* update total number of valid blocks to be written in ckpt area */
1700 SIT_I(sbi)->written_valid_blocks += del;
1701
1702 if (sbi->segs_per_sec > 1)
1703 get_sec_entry(sbi, segno)->valid_blocks += del;
1704}
1705
Jaegeuk Kim5e443812014-01-28 12:22:14 +09001706void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001707{
Jaegeuk Kim5e443812014-01-28 12:22:14 +09001708 update_sit_entry(sbi, new, 1);
1709 if (GET_SEGNO(sbi, old) != NULL_SEGNO)
1710 update_sit_entry(sbi, old, -1);
1711
1712 locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
1713 locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001714}
1715
1716void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
1717{
1718 unsigned int segno = GET_SEGNO(sbi, addr);
1719 struct sit_info *sit_i = SIT_I(sbi);
1720
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07001721 f2fs_bug_on(sbi, addr == NULL_ADDR);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001722 if (addr == NEW_ADDR)
1723 return;
1724
1725 /* add it into sit main buffer */
1726 mutex_lock(&sit_i->sentry_lock);
1727
1728 update_sit_entry(sbi, addr, -1);
1729
1730 /* add it into dirty seglist */
1731 locate_dirty_segment(sbi, segno);
1732
1733 mutex_unlock(&sit_i->sentry_lock);
1734}
1735
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001736bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
1737{
1738 struct sit_info *sit_i = SIT_I(sbi);
1739 unsigned int segno, offset;
1740 struct seg_entry *se;
1741 bool is_cp = false;
1742
1743 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
1744 return true;
1745
1746 mutex_lock(&sit_i->sentry_lock);
1747
1748 segno = GET_SEGNO(sbi, blkaddr);
1749 se = get_seg_entry(sbi, segno);
1750 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1751
1752 if (f2fs_test_bit(offset, se->ckpt_valid_map))
1753 is_cp = true;
1754
1755 mutex_unlock(&sit_i->sentry_lock);
1756
1757 return is_cp;
1758}
1759
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001760/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001761 * This function should be called with the curseg_mutex lock held
1762 */
1763static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
Haicheng Lie79efe32013-06-13 16:59:27 +08001764 struct f2fs_summary *sum)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001765{
1766 struct curseg_info *curseg = CURSEG_I(sbi, type);
1767 void *addr = curseg->sum_blk;
Haicheng Lie79efe32013-06-13 16:59:27 +08001768 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001769 memcpy(addr, sum, sizeof(struct f2fs_summary));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001770}
1771
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001772/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001773 * Calculate the number of current summary pages for writing
1774 */
Chao Yu3fa06d72014-12-09 14:21:46 +08001775int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001776{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001777 int valid_sum_count = 0;
Fan Li9a479382013-10-29 16:21:47 +08001778 int i, sum_in_page;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001779
1780 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1781 if (sbi->ckpt->alloc_type[i] == SSR)
1782 valid_sum_count += sbi->blocks_per_seg;
Chao Yu3fa06d72014-12-09 14:21:46 +08001783 else {
1784 if (for_ra)
1785 valid_sum_count += le16_to_cpu(
1786 F2FS_CKPT(sbi)->cur_data_blkoff[i]);
1787 else
1788 valid_sum_count += curseg_blkoff(sbi, i);
1789 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001790 }
1791
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001792 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
Fan Li9a479382013-10-29 16:21:47 +08001793 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
1794 if (valid_sum_count <= sum_in_page)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001795 return 1;
Fan Li9a479382013-10-29 16:21:47 +08001796 else if ((valid_sum_count - sum_in_page) <=
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001797 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001798 return 2;
1799 return 3;
1800}
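/*
 * Worked example (illustrative, assuming the usual 4KB page with 7-byte
 * summary entries, a 5-byte summary footer and 507-byte journals, as the
 * on-disk layout in f2fs_fs.h defines them): a compacted summary page holds
 * (4096 - 2 * 507 - 5) / 7 = 439 entries and a follow-up page holds
 * (4096 - 5) / 7 = 584, so up to 439 valid data summaries fit in one page,
 * up to 1023 in two pages, and anything beyond that needs all three.
 */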
1801
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001802/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001803 * Caller should put this summary page
1804 */
1805struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
1806{
1807 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
1808}
1809
Chao Yu381722d2015-05-19 17:40:04 +08001810void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
1811{
1812 struct page *page = grab_meta_page(sbi, blk_addr);
1813 void *dst = page_address(page);
1814
1815 if (src)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001816 memcpy(dst, src, PAGE_SIZE);
Chao Yu381722d2015-05-19 17:40:04 +08001817 else
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001818 memset(dst, 0, PAGE_SIZE);
Chao Yu381722d2015-05-19 17:40:04 +08001819 set_page_dirty(page);
1820 f2fs_put_page(page, 1);
1821}
1822
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001823static void write_sum_page(struct f2fs_sb_info *sbi,
1824 struct f2fs_summary_block *sum_blk, block_t blk_addr)
1825{
Chao Yu381722d2015-05-19 17:40:04 +08001826 update_meta_page(sbi, (void *)sum_blk, blk_addr);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001827}
1828
Chao Yub7ad7512016-02-19 18:08:46 +08001829static void write_current_sum_page(struct f2fs_sb_info *sbi,
1830 int type, block_t blk_addr)
1831{
1832 struct curseg_info *curseg = CURSEG_I(sbi, type);
1833 struct page *page = grab_meta_page(sbi, blk_addr);
1834 struct f2fs_summary_block *src = curseg->sum_blk;
1835 struct f2fs_summary_block *dst;
1836
1837 dst = (struct f2fs_summary_block *)page_address(page);
1838
1839 mutex_lock(&curseg->curseg_mutex);
1840
1841 down_read(&curseg->journal_rwsem);
1842 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
1843 up_read(&curseg->journal_rwsem);
1844
1845 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
1846 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
1847
1848 mutex_unlock(&curseg->curseg_mutex);
1849
1850 set_page_dirty(page);
1851 f2fs_put_page(page, 1);
1852}
1853
Jaegeuk Kim60374682013-03-31 13:58:51 +09001854static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
1855{
1856 struct curseg_info *curseg = CURSEG_I(sbi, type);
Haicheng Li81fb5e82013-05-14 18:20:28 +08001857 unsigned int segno = curseg->segno + 1;
Jaegeuk Kim60374682013-03-31 13:58:51 +09001858 struct free_segmap_info *free_i = FREE_I(sbi);
1859
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001860 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
Haicheng Li81fb5e82013-05-14 18:20:28 +08001861 return !test_bit(segno, free_i->free_segmap);
Jaegeuk Kim60374682013-03-31 13:58:51 +09001862 return 0;
1863}
1864
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001865/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001866 * Find a new segment from the free segments bitmap in the right order
                                              1867 * This function always succeeds; failing to find a free segment is a BUG
1868 */
1869static void get_new_segment(struct f2fs_sb_info *sbi,
1870 unsigned int *newseg, bool new_sec, int dir)
1871{
1872 struct free_segmap_info *free_i = FREE_I(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001873 unsigned int segno, secno, zoneno;
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001874 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001875 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
1876 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001877 unsigned int left_start = hint;
1878 bool init = true;
1879 int go_left = 0;
1880 int i;
1881
Chao Yu1a118cc2015-02-11 18:20:38 +08001882 spin_lock(&free_i->segmap_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001883
1884 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
1885 segno = find_next_zero_bit(free_i->free_segmap,
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001886 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
1887 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001888 goto got_it;
1889 }
1890find_other_zone:
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001891 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
1892 if (secno >= MAIN_SECS(sbi)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001893 if (dir == ALLOC_RIGHT) {
1894 secno = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001895 MAIN_SECS(sbi), 0);
1896 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001897 } else {
1898 go_left = 1;
1899 left_start = hint - 1;
1900 }
1901 }
1902 if (go_left == 0)
1903 goto skip_left;
1904
1905 while (test_bit(left_start, free_i->free_secmap)) {
1906 if (left_start > 0) {
1907 left_start--;
1908 continue;
1909 }
1910 left_start = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001911 MAIN_SECS(sbi), 0);
1912 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001913 break;
1914 }
1915 secno = left_start;
1916skip_left:
1917 hint = secno;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001918 segno = GET_SEG_FROM_SEC(sbi, secno);
1919 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001920
1921 /* give up on finding another zone */
1922 if (!init)
1923 goto got_it;
1924 if (sbi->secs_per_zone == 1)
1925 goto got_it;
1926 if (zoneno == old_zoneno)
1927 goto got_it;
1928 if (dir == ALLOC_LEFT) {
1929 if (!go_left && zoneno + 1 >= total_zones)
1930 goto got_it;
1931 if (go_left && zoneno == 0)
1932 goto got_it;
1933 }
1934 for (i = 0; i < NR_CURSEG_TYPE; i++)
1935 if (CURSEG_I(sbi, i)->zone == zoneno)
1936 break;
1937
1938 if (i < NR_CURSEG_TYPE) {
                                              1939 		/* zone is in use, try another */
1940 if (go_left)
1941 hint = zoneno * sbi->secs_per_zone - 1;
1942 else if (zoneno + 1 >= total_zones)
1943 hint = 0;
1944 else
1945 hint = (zoneno + 1) * sbi->secs_per_zone;
1946 init = false;
1947 goto find_other_zone;
1948 }
1949got_it:
1950 /* set it as dirty segment in free segmap */
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07001951 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001952 __set_inuse(sbi, segno);
1953 *newseg = segno;
Chao Yu1a118cc2015-02-11 18:20:38 +08001954 spin_unlock(&free_i->segmap_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001955}
1956
1957static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
1958{
1959 struct curseg_info *curseg = CURSEG_I(sbi, type);
1960 struct summary_footer *sum_footer;
1961
1962 curseg->segno = curseg->next_segno;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001963 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001964 curseg->next_blkoff = 0;
1965 curseg->next_segno = NULL_SEGNO;
1966
1967 sum_footer = &(curseg->sum_blk->footer);
1968 memset(sum_footer, 0, sizeof(struct summary_footer));
1969 if (IS_DATASEG(type))
1970 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
1971 if (IS_NODESEG(type))
1972 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
1973 __set_sit_entry_type(sbi, type, curseg->segno, modified);
1974}
1975
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001976static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
1977{
                                              1978 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
1979 if (sbi->segs_per_sec != 1)
1980 return CURSEG_I(sbi, type)->segno;
1981
1982 if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
1983 return 0;
1984
1985 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
1986 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
1987 return CURSEG_I(sbi, type)->segno;
1988}
1989
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001990/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001991 * Allocate a current working segment.
1992 * This function always allocates a free segment in LFS manner.
1993 */
1994static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
1995{
1996 struct curseg_info *curseg = CURSEG_I(sbi, type);
1997 unsigned int segno = curseg->segno;
1998 int dir = ALLOC_LEFT;
1999
2000 write_sum_page(sbi, curseg->sum_blk,
Haicheng Li81fb5e82013-05-14 18:20:28 +08002001 GET_SUM_BLOCK(sbi, segno));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002002 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
2003 dir = ALLOC_RIGHT;
2004
2005 if (test_opt(sbi, NOHEAP))
2006 dir = ALLOC_RIGHT;
2007
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002008 segno = __get_next_segno(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002009 get_new_segment(sbi, &segno, new_sec, dir);
2010 curseg->next_segno = segno;
2011 reset_curseg(sbi, type, 1);
2012 curseg->alloc_type = LFS;
2013}
2014
2015static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2016 struct curseg_info *seg, block_t start)
2017{
2018 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
Changman Leee81c93c2013-11-15 13:21:16 +09002019 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08002020 unsigned long *target_map = SIT_I(sbi)->tmp_map;
Changman Leee81c93c2013-11-15 13:21:16 +09002021 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2022 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2023 int i, pos;
2024
2025 for (i = 0; i < entries; i++)
2026 target_map[i] = ckpt_map[i] | cur_map[i];
2027
2028 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2029
2030 seg->next_blkoff = pos;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002031}
2032
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002033/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002034 * If a segment is written in LFS manner, the next block offset is simply
                                              2035 * obtained by increasing the current block offset. However, if a segment is
                                              2036 * written in SSR manner, the next block offset is obtained by calling __next_free_blkoff
2037 */
2038static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2039 struct curseg_info *seg)
2040{
2041 if (seg->alloc_type == SSR)
2042 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2043 else
2044 seg->next_blkoff++;
2045}
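/*
 * Minimal sketch of the two policies above (illustrative only; it uses the
 * generic find_next_zero_bit() and ignores the reversed in-byte bit order
 * that __find_rev_next_zero_bit() handles).  The helper name is local to
 * the example.
 */
static inline unsigned int example_next_blkoff(unsigned int cur_blkoff,
                                const unsigned long *used_map,
                                unsigned int blocks_per_seg, bool is_ssr)
{
        /* LFS strictly appends; SSR reuses the next hole in the valid map */
        if (is_ssr)
                return find_next_zero_bit(used_map, blocks_per_seg,
                                                        cur_blkoff + 1);
        return cur_blkoff + 1;
}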
2046
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002047/*
arter97e1c42042014-08-06 23:22:50 +09002048 * This function always allocates a used segment (from the dirty seglist) in SSR
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002049 * manner, so it must recover the existing segment information of valid blocks
2050 */
Chao Yua69c9742017-08-30 18:04:48 +08002051static void change_curseg(struct f2fs_sb_info *sbi, int type)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002052{
2053 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2054 struct curseg_info *curseg = CURSEG_I(sbi, type);
2055 unsigned int new_segno = curseg->next_segno;
2056 struct f2fs_summary_block *sum_node;
2057 struct page *sum_page;
2058
2059 write_sum_page(sbi, curseg->sum_blk,
2060 GET_SUM_BLOCK(sbi, curseg->segno));
2061 __set_test_and_inuse(sbi, new_segno);
2062
2063 mutex_lock(&dirty_i->seglist_lock);
2064 __remove_dirty_segment(sbi, new_segno, PRE);
2065 __remove_dirty_segment(sbi, new_segno, DIRTY);
2066 mutex_unlock(&dirty_i->seglist_lock);
2067
2068 reset_curseg(sbi, type, 1);
2069 curseg->alloc_type = SSR;
2070 __next_free_blkoff(sbi, curseg, 0);
2071
Chao Yua69c9742017-08-30 18:04:48 +08002072 sum_page = get_sum_page(sbi, new_segno);
2073 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2074 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2075 f2fs_put_page(sum_page, 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002076}
2077
Jaegeuk Kim43727522013-02-04 15:11:17 +09002078static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
2079{
2080 struct curseg_info *curseg = CURSEG_I(sbi, type);
2081 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002082 unsigned segno = NULL_SEGNO;
2083 int i, cnt;
2084 bool reversed = false;
Jaegeuk Kim43727522013-02-04 15:11:17 +09002085
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002086 	/* need_SSR() already forces us to do this */
2087 if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
2088 curseg->next_segno = segno;
2089 return 1;
2090 }
Jaegeuk Kim43727522013-02-04 15:11:17 +09002091
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002092 /* For node segments, let's do SSR more intensively */
2093 if (IS_NODESEG(type)) {
2094 if (type >= CURSEG_WARM_NODE) {
2095 reversed = true;
2096 i = CURSEG_COLD_NODE;
2097 } else {
2098 i = CURSEG_HOT_NODE;
2099 }
2100 cnt = NR_CURSEG_NODE_TYPE;
2101 } else {
2102 if (type >= CURSEG_WARM_DATA) {
2103 reversed = true;
2104 i = CURSEG_COLD_DATA;
2105 } else {
2106 i = CURSEG_HOT_DATA;
2107 }
2108 cnt = NR_CURSEG_DATA_TYPE;
2109 }
2110
2111 for (; cnt-- > 0; reversed ? i-- : i++) {
2112 if (i == type)
2113 continue;
2114 if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
2115 curseg->next_segno = segno;
Jaegeuk Kim43727522013-02-04 15:11:17 +09002116 return 1;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002117 }
2118 }
Jaegeuk Kim43727522013-02-04 15:11:17 +09002119 return 0;
2120}
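/*
 * Example of the probe order above (an illustrative reading of the loop, not
 * a guarantee): when no victim exists for the log's own temperature, an SSR
 * allocation for CURSEG_HOT_DATA goes on to try warm then cold data victims,
 * while CURSEG_COLD_DATA walks the other way and tries warm then hot; the
 * node logs follow the same pattern within the three node temperatures.
 */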
2121
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002122/*
                                              2123 * flush out the current segment and replace it with a new segment
                                              2124 * This function always succeeds; failing to allocate a new segment is a BUG
2125 */
2126static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2127 int type, bool force)
2128{
2129 struct curseg_info *curseg = CURSEG_I(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002130
Gu Zheng7b405272013-08-19 09:41:15 +08002131 if (force)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002132 new_curseg(sbi, type, true);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002133 else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2134 type == CURSEG_WARM_NODE)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002135 new_curseg(sbi, type, false);
Jaegeuk Kim60374682013-03-31 13:58:51 +09002136 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
2137 new_curseg(sbi, type, false);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002138 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
Chao Yua69c9742017-08-30 18:04:48 +08002139 change_curseg(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002140 else
2141 new_curseg(sbi, type, false);
Jaegeuk Kimdcdfff62013-10-22 20:56:10 +09002142
2143 stat_inc_seg_type(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002144}
2145
2146void allocate_new_segments(struct f2fs_sb_info *sbi)
2147{
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002148 struct curseg_info *curseg;
2149 unsigned int old_segno;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002150 int i;
2151
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002152 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2153 curseg = CURSEG_I(sbi, i);
2154 old_segno = curseg->segno;
2155 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
2156 locate_dirty_segment(sbi, old_segno);
2157 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002158}
2159
2160static const struct segment_allocation default_salloc_ops = {
2161 .allocate_segment = allocate_segment_by_default,
2162};
2163
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002164bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2165{
2166 __u64 trim_start = cpc->trim_start;
2167 bool has_candidate = false;
2168
2169 mutex_lock(&SIT_I(sbi)->sentry_lock);
2170 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2171 if (add_discard_addrs(sbi, cpc, true)) {
2172 has_candidate = true;
2173 break;
2174 }
2175 }
2176 mutex_unlock(&SIT_I(sbi)->sentry_lock);
2177
2178 cpc->trim_start = trim_start;
2179 return has_candidate;
2180}
2181
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002182int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2183{
Jaegeuk Kimf7ef9b82015-02-09 12:02:44 -08002184 __u64 start = F2FS_BYTES_TO_BLK(range->start);
2185 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002186 unsigned int start_segno, end_segno;
2187 struct cp_control cpc;
Chao Yuc34f42e2015-12-23 17:50:30 +08002188 int err = 0;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002189
Jaegeuk Kim836b5a62015-04-30 22:50:06 -07002190 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002191 return -EINVAL;
2192
Jan Kara9bd27ae2014-10-21 14:07:33 +02002193 cpc.trimmed = 0;
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002194 if (end <= MAIN_BLKADDR(sbi))
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002195 goto out;
2196
Yunlei Heed214a12016-09-01 10:14:39 +08002197 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2198 f2fs_msg(sbi->sb, KERN_WARNING,
2199 "Found FS corruption, run fsck to fix.");
2200 goto out;
2201 }
2202
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002203 /* start/end segment number in main_area */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002204 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2205 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2206 GET_SEGNO(sbi, end);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002207 cpc.reason = CP_DISCARD;
Jaegeuk Kim836b5a62015-04-30 22:50:06 -07002208 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002209
2210 /* do checkpoint to issue discard commands safely */
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002211 for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
2212 cpc.trim_start = start_segno;
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07002213
2214 if (sbi->discard_blks == 0)
2215 break;
2216 else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
2217 cpc.trim_end = end_segno;
2218 else
2219 cpc.trim_end = min_t(unsigned int,
2220 rounddown(start_segno +
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002221 BATCHED_TRIM_SEGMENTS(sbi),
2222 sbi->segs_per_sec) - 1, end_segno);
2223
2224 mutex_lock(&sbi->gc_mutex);
Chao Yuc34f42e2015-12-23 17:50:30 +08002225 err = write_checkpoint(sbi, &cpc);
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002226 mutex_unlock(&sbi->gc_mutex);
Chao Yue9328352016-08-21 23:21:29 +08002227 if (err)
2228 break;
Chao Yu74fa5f32016-08-21 23:21:30 +08002229
2230 schedule();
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002231 }
Chao Yuefe24da2017-08-07 23:09:56 +08002232	/* It's time to issue all the discards filed so far */
2233 mark_discard_range_all(sbi);
Jaegeuk Kimcfc72cc2017-09-09 12:03:23 -07002234 f2fs_wait_discard_bios(sbi);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002235out:
Jaegeuk Kimf7ef9b82015-02-09 12:02:44 -08002236 range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
Chao Yuc34f42e2015-12-23 17:50:30 +08002237 return err;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002238}
2239
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002240static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2241{
2242 struct curseg_info *curseg = CURSEG_I(sbi, type);
2243 if (curseg->next_blkoff < sbi->blocks_per_seg)
2244 return true;
2245 return false;
2246}
2247
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002248static int __get_segment_type_2(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002249{
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002250 if (fio->type == DATA)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002251 return CURSEG_HOT_DATA;
2252 else
2253 return CURSEG_HOT_NODE;
2254}
2255
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002256static int __get_segment_type_4(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002257{
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002258 if (fio->type == DATA) {
2259 struct inode *inode = fio->page->mapping->host;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002260
2261 if (S_ISDIR(inode->i_mode))
2262 return CURSEG_HOT_DATA;
2263 else
2264 return CURSEG_COLD_DATA;
2265 } else {
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002266 if (IS_DNODE(fio->page) && is_cold_node(fio->page))
Jaegeuk Kima344b9f2014-11-05 20:05:53 -08002267 return CURSEG_WARM_NODE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002268 else
2269 return CURSEG_COLD_NODE;
2270 }
2271}
2272
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002273static int __get_segment_type_6(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002274{
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002275 if (fio->type == DATA) {
2276 struct inode *inode = fio->page->mapping->host;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002277
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002278 if (is_cold_data(fio->page) || file_is_cold(inode))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002279 return CURSEG_COLD_DATA;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002280 if (is_inode_flag_set(inode, FI_HOT_DATA))
2281 return CURSEG_HOT_DATA;
2282 return CURSEG_WARM_DATA;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002283 } else {
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002284 if (IS_DNODE(fio->page))
2285 return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002286 CURSEG_HOT_NODE;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002287 return CURSEG_COLD_NODE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002288 }
2289}
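/*
 * Example of the 6-log mapping above (illustrative summary): data pages
 * already tagged cold, or belonging to cold files, go to the cold data log;
 * otherwise pages of inodes flagged FI_HOT_DATA go to the hot data log and
 * everything else to warm.  Direct node blocks marked cold go to the warm
 * node log, other direct node blocks to the hot node log, and indirect node
 * blocks to the cold node log.
 */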
2290
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002291static int __get_segment_type(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002292{
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002293 int type = 0;
2294
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002295 switch (fio->sbi->active_logs) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002296 case 2:
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002297 type = __get_segment_type_2(fio);
2298 break;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002299 case 4:
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002300 type = __get_segment_type_4(fio);
2301 break;
2302 case 6:
2303 type = __get_segment_type_6(fio);
2304 break;
2305 default:
2306 f2fs_bug_on(fio->sbi, true);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002307 }
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002308
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002309 if (IS_HOT(type))
2310 fio->temp = HOT;
2311 else if (IS_WARM(type))
2312 fio->temp = WARM;
2313 else
2314 fio->temp = COLD;
2315 return type;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002316}
2317
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002318void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
2319 block_t old_blkaddr, block_t *new_blkaddr,
Chao Yuc52dc0f2017-05-19 23:37:01 +08002320 struct f2fs_summary *sum, int type,
2321 struct f2fs_io_info *fio, bool add_list)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002322{
2323 struct sit_info *sit_i = SIT_I(sbi);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002324 struct curseg_info *curseg = CURSEG_I(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002325
2326 mutex_lock(&curseg->curseg_mutex);
Jaegeuk Kim21cb1d92015-03-11 13:42:48 -04002327 mutex_lock(&sit_i->sentry_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002328
2329 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002330
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002331 f2fs_wait_discard_bio(sbi, *new_blkaddr);
2332
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002333 /*
                                              2334 	 * __add_sum_entry should be called with curseg_mutex held
                                              2335 	 * because this function updates a summary entry in the
2336 * current summary block.
2337 */
Haicheng Lie79efe32013-06-13 16:59:27 +08002338 __add_sum_entry(sbi, type, sum);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002339
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002340 __refresh_next_blkoff(sbi, curseg);
Jaegeuk Kimdcdfff62013-10-22 20:56:10 +09002341
2342 stat_inc_block_count(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002343
Jaegeuk Kim5e443812014-01-28 12:22:14 +09002344 if (!__has_curseg_space(sbi, type))
2345 sit_i->s_ops->allocate_segment(sbi, type, false);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002346 /*
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002347 * SIT information should be updated after segment allocation,
2348 * since we need to keep dirty segments precisely under SSR.
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002349 */
2350 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
Jaegeuk Kim5e443812014-01-28 12:22:14 +09002351
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002352 mutex_unlock(&sit_i->sentry_lock);
2353
Chao Yu43101252017-07-31 20:19:09 +08002354 if (page && IS_NODESEG(type)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002355 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
2356
Chao Yu43101252017-07-31 20:19:09 +08002357 f2fs_inode_chksum_set(sbi, page);
2358 }
2359
Chao Yuc52dc0f2017-05-19 23:37:01 +08002360 if (add_list) {
2361 struct f2fs_bio_info *io;
2362
2363 INIT_LIST_HEAD(&fio->list);
2364 fio->in_list = true;
2365 io = sbi->write_io[fio->type] + fio->temp;
2366 spin_lock(&io->io_lock);
2367 list_add_tail(&fio->list, &io->io_list);
2368 spin_unlock(&io->io_lock);
2369 }
2370
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002371 mutex_unlock(&curseg->curseg_mutex);
2372}
2373
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002374static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002375{
Jaegeuk Kim72cc4582017-05-10 14:19:54 -07002376 int type = __get_segment_type(fio);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002377 int err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002378
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002379reallocate:
Chao Yu7a9d7542016-02-22 18:36:38 +08002380 allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
Chao Yuc52dc0f2017-05-19 23:37:01 +08002381 &fio->new_blkaddr, sum, type, fio, true);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002382
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002383 /* writeout dirty page into bdev */
Jaegeuk Kim9bc576a2017-05-10 11:28:38 -07002384 err = f2fs_submit_page_write(fio);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002385 if (err == -EAGAIN) {
2386 fio->old_blkaddr = fio->new_blkaddr;
2387 goto reallocate;
2388 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002389}
2390
Chao Yuc0fe4882017-08-02 23:21:48 +08002391void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
2392 enum iostat_type io_type)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002393{
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002394 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002395 .sbi = sbi,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002396 .type = META,
Mike Christie04d328d2016-06-05 14:31:55 -05002397 .op = REQ_OP_WRITE,
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002398 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
Chao Yu7a9d7542016-02-22 18:36:38 +08002399 .old_blkaddr = page->index,
2400 .new_blkaddr = page->index,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002401 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07002402 .encrypted_page = NULL,
Chao Yuc52dc0f2017-05-19 23:37:01 +08002403 .in_list = false,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002404 };
2405
Chao Yu2b947002015-10-12 17:04:21 +08002406 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
Mike Christie04d328d2016-06-05 14:31:55 -05002407 fio.op_flags &= ~REQ_META;
Chao Yu2b947002015-10-12 17:04:21 +08002408
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002409 set_page_writeback(page);
Jaegeuk Kim9bc576a2017-05-10 11:28:38 -07002410 f2fs_submit_page_write(&fio);
Chao Yuc0fe4882017-08-02 23:21:48 +08002411
2412 f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002413}
2414
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002415void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002416{
2417 struct f2fs_summary sum;
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002418
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002419 set_summary(&sum, nid, 0, 0);
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002420 do_write_page(&sum, fio);
Chao Yuc0fe4882017-08-02 23:21:48 +08002421
2422 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002423}
2424
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002425void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002426{
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002427 struct f2fs_sb_info *sbi = fio->sbi;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002428 struct f2fs_summary sum;
2429 struct node_info ni;
2430
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07002431 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002432 get_node_info(sbi, dn->nid, &ni);
2433 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002434 do_write_page(&sum, fio);
Chao Yuf28b3432016-02-24 17:16:47 +08002435 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
Chao Yuc0fe4882017-08-02 23:21:48 +08002436
2437 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002438}
2439
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002440int rewrite_data_page(struct f2fs_io_info *fio)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002441{
Chao Yuc0fe4882017-08-02 23:21:48 +08002442 int err;
2443
Chao Yu7a9d7542016-02-22 18:36:38 +08002444 fio->new_blkaddr = fio->old_blkaddr;
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002445 stat_inc_inplace_blocks(fio->sbi);
Chao Yuc0fe4882017-08-02 23:21:48 +08002446
2447 err = f2fs_submit_page_bio(fio);
2448
2449 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2450
2451 return err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002452}
2453
Chao Yu4356e482016-02-23 17:52:43 +08002454void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
Chao Yu19f106b2015-05-06 13:08:06 +08002455 block_t old_blkaddr, block_t new_blkaddr,
Chao Yu28bc1062016-02-06 14:40:34 +08002456 bool recover_curseg, bool recover_newaddr)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002457{
2458 struct sit_info *sit_i = SIT_I(sbi);
2459 struct curseg_info *curseg;
2460 unsigned int segno, old_cursegno;
2461 struct seg_entry *se;
2462 int type;
Chao Yu19f106b2015-05-06 13:08:06 +08002463 unsigned short old_blkoff;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002464
2465 segno = GET_SEGNO(sbi, new_blkaddr);
2466 se = get_seg_entry(sbi, segno);
2467 type = se->type;
2468
Chao Yu19f106b2015-05-06 13:08:06 +08002469 if (!recover_curseg) {
2470 /* for recovery flow */
2471 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
2472 if (old_blkaddr == NULL_ADDR)
2473 type = CURSEG_COLD_DATA;
2474 else
2475 type = CURSEG_WARM_DATA;
2476 }
2477 } else {
2478 if (!IS_CURSEG(sbi, segno))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002479 type = CURSEG_WARM_DATA;
2480 }
Chao Yu19f106b2015-05-06 13:08:06 +08002481
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002482 curseg = CURSEG_I(sbi, type);
2483
2484 mutex_lock(&curseg->curseg_mutex);
2485 mutex_lock(&sit_i->sentry_lock);
2486
2487 old_cursegno = curseg->segno;
Chao Yu19f106b2015-05-06 13:08:06 +08002488 old_blkoff = curseg->next_blkoff;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002489
2490 /* change the current segment */
2491 if (segno != curseg->segno) {
2492 curseg->next_segno = segno;
Chao Yua69c9742017-08-30 18:04:48 +08002493 change_curseg(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002494 }
2495
Jaegeuk Kim491c0852014-02-04 13:01:10 +09002496 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
Haicheng Lie79efe32013-06-13 16:59:27 +08002497 __add_sum_entry(sbi, type, sum);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002498
Chao Yu28bc1062016-02-06 14:40:34 +08002499 if (!recover_curseg || recover_newaddr)
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07002500 update_sit_entry(sbi, new_blkaddr, 1);
2501 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2502 update_sit_entry(sbi, old_blkaddr, -1);
2503
2504 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2505 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
2506
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002507 locate_dirty_segment(sbi, old_cursegno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002508
Chao Yu19f106b2015-05-06 13:08:06 +08002509 if (recover_curseg) {
2510 if (old_cursegno != curseg->segno) {
2511 curseg->next_segno = old_cursegno;
Chao Yua69c9742017-08-30 18:04:48 +08002512 change_curseg(sbi, type);
Chao Yu19f106b2015-05-06 13:08:06 +08002513 }
2514 curseg->next_blkoff = old_blkoff;
2515 }
2516
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002517 mutex_unlock(&sit_i->sentry_lock);
2518 mutex_unlock(&curseg->curseg_mutex);
2519}
2520
Chao Yu528e3452015-05-28 19:15:35 +08002521void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
2522 block_t old_addr, block_t new_addr,
Chao Yu28bc1062016-02-06 14:40:34 +08002523 unsigned char version, bool recover_curseg,
2524 bool recover_newaddr)
Chao Yu528e3452015-05-28 19:15:35 +08002525{
2526 struct f2fs_summary sum;
2527
2528 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
2529
Chao Yu28bc1062016-02-06 14:40:34 +08002530 __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
2531 recover_curseg, recover_newaddr);
Chao Yu528e3452015-05-28 19:15:35 +08002532
Chao Yuf28b3432016-02-24 17:16:47 +08002533 f2fs_update_data_blkaddr(dn, new_addr);
Chao Yu528e3452015-05-28 19:15:35 +08002534}
2535
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002536void f2fs_wait_on_page_writeback(struct page *page,
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002537 enum page_type type, bool ordered)
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002538{
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002539 if (PageWriteback(page)) {
Jaegeuk Kim40813632014-09-02 15:31:18 -07002540 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
2541
Jaegeuk Kim9bc576a2017-05-10 11:28:38 -07002542 f2fs_submit_merged_write_cond(sbi, page->mapping->host,
2543 0, page->index, type);
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002544 if (ordered)
2545 wait_on_page_writeback(page);
2546 else
2547 wait_for_stable_page(page);
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09002548 }
2549}
2550
Jaegeuk Kimfb605d02017-09-05 17:04:35 -07002551void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
Chao Yu08b39fb2015-10-08 13:27:34 +08002552{
2553 struct page *cpage;
2554
Yunlei He5d4c0af2016-09-18 08:16:56 +08002555 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
Chao Yu08b39fb2015-10-08 13:27:34 +08002556 return;
2557
Chao Yu08b39fb2015-10-08 13:27:34 +08002558 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
2559 if (cpage) {
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002560 f2fs_wait_on_page_writeback(cpage, DATA, true);
Chao Yu08b39fb2015-10-08 13:27:34 +08002561 f2fs_put_page(cpage, 1);
2562 }
2563}
2564
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002565static int read_compacted_summaries(struct f2fs_sb_info *sbi)
2566{
2567 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2568 struct curseg_info *seg_i;
2569 unsigned char *kaddr;
2570 struct page *page;
2571 block_t start;
2572 int i, j, offset;
2573
2574 start = start_sum_block(sbi);
2575
2576 page = get_meta_page(sbi, start++);
2577 kaddr = (unsigned char *)page_address(page);
2578
2579 /* Step 1: restore nat cache */
2580 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002581 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002582
2583 /* Step 2: restore sit cache */
2584 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002585 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002586 offset = 2 * SUM_JOURNAL_SIZE;
2587
2588 /* Step 3: restore summary entries */
2589 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2590 unsigned short blk_off;
2591 unsigned int segno;
2592
2593 seg_i = CURSEG_I(sbi, i);
2594 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2595 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2596 seg_i->next_segno = segno;
2597 reset_curseg(sbi, i, 0);
2598 seg_i->alloc_type = ckpt->alloc_type[i];
2599 seg_i->next_blkoff = blk_off;
2600
2601 if (seg_i->alloc_type == SSR)
2602 blk_off = sbi->blocks_per_seg;
2603
2604 for (j = 0; j < blk_off; j++) {
2605 struct f2fs_summary *s;
2606 s = (struct f2fs_summary *)(kaddr + offset);
2607 seg_i->sum_blk->entries[j] = *s;
2608 offset += SUMMARY_SIZE;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002609 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002610 SUM_FOOTER_SIZE)
2611 continue;
2612
2613 f2fs_put_page(page, 1);
2614 page = NULL;
2615
2616 page = get_meta_page(sbi, start++);
2617 kaddr = (unsigned char *)page_address(page);
2618 offset = 0;
2619 }
2620 }
2621 f2fs_put_page(page, 1);
2622 return 0;
2623}
2624
2625static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
2626{
2627 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2628 struct f2fs_summary_block *sum;
2629 struct curseg_info *curseg;
2630 struct page *new;
2631 unsigned short blk_off;
2632 unsigned int segno = 0;
2633 block_t blk_addr = 0;
2634
2635 /* get segment number and block addr */
2636 if (IS_DATASEG(type)) {
2637 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
2638 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
2639 CURSEG_HOT_DATA]);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002640 if (__exist_node_summaries(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002641 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
2642 else
2643 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
2644 } else {
2645 segno = le32_to_cpu(ckpt->cur_node_segno[type -
2646 CURSEG_HOT_NODE]);
2647 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
2648 CURSEG_HOT_NODE]);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002649 if (__exist_node_summaries(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002650 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
2651 type - CURSEG_HOT_NODE);
2652 else
2653 blk_addr = GET_SUM_BLOCK(sbi, segno);
2654 }
2655
2656 new = get_meta_page(sbi, blk_addr);
2657 sum = (struct f2fs_summary_block *)page_address(new);
2658
2659 if (IS_NODESEG(type)) {
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002660 if (__exist_node_summaries(sbi)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002661 struct f2fs_summary *ns = &sum->entries[0];
2662 int i;
2663 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
2664 ns->version = 0;
2665 ns->ofs_in_node = 0;
2666 }
2667 } else {
Gu Zhengd6537882014-03-07 18:43:36 +08002668 int err;
2669
2670 err = restore_node_summary(sbi, segno, sum);
2671 if (err) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002672 f2fs_put_page(new, 1);
Gu Zhengd6537882014-03-07 18:43:36 +08002673 return err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002674 }
2675 }
2676 }
2677
2678 /* set uncompleted segment to curseg */
2679 curseg = CURSEG_I(sbi, type);
2680 mutex_lock(&curseg->curseg_mutex);
Chao Yub7ad7512016-02-19 18:08:46 +08002681
2682 /* update journal info */
2683 down_write(&curseg->journal_rwsem);
2684 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
2685 up_write(&curseg->journal_rwsem);
2686
2687 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
2688 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002689 curseg->next_segno = segno;
2690 reset_curseg(sbi, type, 0);
2691 curseg->alloc_type = ckpt->alloc_type[type];
2692 curseg->next_blkoff = blk_off;
2693 mutex_unlock(&curseg->curseg_mutex);
2694 f2fs_put_page(new, 1);
2695 return 0;
2696}
2697
2698static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
2699{
Jin Qiana8d61752017-06-01 11:18:30 -07002700 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
2701 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002702 int type = CURSEG_HOT_DATA;
Chao Yue4fc5fb2014-03-17 16:36:24 +08002703 int err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002704
Chao Yuaaec2b12016-09-20 11:04:18 +08002705 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
Chao Yu3fa06d72014-12-09 14:21:46 +08002706 int npages = npages_for_summary_flush(sbi, true);
2707
2708 if (npages >= 2)
2709 ra_meta_pages(sbi, start_sum_block(sbi), npages,
Chao Yu26879fb2015-10-12 17:05:59 +08002710 META_CP, true);
Chao Yu3fa06d72014-12-09 14:21:46 +08002711
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002712 /* restore for compacted data summary */
2713 if (read_compacted_summaries(sbi))
2714 return -EINVAL;
2715 type = CURSEG_HOT_NODE;
2716 }
2717
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002718 if (__exist_node_summaries(sbi))
Chao Yu3fa06d72014-12-09 14:21:46 +08002719 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
Chao Yu26879fb2015-10-12 17:05:59 +08002720 NR_CURSEG_TYPE - type, META_CP, true);
Chao Yu3fa06d72014-12-09 14:21:46 +08002721
Chao Yue4fc5fb2014-03-17 16:36:24 +08002722 for (; type <= CURSEG_COLD_NODE; type++) {
2723 err = read_normal_summaries(sbi, type);
2724 if (err)
2725 return err;
2726 }
2727
Jin Qiana8d61752017-06-01 11:18:30 -07002728 /* sanity check for summary blocks */
2729 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
2730 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
2731 return -EINVAL;
2732
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002733 return 0;
2734}
2735
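/*
 * Write summaries in the compacted format: pack the NAT journal, the
 * SIT journal and the summary entries of the three data cursegs into
 * as few meta pages as possible, starting at @blkaddr.
 */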
2736static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
2737{
2738 struct page *page;
2739 unsigned char *kaddr;
2740 struct f2fs_summary *summary;
2741 struct curseg_info *seg_i;
2742 int written_size = 0;
2743 int i, j;
2744
2745 page = grab_meta_page(sbi, blkaddr++);
2746 kaddr = (unsigned char *)page_address(page);
2747
2748 /* Step 1: write nat cache */
2749 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002750 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002751 written_size += SUM_JOURNAL_SIZE;
2752
2753 /* Step 2: write sit cache */
2754 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002755 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002756 written_size += SUM_JOURNAL_SIZE;
2757
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002758 /* Step 3: write summary entries */
2759 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2760 unsigned short blkoff;
2761 seg_i = CURSEG_I(sbi, i);
2762 if (sbi->ckpt->alloc_type[i] == SSR)
2763 blkoff = sbi->blocks_per_seg;
2764 else
2765 blkoff = curseg_blkoff(sbi, i);
2766
2767 for (j = 0; j < blkoff; j++) {
2768 if (!page) {
2769 page = grab_meta_page(sbi, blkaddr++);
2770 kaddr = (unsigned char *)page_address(page);
2771 written_size = 0;
2772 }
2773 summary = (struct f2fs_summary *)(kaddr + written_size);
2774 *summary = seg_i->sum_blk->entries[j];
2775 written_size += SUMMARY_SIZE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002776
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002777 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002778 SUM_FOOTER_SIZE)
2779 continue;
2780
Chao Yue8d61a72013-10-24 15:08:28 +08002781 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002782 f2fs_put_page(page, 1);
2783 page = NULL;
2784 }
2785 }
Chao Yue8d61a72013-10-24 15:08:28 +08002786 if (page) {
2787 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002788 f2fs_put_page(page, 1);
Chao Yue8d61a72013-10-24 15:08:28 +08002789 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002790}
2791
2792static void write_normal_summaries(struct f2fs_sb_info *sbi,
2793 block_t blkaddr, int type)
2794{
2795 int i, end;
2796 if (IS_DATASEG(type))
2797 end = type + NR_CURSEG_DATA_TYPE;
2798 else
2799 end = type + NR_CURSEG_NODE_TYPE;
2800
Chao Yub7ad7512016-02-19 18:08:46 +08002801 for (i = type; i < end; i++)
2802 write_current_sum_page(sbi, i, blkaddr + (i - type));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002803}
2804
2805void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2806{
Chao Yuaaec2b12016-09-20 11:04:18 +08002807 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002808 write_compacted_summaries(sbi, start_blk);
2809 else
2810 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
2811}
2812
2813void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2814{
Jaegeuk Kim119ee912015-01-29 11:45:33 -08002815 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002816}
2817
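/*
 * Look up @val (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in the
 * in-memory journal.  Return the matching entry index, or, if @alloc is
 * set and journal space remains, the index of a newly reserved slot;
 * otherwise return -1.
 */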
Chao Yudfc08a12016-02-14 18:50:40 +08002818int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002819 unsigned int val, int alloc)
2820{
2821 int i;
2822
2823 if (type == NAT_JOURNAL) {
Chao Yudfc08a12016-02-14 18:50:40 +08002824 for (i = 0; i < nats_in_cursum(journal); i++) {
2825 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002826 return i;
2827 }
Chao Yudfc08a12016-02-14 18:50:40 +08002828 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
2829 return update_nats_in_cursum(journal, 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002830 } else if (type == SIT_JOURNAL) {
Chao Yudfc08a12016-02-14 18:50:40 +08002831 for (i = 0; i < sits_in_cursum(journal); i++)
2832 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002833 return i;
Chao Yudfc08a12016-02-14 18:50:40 +08002834 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
2835 return update_sits_in_cursum(journal, 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002836 }
2837 return -1;
2838}
2839
2840static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
2841 unsigned int segno)
2842{
Gu Zheng2cc22182014-10-20 17:45:49 +08002843 return get_meta_page(sbi, current_sit_addr(sbi, segno));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002844}
2845
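/*
 * SIT blocks exist in two on-disk copies that are written alternately.
 * Copy the current SIT block covering @start into the other location,
 * flip the SIT bitmap bit so the new location becomes the current one,
 * and return the dirty destination page.
 */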
2846static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
2847 unsigned int start)
2848{
2849 struct sit_info *sit_i = SIT_I(sbi);
2850 struct page *src_page, *dst_page;
2851 pgoff_t src_off, dst_off;
2852 void *src_addr, *dst_addr;
2853
2854 src_off = current_sit_addr(sbi, start);
2855 dst_off = next_sit_addr(sbi, src_off);
2856
2857 /* get current sit block page without lock */
2858 src_page = get_meta_page(sbi, src_off);
2859 dst_page = grab_meta_page(sbi, dst_off);
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07002860 f2fs_bug_on(sbi, PageDirty(src_page));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002861
2862 src_addr = page_address(src_page);
2863 dst_addr = page_address(dst_page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002864 memcpy(dst_addr, src_addr, PAGE_SIZE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002865
2866 set_page_dirty(dst_page);
2867 f2fs_put_page(src_page, 1);
2868
2869 set_to_next_sit(sit_i, start);
2870
2871 return dst_page;
2872}
2873
Chao Yu184a5cd2014-09-04 18:13:01 +08002874static struct sit_entry_set *grab_sit_entry_set(void)
2875{
2876 struct sit_entry_set *ses =
Jaegeuk Kim80c54502015-08-20 08:51:56 -07002877 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
Chao Yu184a5cd2014-09-04 18:13:01 +08002878
2879 ses->entry_cnt = 0;
2880 INIT_LIST_HEAD(&ses->set_list);
2881 return ses;
2882}
2883
2884static void release_sit_entry_set(struct sit_entry_set *ses)
2885{
2886 list_del(&ses->set_list);
2887 kmem_cache_free(sit_entry_set_slab, ses);
2888}
2889
2890static void adjust_sit_entry_set(struct sit_entry_set *ses,
2891 struct list_head *head)
2892{
2893 struct sit_entry_set *next = ses;
2894
2895 if (list_is_last(&ses->set_list, head))
2896 return;
2897
2898 list_for_each_entry_continue(next, head, set_list)
2899 if (ses->entry_cnt <= next->entry_cnt)
2900 break;
2901
2902 list_move_tail(&ses->set_list, &next->set_list);
2903}
2904
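/*
 * Account a dirty sit entry in the set covering its SIT block (grouped
 * by START_SEGNO), allocating a new set if none exists yet.  The set
 * list is kept sorted by entry count, so the smallest sets come first
 * and are the most likely to fit into the journal at flush time.
 */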
2905static void add_sit_entry(unsigned int segno, struct list_head *head)
2906{
2907 struct sit_entry_set *ses;
2908 unsigned int start_segno = START_SEGNO(segno);
2909
2910 list_for_each_entry(ses, head, set_list) {
2911 if (ses->start_segno == start_segno) {
2912 ses->entry_cnt++;
2913 adjust_sit_entry_set(ses, head);
2914 return;
2915 }
2916 }
2917
2918 ses = grab_sit_entry_set();
2919
2920 ses->start_segno = start_segno;
2921 ses->entry_cnt++;
2922 list_add(&ses->set_list, head);
2923}
2924
2925static void add_sits_in_set(struct f2fs_sb_info *sbi)
2926{
2927 struct f2fs_sm_info *sm_info = SM_I(sbi);
2928 struct list_head *set_list = &sm_info->sit_entry_set;
2929 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
Chao Yu184a5cd2014-09-04 18:13:01 +08002930 unsigned int segno;
2931
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002932 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
Chao Yu184a5cd2014-09-04 18:13:01 +08002933 add_sit_entry(segno, set_list);
2934}
2935
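/*
 * Move every SIT entry cached in the curseg journal back into the
 * dirty sit entry sets and empty the journal, so that those entries
 * are written through SIT pages instead.
 */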
2936static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002937{
2938 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002939 struct f2fs_journal *journal = curseg->journal;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002940 int i;
2941
Chao Yub7ad7512016-02-19 18:08:46 +08002942 down_write(&curseg->journal_rwsem);
Chao Yudfc08a12016-02-14 18:50:40 +08002943 for (i = 0; i < sits_in_cursum(journal); i++) {
Chao Yu184a5cd2014-09-04 18:13:01 +08002944 unsigned int segno;
2945 bool dirtied;
2946
Chao Yudfc08a12016-02-14 18:50:40 +08002947 segno = le32_to_cpu(segno_in_journal(journal, i));
Chao Yu184a5cd2014-09-04 18:13:01 +08002948 dirtied = __mark_sit_entry_dirty(sbi, segno);
2949
2950 if (!dirtied)
2951 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002952 }
Chao Yudfc08a12016-02-14 18:50:40 +08002953 update_sits_in_cursum(journal, -i);
Chao Yub7ad7512016-02-19 18:08:46 +08002954 up_write(&curseg->journal_rwsem);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002955}
2956
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002957/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002958 * CP calls this function, which flushes SIT entries including sit_journal,
2959 * and moves prefree segs to free segs.
2960 */
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002961void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002962{
2963 struct sit_info *sit_i = SIT_I(sbi);
2964 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
2965 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08002966 struct f2fs_journal *journal = curseg->journal;
Chao Yu184a5cd2014-09-04 18:13:01 +08002967 struct sit_entry_set *ses, *tmp;
2968 struct list_head *head = &SM_I(sbi)->sit_entry_set;
Chao Yu184a5cd2014-09-04 18:13:01 +08002969 bool to_journal = true;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07002970 struct seg_entry *se;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002971
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002972 mutex_lock(&sit_i->sentry_lock);
2973
Wanpeng Li2b11a742015-02-27 16:52:50 +08002974 if (!sit_i->dirty_sentries)
2975 goto out;
2976
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002977 /*
Chao Yu184a5cd2014-09-04 18:13:01 +08002978 * add and account sit entries from the dirty bitmap in sit entry
 2979 * sets temporarily
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002980 */
Chao Yu184a5cd2014-09-04 18:13:01 +08002981 add_sits_in_set(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002982
Chao Yu184a5cd2014-09-04 18:13:01 +08002983 /*
 2984 * if there is not enough space in the journal to store dirty sit
 2985 * entries, remove all entries from the journal and add and account
 2986 * them in sit entry sets.
2987 */
Chao Yudfc08a12016-02-14 18:50:40 +08002988 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
Chao Yu184a5cd2014-09-04 18:13:01 +08002989 remove_sits_in_journal(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002990
Chao Yu184a5cd2014-09-04 18:13:01 +08002991 /*
2992 * there are two steps to flush sit entries:
2993 * #1, flush sit entries to journal in current cold data summary block.
2994 * #2, flush sit entries to sit page.
2995 */
2996 list_for_each_entry_safe(ses, tmp, head, set_list) {
Jaegeuk Kim4a257ed2014-10-16 11:43:30 -07002997 struct page *page = NULL;
Chao Yu184a5cd2014-09-04 18:13:01 +08002998 struct f2fs_sit_block *raw_sit = NULL;
2999 unsigned int start_segno = ses->start_segno;
3000 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003001 (unsigned long)MAIN_SEGS(sbi));
Chao Yu184a5cd2014-09-04 18:13:01 +08003002 unsigned int segno = start_segno;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09003003
Chao Yu184a5cd2014-09-04 18:13:01 +08003004 if (to_journal &&
Chao Yudfc08a12016-02-14 18:50:40 +08003005 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
Chao Yu184a5cd2014-09-04 18:13:01 +08003006 to_journal = false;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003007
Chao Yub7ad7512016-02-19 18:08:46 +08003008 if (to_journal) {
3009 down_write(&curseg->journal_rwsem);
3010 } else {
Chao Yu184a5cd2014-09-04 18:13:01 +08003011 page = get_next_sit_page(sbi, start_segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003012 raw_sit = page_address(page);
3013 }
3014
Chao Yu184a5cd2014-09-04 18:13:01 +08003015 /* flush dirty sit entries in region of current sit set */
3016 for_each_set_bit_from(segno, bitmap, end) {
3017 int offset, sit_offset;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003018
3019 se = get_seg_entry(sbi, segno);
Chao Yu184a5cd2014-09-04 18:13:01 +08003020
3021 /* add discard candidates */
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003022 if (!(cpc->reason & CP_DISCARD)) {
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003023 cpc->trim_start = segno;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003024 add_discard_addrs(sbi, cpc, false);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003025 }
Chao Yu184a5cd2014-09-04 18:13:01 +08003026
3027 if (to_journal) {
Chao Yudfc08a12016-02-14 18:50:40 +08003028 offset = lookup_journal_in_cursum(journal,
Chao Yu184a5cd2014-09-04 18:13:01 +08003029 SIT_JOURNAL, segno, 1);
3030 f2fs_bug_on(sbi, offset < 0);
Chao Yudfc08a12016-02-14 18:50:40 +08003031 segno_in_journal(journal, offset) =
Chao Yu184a5cd2014-09-04 18:13:01 +08003032 cpu_to_le32(segno);
3033 seg_info_to_raw_sit(se,
Chao Yudfc08a12016-02-14 18:50:40 +08003034 &sit_in_journal(journal, offset));
Chao Yu184a5cd2014-09-04 18:13:01 +08003035 } else {
3036 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3037 seg_info_to_raw_sit(se,
3038 &raw_sit->entries[sit_offset]);
3039 }
3040
3041 __clear_bit(segno, bitmap);
3042 sit_i->dirty_sentries--;
3043 ses->entry_cnt--;
3044 }
3045
Chao Yub7ad7512016-02-19 18:08:46 +08003046 if (to_journal)
3047 up_write(&curseg->journal_rwsem);
3048 else
Chao Yu184a5cd2014-09-04 18:13:01 +08003049 f2fs_put_page(page, 1);
3050
3051 f2fs_bug_on(sbi, ses->entry_cnt);
3052 release_sit_entry_set(ses);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003053 }
Chao Yu184a5cd2014-09-04 18:13:01 +08003054
3055 f2fs_bug_on(sbi, !list_empty(head));
3056 f2fs_bug_on(sbi, sit_i->dirty_sentries);
Chao Yu184a5cd2014-09-04 18:13:01 +08003057out:
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003058 if (cpc->reason & CP_DISCARD) {
3059 __u64 trim_start = cpc->trim_start;
3060
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003061 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003062 add_discard_addrs(sbi, cpc, false);
3063
3064 cpc->trim_start = trim_start;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07003065 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003066 mutex_unlock(&sit_i->sentry_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003067
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003068 set_prefree_as_free_segments(sbi);
3069}
3070
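/*
 * Allocate the in-memory SIT: the per-segment seg_entry array with its
 * valid/ckpt/discard block bitmaps, the dirty sentries bitmap, the
 * per-section counters and a private copy of the SIT bitmap taken from
 * the checkpoint pack.
 */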
3071static int build_sit_info(struct f2fs_sb_info *sbi)
3072{
3073 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003074 struct sit_info *sit_i;
3075 unsigned int sit_segs, start;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003076 char *src_bitmap;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003077 unsigned int bitmap_size;
3078
3079 /* allocate memory for SIT information */
3080 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
3081 if (!sit_i)
3082 return -ENOMEM;
3083
3084 SM_I(sbi)->sit_info = sit_i;
3085
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003086 sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003087 sizeof(struct seg_entry), GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003088 if (!sit_i->sentries)
3089 return -ENOMEM;
3090
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003091 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003092 sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003093 if (!sit_i->dirty_sentries_bitmap)
3094 return -ENOMEM;
3095
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003096 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003097 sit_i->sentries[start].cur_valid_map
3098 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3099 sit_i->sentries[start].ckpt_valid_map
3100 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003101 if (!sit_i->sentries[start].cur_valid_map ||
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003102 !sit_i->sentries[start].ckpt_valid_map)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003103 return -ENOMEM;
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003104
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003105#ifdef CONFIG_F2FS_CHECK_FS
3106 sit_i->sentries[start].cur_valid_map_mir
3107 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3108 if (!sit_i->sentries[start].cur_valid_map_mir)
3109 return -ENOMEM;
3110#endif
3111
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003112 if (f2fs_discard_en(sbi)) {
3113 sit_i->sentries[start].discard_map
3114 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3115 if (!sit_i->sentries[start].discard_map)
3116 return -ENOMEM;
3117 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003118 }
3119
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08003120 sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3121 if (!sit_i->tmp_map)
3122 return -ENOMEM;
3123
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003124 if (sbi->segs_per_sec > 1) {
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003125 sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003126 sizeof(struct sec_entry), GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003127 if (!sit_i->sec_entries)
3128 return -ENOMEM;
3129 }
3130
 3131 /* get information related to SIT */
3132 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
3133
 3134 /* set up SIT bitmap from checkpoint pack */
3135 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
3136 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
3137
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003138 sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3139 if (!sit_i->sit_bitmap)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003140 return -ENOMEM;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003141
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003142#ifdef CONFIG_F2FS_CHECK_FS
3143 sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3144 if (!sit_i->sit_bitmap_mir)
3145 return -ENOMEM;
3146#endif
3147
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003148 /* init SIT information */
3149 sit_i->s_ops = &default_salloc_ops;
3150
3151 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
3152 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003153 sit_i->written_valid_blocks = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003154 sit_i->bitmap_size = bitmap_size;
3155 sit_i->dirty_sentries = 0;
3156 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
3157 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003158 sit_i->mounted_time = ktime_get_real_seconds();
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003159 mutex_init(&sit_i->sentry_lock);
3160 return 0;
3161}
3162
3163static int build_free_segmap(struct f2fs_sb_info *sbi)
3164{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003165 struct free_segmap_info *free_i;
3166 unsigned int bitmap_size, sec_bitmap_size;
3167
3168 /* allocate memory for free segmap information */
3169 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
3170 if (!free_i)
3171 return -ENOMEM;
3172
3173 SM_I(sbi)->free_info = free_i;
3174
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003175 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003176 free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003177 if (!free_i->free_segmap)
3178 return -ENOMEM;
3179
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003180 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003181 free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003182 if (!free_i->free_secmap)
3183 return -ENOMEM;
3184
3185 /* set all segments as dirty temporarily */
3186 memset(free_i->free_segmap, 0xff, bitmap_size);
3187 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
3188
3189 /* init free segmap information */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003190 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003191 free_i->free_segments = 0;
3192 free_i->free_sections = 0;
Chao Yu1a118cc2015-02-11 18:20:38 +08003193 spin_lock_init(&free_i->segmap_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003194 return 0;
3195}
3196
3197static int build_curseg(struct f2fs_sb_info *sbi)
3198{
Namjae Jeon1042d602012-12-01 10:56:13 +09003199 struct curseg_info *array;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003200 int i;
3201
Fabian Frederickb434bab2014-06-23 18:39:15 +02003202 array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003203 if (!array)
3204 return -ENOMEM;
3205
3206 SM_I(sbi)->curseg_array = array;
3207
3208 for (i = 0; i < NR_CURSEG_TYPE; i++) {
3209 mutex_init(&array[i].curseg_mutex);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003210 array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003211 if (!array[i].sum_blk)
3212 return -ENOMEM;
Chao Yub7ad7512016-02-19 18:08:46 +08003213 init_rwsem(&array[i].journal_rwsem);
3214 array[i].journal = kzalloc(sizeof(struct f2fs_journal),
3215 GFP_KERNEL);
3216 if (!array[i].journal)
3217 return -ENOMEM;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003218 array[i].segno = NULL_SEGNO;
3219 array[i].next_blkoff = 0;
3220 }
3221 return restore_curseg_summaries(sbi);
3222}
3223
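/*
 * Build the in-memory seg_entries from the on-disk SIT blocks (read
 * ahead in BIO_MAX_PAGES chunks), then overlay the newer entries still
 * sitting in the SIT journal from the last checkpoint.
 */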
3224static void build_sit_entries(struct f2fs_sb_info *sbi)
3225{
3226 struct sit_info *sit_i = SIT_I(sbi);
3227 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
Chao Yub7ad7512016-02-19 18:08:46 +08003228 struct f2fs_journal *journal = curseg->journal;
Yunlei He9c094042016-09-24 12:29:18 +08003229 struct seg_entry *se;
3230 struct f2fs_sit_entry sit;
Chao Yu74de5932013-11-22 09:09:59 +08003231 int sit_blk_cnt = SIT_BLK_CNT(sbi);
3232 unsigned int i, start, end;
3233 unsigned int readed, start_blk = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003234
Chao Yu74de5932013-11-22 09:09:59 +08003235 do {
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003236 readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
3237 META_SIT, true);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003238
Chao Yu74de5932013-11-22 09:09:59 +08003239 start = start_blk * sit_i->sents_per_block;
3240 end = (start_blk + readed) * sit_i->sents_per_block;
3241
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003242 for (; start < end && start < MAIN_SEGS(sbi); start++) {
Chao Yu74de5932013-11-22 09:09:59 +08003243 struct f2fs_sit_block *sit_blk;
Chao Yu74de5932013-11-22 09:09:59 +08003244 struct page *page;
3245
Yunlei He9c094042016-09-24 12:29:18 +08003246 se = &sit_i->sentries[start];
Chao Yu74de5932013-11-22 09:09:59 +08003247 page = get_current_sit_page(sbi, start);
3248 sit_blk = (struct f2fs_sit_block *)page_address(page);
3249 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
3250 f2fs_put_page(page, 1);
Chao Yud600af232016-08-19 23:13:47 +08003251
Chao Yu74de5932013-11-22 09:09:59 +08003252 check_block_count(sbi, start, &sit);
3253 seg_info_from_raw_sit(se, &sit);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003254
 3255 /* build discard map only once */
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003256 if (f2fs_discard_en(sbi)) {
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003257 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3258 memset(se->discard_map, 0xff,
3259 SIT_VBLOCK_MAP_SIZE);
3260 } else {
3261 memcpy(se->discard_map,
3262 se->cur_valid_map,
3263 SIT_VBLOCK_MAP_SIZE);
3264 sbi->discard_blks +=
3265 sbi->blocks_per_seg -
3266 se->valid_blocks;
3267 }
Jaegeuk Kim3e025742016-08-02 10:56:40 -07003268 }
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003269
Chao Yud600af232016-08-19 23:13:47 +08003270 if (sbi->segs_per_sec > 1)
3271 get_sec_entry(sbi, start)->valid_blocks +=
3272 se->valid_blocks;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003273 }
Chao Yu74de5932013-11-22 09:09:59 +08003274 start_blk += readed;
3275 } while (start_blk < sit_blk_cnt);
Chao Yud600af232016-08-19 23:13:47 +08003276
3277 down_read(&curseg->journal_rwsem);
3278 for (i = 0; i < sits_in_cursum(journal); i++) {
Chao Yud600af232016-08-19 23:13:47 +08003279 unsigned int old_valid_blocks;
3280
3281 start = le32_to_cpu(segno_in_journal(journal, i));
3282 se = &sit_i->sentries[start];
3283 sit = sit_in_journal(journal, i);
3284
3285 old_valid_blocks = se->valid_blocks;
3286
3287 check_block_count(sbi, start, &sit);
3288 seg_info_from_raw_sit(se, &sit);
3289
3290 if (f2fs_discard_en(sbi)) {
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003291 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3292 memset(se->discard_map, 0xff,
3293 SIT_VBLOCK_MAP_SIZE);
3294 } else {
3295 memcpy(se->discard_map, se->cur_valid_map,
3296 SIT_VBLOCK_MAP_SIZE);
3297 sbi->discard_blks += old_valid_blocks -
3298 se->valid_blocks;
3299 }
Chao Yud600af232016-08-19 23:13:47 +08003300 }
3301
3302 if (sbi->segs_per_sec > 1)
3303 get_sec_entry(sbi, start)->valid_blocks +=
3304 se->valid_blocks - old_valid_blocks;
3305 }
3306 up_read(&curseg->journal_rwsem);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003307}
3308
3309static void init_free_segmap(struct f2fs_sb_info *sbi)
3310{
3311 unsigned int start;
3312 int type;
3313
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003314 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003315 struct seg_entry *sentry = get_seg_entry(sbi, start);
3316 if (!sentry->valid_blocks)
3317 __set_free(sbi, start);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003318 else
3319 SIT_I(sbi)->written_valid_blocks +=
3320 sentry->valid_blocks;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003321 }
3322
 3323 /* mark the current segments as in use */
3324 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
3325 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
3326 __set_test_and_inuse(sbi, curseg_t->segno);
3327 }
3328}
3329
3330static void init_dirty_segmap(struct f2fs_sb_info *sbi)
3331{
3332 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3333 struct free_segmap_info *free_i = FREE_I(sbi);
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003334 unsigned int segno = 0, offset = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003335 unsigned short valid_blocks;
3336
Namjae Jeon8736fbf2013-06-16 09:49:11 +09003337 while (1) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003338 /* find dirty segment based on free segmap */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003339 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
3340 if (segno >= MAIN_SEGS(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003341 break;
3342 offset = segno + 1;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003343 valid_blocks = get_valid_blocks(sbi, segno, false);
Jaegeuk Kimec325b52014-09-02 16:24:11 -07003344 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003345 continue;
Jaegeuk Kimec325b52014-09-02 16:24:11 -07003346 if (valid_blocks > sbi->blocks_per_seg) {
3347 f2fs_bug_on(sbi, 1);
3348 continue;
3349 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003350 mutex_lock(&dirty_i->seglist_lock);
3351 __locate_dirty_segment(sbi, segno, DIRTY);
3352 mutex_unlock(&dirty_i->seglist_lock);
3353 }
3354}
3355
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003356static int init_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003357{
3358 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003359 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003360
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003361 dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003362 if (!dirty_i->victim_secmap)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003363 return -ENOMEM;
3364 return 0;
3365}
3366
3367static int build_dirty_segmap(struct f2fs_sb_info *sbi)
3368{
3369 struct dirty_seglist_info *dirty_i;
3370 unsigned int bitmap_size, i;
3371
3372 /* allocate memory for dirty segments list information */
3373 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
3374 if (!dirty_i)
3375 return -ENOMEM;
3376
3377 SM_I(sbi)->dirty_info = dirty_i;
3378 mutex_init(&dirty_i->seglist_lock);
3379
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003380 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003381
3382 for (i = 0; i < NR_DIRTY_TYPE; i++) {
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003383 dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003384 if (!dirty_i->dirty_segmap[i])
3385 return -ENOMEM;
3386 }
3387
3388 init_dirty_segmap(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003389 return init_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003390}
3391
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09003392/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003393 * Update min, max modified time for cost-benefit GC algorithm
3394 */
3395static void init_min_max_mtime(struct f2fs_sb_info *sbi)
3396{
3397 struct sit_info *sit_i = SIT_I(sbi);
3398 unsigned int segno;
3399
3400 mutex_lock(&sit_i->sentry_lock);
3401
3402 sit_i->min_mtime = LLONG_MAX;
3403
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003404 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003405 unsigned int i;
3406 unsigned long long mtime = 0;
3407
3408 for (i = 0; i < sbi->segs_per_sec; i++)
3409 mtime += get_seg_entry(sbi, segno + i)->mtime;
3410
3411 mtime = div_u64(mtime, sbi->segs_per_sec);
3412
3413 if (sit_i->min_mtime > mtime)
3414 sit_i->min_mtime = mtime;
3415 }
3416 sit_i->max_mtime = get_mtime(sbi);
3417 mutex_unlock(&sit_i->sentry_lock);
3418}
3419
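/*
 * Build the whole segment manager at mount: static layout info from the
 * superblock and checkpoint, the flush and discard command controls,
 * then SIT, free segmap and current segments, followed by the SIT
 * entries, dirty segmap and the min/max mtime used by cost-benefit GC.
 */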
3420int build_segment_manager(struct f2fs_sb_info *sbi)
3421{
3422 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3423 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Namjae Jeon1042d602012-12-01 10:56:13 +09003424 struct f2fs_sm_info *sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003425 int err;
3426
3427 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
3428 if (!sm_info)
3429 return -ENOMEM;
3430
3431 /* init sm info */
3432 sbi->sm_info = sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003433 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3434 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3435 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
3436 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3437 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3438 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
3439 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
Jaegeuk Kim58c41032014-03-19 14:17:21 +09003440 sm_info->rec_prefree_segments = sm_info->main_segments *
3441 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
Jaegeuk Kim44a83492016-07-13 18:23:35 -07003442 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
3443 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
3444
Jaegeuk Kim52763a42016-06-13 09:47:48 -07003445 if (!test_opt(sbi, LFS))
3446 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
Jaegeuk Kim216fbd62013-11-07 13:13:42 +09003447 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
Jaegeuk Kimc1ce1b02014-09-10 16:53:02 -07003448 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003449 sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003450
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08003451 sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
3452
Chao Yu184a5cd2014-09-04 18:13:01 +08003453 INIT_LIST_HEAD(&sm_info->sit_entry_set);
3454
Yunlei Head9baf42017-06-01 16:43:51 +08003455 if (!f2fs_readonly(sbi->sb)) {
Gu Zheng2163d192014-04-27 14:21:33 +08003456 err = create_flush_cmd_control(sbi);
3457 if (err)
Gu Zhenga688b9d9e2014-04-27 14:21:21 +08003458 return err;
Jaegeuk Kim6b4afdd2014-04-02 15:34:36 +09003459 }
3460
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003461 err = create_discard_cmd_control(sbi);
3462 if (err)
3463 return err;
3464
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003465 err = build_sit_info(sbi);
3466 if (err)
3467 return err;
3468 err = build_free_segmap(sbi);
3469 if (err)
3470 return err;
3471 err = build_curseg(sbi);
3472 if (err)
3473 return err;
3474
3475 /* reinit free segmap based on SIT */
3476 build_sit_entries(sbi);
3477
3478 init_free_segmap(sbi);
3479 err = build_dirty_segmap(sbi);
3480 if (err)
3481 return err;
3482
3483 init_min_max_mtime(sbi);
3484 return 0;
3485}
3486
3487static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
3488 enum dirty_type dirty_type)
3489{
3490 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3491
3492 mutex_lock(&dirty_i->seglist_lock);
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003493 kvfree(dirty_i->dirty_segmap[dirty_type]);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003494 dirty_i->nr_dirty[dirty_type] = 0;
3495 mutex_unlock(&dirty_i->seglist_lock);
3496}
3497
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003498static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003499{
3500 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003501 kvfree(dirty_i->victim_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003502}
3503
3504static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
3505{
3506 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3507 int i;
3508
3509 if (!dirty_i)
3510 return;
3511
3512 /* discard pre-free/dirty segments list */
3513 for (i = 0; i < NR_DIRTY_TYPE; i++)
3514 discard_dirty_segmap(sbi, i);
3515
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09003516 destroy_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003517 SM_I(sbi)->dirty_info = NULL;
3518 kfree(dirty_i);
3519}
3520
3521static void destroy_curseg(struct f2fs_sb_info *sbi)
3522{
3523 struct curseg_info *array = SM_I(sbi)->curseg_array;
3524 int i;
3525
3526 if (!array)
3527 return;
3528 SM_I(sbi)->curseg_array = NULL;
Chao Yub7ad7512016-02-19 18:08:46 +08003529 for (i = 0; i < NR_CURSEG_TYPE; i++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003530 kfree(array[i].sum_blk);
Chao Yub7ad7512016-02-19 18:08:46 +08003531 kfree(array[i].journal);
3532 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003533 kfree(array);
3534}
3535
3536static void destroy_free_segmap(struct f2fs_sb_info *sbi)
3537{
3538 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
3539 if (!free_i)
3540 return;
3541 SM_I(sbi)->free_info = NULL;
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003542 kvfree(free_i->free_segmap);
3543 kvfree(free_i->free_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003544 kfree(free_i);
3545}
3546
3547static void destroy_sit_info(struct f2fs_sb_info *sbi)
3548{
3549 struct sit_info *sit_i = SIT_I(sbi);
3550 unsigned int start;
3551
3552 if (!sit_i)
3553 return;
3554
3555 if (sit_i->sentries) {
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07003556 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003557 kfree(sit_i->sentries[start].cur_valid_map);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003558#ifdef CONFIG_F2FS_CHECK_FS
3559 kfree(sit_i->sentries[start].cur_valid_map_mir);
3560#endif
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003561 kfree(sit_i->sentries[start].ckpt_valid_map);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07003562 kfree(sit_i->sentries[start].discard_map);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003563 }
3564 }
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08003565 kfree(sit_i->tmp_map);
3566
Jaegeuk Kim39307a82015-09-22 13:50:47 -07003567 kvfree(sit_i->sentries);
3568 kvfree(sit_i->sec_entries);
3569 kvfree(sit_i->dirty_sentries_bitmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003570
3571 SM_I(sbi)->sit_info = NULL;
3572 kfree(sit_i->sit_bitmap);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003573#ifdef CONFIG_F2FS_CHECK_FS
3574 kfree(sit_i->sit_bitmap_mir);
3575#endif
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003576 kfree(sit_i);
3577}
3578
3579void destroy_segment_manager(struct f2fs_sb_info *sbi)
3580{
3581 struct f2fs_sm_info *sm_info = SM_I(sbi);
Gu Zhenga688b9d9e2014-04-27 14:21:21 +08003582
Chao Yu3b03f722013-11-06 09:12:04 +08003583 if (!sm_info)
3584 return;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003585 destroy_flush_cmd_control(sbi, true);
3586 destroy_discard_cmd_control(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09003587 destroy_dirty_segmap(sbi);
3588 destroy_curseg(sbi);
3589 destroy_free_segmap(sbi);
3590 destroy_sit_info(sbi);
3591 sbi->sm_info = NULL;
3592 kfree(sm_info);
3593}
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003594
3595int __init create_segment_manager_caches(void)
3596{
3597 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
Gu Zhenge8512d22014-03-07 18:43:28 +08003598 sizeof(struct discard_entry));
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003599 if (!discard_entry_slab)
Chao Yu184a5cd2014-09-04 18:13:01 +08003600 goto fail;
3601
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003602 discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
3603 sizeof(struct discard_cmd));
3604 if (!discard_cmd_slab)
Chao Yu6ab2a302016-09-05 12:28:26 +08003605 goto destroy_discard_entry;
Chao Yu275b66b2016-08-29 23:58:34 +08003606
Chao Yu184a5cd2014-09-04 18:13:01 +08003607 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
Changman Leec9ee0082014-11-21 15:42:07 +09003608 sizeof(struct sit_entry_set));
Chao Yu184a5cd2014-09-04 18:13:01 +08003609 if (!sit_entry_set_slab)
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003610 goto destroy_discard_cmd;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07003611
3612 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
3613 sizeof(struct inmem_pages));
3614 if (!inmem_entry_slab)
3615 goto destroy_sit_entry_set;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003616 return 0;
Chao Yu184a5cd2014-09-04 18:13:01 +08003617
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07003618destroy_sit_entry_set:
3619 kmem_cache_destroy(sit_entry_set_slab);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003620destroy_discard_cmd:
3621 kmem_cache_destroy(discard_cmd_slab);
Chao Yu6ab2a302016-09-05 12:28:26 +08003622destroy_discard_entry:
Chao Yu184a5cd2014-09-04 18:13:01 +08003623 kmem_cache_destroy(discard_entry_slab);
3624fail:
3625 return -ENOMEM;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003626}
3627
3628void destroy_segment_manager_caches(void)
3629{
Chao Yu184a5cd2014-09-04 18:13:01 +08003630 kmem_cache_destroy(sit_entry_set_slab);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07003631 kmem_cache_destroy(discard_cmd_slab);
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003632 kmem_cache_destroy(discard_entry_slab);
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07003633 kmem_cache_destroy(inmem_entry_slab);
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09003634}