/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

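/*
 * Allocate a new extent node for @ei and link it into the rb-tree of @et at
 * the position given by @parent and @p.  Returns NULL if the slab allocation
 * fails.  Callers hold et->lock for writing.
 */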
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}

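/*
 * Unlink @en from the rb-tree of @et and drop the per-sb node count.  The
 * cached lookup hint is cleared if it pointed at @en; freeing the node
 * itself is left to the caller.
 */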
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}

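/*
 * Find the extent tree of @inode in the per-sb radix tree, allocating and
 * initializing a new one on first use, and take a reference on it.  The
 * resulting tree is cached in F2FS_I(inode)->extent_tree.
 */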
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

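/*
 * Return the extent node covering @fofs, trying the cached node first and
 * falling back to an rb-tree walk.  Returns NULL if no extent covers @fofs.
 * Called with et->lock held.
 */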
static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en = et->cached_en;

	if (en) {
		struct extent_info *cei = &en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
			stat_inc_cached_node_hit(sbi);
			return en;
		}
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			stat_inc_rbtree_node_hit(sbi);
			return en;
		}
	}
	return NULL;
}

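/*
 * Attach the first extent node of an empty tree and make it both the
 * largest extent and the cached lookup hint.
 */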
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

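/*
 * Free extent nodes of @et.  With @free_all the whole tree is emptied;
 * otherwise only nodes that are no longer on the global LRU list (already
 * picked as shrink victims) are released.  Returns the number of nodes
 * freed.
 */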
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}

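/*
 * Invalidate the cached largest extent of @inode if it overlaps the range
 * [@fofs, @fofs + @len).
 */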
static void __drop_largest_extent(struct inode *inode,
					pgoff_t fofs, unsigned int len)
{
	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;

	if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs)
		largest->len = 0;
}

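/*
 * Grab (or create) the extent tree for @inode and seed it with the on-disk
 * extent @i_ext, provided that extent is at least F2FS_MIN_EXTENT_LEN
 * blocks long and the tree is still empty.
 */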
void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode))
		return;

	et = __grab_extent_tree(inode);

	if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
			le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	write_lock(&et->lock);
	if (et->count)
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
}

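/*
 * Look up @pgofs in the extent cache of @inode.  The largest-extent hint is
 * checked first, then the rb-tree; on a hit *ei is filled and the node is
 * moved to the tail of the global LRU list.  Returns true on a cache hit.
 */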
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = __lookup_extent_tree(sbi, et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
		spin_unlock(&sbi->extent_lock);
		ret = true;
	}
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

/*
 * Look up the extent covering @fofs.  If it exists, return it; if not,
 * return NULL and set:
 * @prev_ex: extent before @fofs
 * @next_ex: extent after @fofs
 * @insert_p: insert point for a new extent at @fofs
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
				unsigned int fofs,
				struct extent_node **prev_ex,
				struct extent_node **next_ex,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent)
{
	struct rb_node **pnode = &et->root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = et->cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_ex = NULL;
	*next_ex = NULL;

	if (RB_EMPTY_ROOT(&et->root))
		return NULL;

	if (en) {
		struct extent_info *cei = &en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			pnode = &(*pnode)->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_ex = tmp_node ?
		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_ex = tmp_node ?
		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_ex = tmp_node ?
			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
	}
	if (fofs == en->ei.fofs + en->ei.len - 1) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&en->rb_node);
		*next_ex = tmp_node ?
			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
	}
	return en;
}

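/*
 * Try to merge the new extent @ei with its neighbours @prev_ex and/or
 * @next_ex.  When both merges apply, the previous node is detached and
 * handed back through @den so the caller can free it outside the extent
 * spin lock.  Returns the merged node, or NULL if no merge was possible.
 */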
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		if (en) {
			__detach_extent_node(sbi, et, prev_ex);
			*den = prev_ex;
		}
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		en = next_ex;
	}

	if (en) {
		__try_update_largest_extent(et, en);
		et->cached_en = en;
	}
	return en;
}

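/*
 * Insert a node for @ei into the rb-tree, either at the pre-computed
 * position (@insert_p/@insert_parent from a previous lookup) or by
 * searching from the root.  Updates the largest extent and the cached
 * lookup hint on success.
 */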
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs)
			p = &(*p)->rb_left;
		else if (ei->fofs >= en->ei.fofs + en->ei.len)
			p = &(*p)->rb_right;
		else
			f2fs_bug_on(sbi, 1);
	}
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);
	et->cached_en = en;
	return en;
}

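/*
 * Update the extent cache for the range [@fofs, @fofs + @len): existing
 * extent nodes overlapping the range are shrunk, split or removed, and,
 * when @blkaddr is valid, a new extent describing the mapping is merged or
 * inserted.  Returns true if the largest extent changed, so the caller
 * knows the inode needs to be synced.
 */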
static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;

	if (!et)
		return false;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return false;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
	__drop_largest_extent(inode, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
					&insert_p, &insert_parent);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = node ?
				rb_entry(node, struct extent_node, rb_node)
				: NULL;
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__detach_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers regarding the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}

		/* update in global extent list */
		spin_lock(&sbi->extent_lock);
		if (!parts && !list_empty(&en->list))
			list_del(&en->list);
		if (en1)
			list_add_tail(&en1->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);

		/* release extent node */
		if (!parts)
			kmem_cache_free(extent_node_slab, en);

		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		struct extent_node *den = NULL;

		set_extent_info(&ei, fofs, blkaddr, len);
		en1 = __try_merge_extent_node(sbi, et, &ei, &den,
							prev_en, next_en);
		if (!en1)
			en1 = __insert_extent_tree(sbi, et, &ei,
						insert_p, insert_parent);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
		}

		spin_lock(&sbi->extent_lock);
		if (en1) {
			if (list_empty(&en1->list))
				list_add_tail(&en1->list, &sbi->extent_list);
			else
				list_move_tail(&en1->list, &sbi->extent_list);
		}
		if (den && !list_empty(&den->list))
			list_del(&den->list);
		spin_unlock(&sbi->extent_lock);

		if (den)
			kmem_cache_free(extent_node_slab, den);
	}

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		__free_extent_tree(sbi, et, true);

	write_unlock(&et->lock);

	return !__is_extent_same(&prev, &et->largest);
}

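/*
 * Shrinker entry point: try to release up to @nr_shrink objects, first by
 * dropping whole zombie trees (trees no longer referenced by any in-memory
 * inode), then by freeing the oldest extent nodes from the global LRU
 * list.  Returns the number of trees and nodes released.
 */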
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_root *root = &sbi->extent_tree_root;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;
	bool do_free = false;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!down_write_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	while ((found = radix_tree_gang_lookup(root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			if (!atomic_read(&et->refcount)) {
				write_lock(&et->lock);
				node_cnt += __free_extent_tree(sbi, et, true);
				write_unlock(&et->lock);

				radix_tree_delete(root, et->ino);
				kmem_cache_free(extent_tree_slab, et);
				atomic_dec(&sbi->total_ext_tree);
				atomic_dec(&sbi->total_zombie_tree);
				tree_cnt++;

				if (node_cnt + tree_cnt >= nr_shrink)
					goto unlock_out;
			}
		}
	}
	up_write(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!down_write_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!remained--)
			break;
		list_del_init(&en->list);
		do_free = true;
	}
	spin_unlock(&sbi->extent_lock);

	if (!do_free)
		goto unlock_out;

	/*
	 * reset ino for searching victims from beginning of global extent tree.
	 */
	ino = F2FS_ROOT_INO(sbi);

	while ((found = radix_tree_gang_lookup(root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			if (write_trylock(&et->lock)) {
				node_cnt += __free_extent_tree(sbi, et, false);
				write_unlock(&et->lock);
			}

			if (node_cnt + tree_cnt >= nr_shrink)
				goto unlock_out;
		}
	}
unlock_out:
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	return node_cnt;
}

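/*
 * Called at inode eviction.  A still-linked inode keeps its non-empty tree
 * around as a zombie so it can be reused if the inode is read again;
 * otherwise all nodes are freed and the tree itself is removed from the
 * radix tree.
 */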
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
		atomic_dec(&et->refcount);
		atomic_inc(&sbi->total_zombie_tree);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	atomic_dec(&et->refcount);
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	up_write(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
		sync_inode_page(dn);
}

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
		sync_inode_page(dn);
}

void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}