/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota_defs.h"
#include "xfs_quota.h"
#include "xfs_btree.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_rmap_btree.h"

/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks.  This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism.  At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible.  This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite).  If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock.  (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.)  Delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into real mappings by allocating
 * blocks and replacing the delalloc mappings with real ones.  A delalloc
 * mapping can be replaced by several real ones if the free space is
 * fragmented.
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar.  The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded.  IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork.  This will be discussed shortly.
 *
 * For now, unaligned direct I/O writes will be bounced back to the page
 * cache.  Block-aligned direct I/O writes will use the same mechanism as
 * buffered writes.
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written.  Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd.  For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork.
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type.  This is required for direct I/O
 * because we only have one ioend for the whole dio, and we have to be able
 * to remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure.  Better yet, the more ground we can cover with one
 * ioend, the better.
 */

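/*
 * Roughly, then, the CoW write lifecycle implemented by the helpers in
 * this file (an illustrative sketch, not an exhaustive call graph):
 *
 *      xfs_reflink_reserve_cow_range()  - create delalloc reservations in
 *                                         the CoW fork at write_begin/
 *                                         page_mkwrite time
 *      (writeback allocates real blocks and writes out the new data)
 *      xfs_reflink_end_cow()            - at I/O completion, move the new
 *                                         mappings from the CoW fork into
 *                                         the data fork
 *      xfs_reflink_cancel_cow_range()   - back out reservations that are
 *                                         no longer needed, e.g. after a
 *                                         failed write
 */
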
/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen.  If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks.  If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
int
xfs_reflink_find_shared(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_agblock_t           agbno,
        xfs_extlen_t            aglen,
        xfs_agblock_t           *fbno,
        xfs_extlen_t            *flen,
        bool                    find_end_of_shared)
{
        struct xfs_buf          *agbp;
        struct xfs_btree_cur    *cur;
        int                     error;

        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
        if (error)
                return error;

        cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);

        error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
                        find_end_of_shared);

        xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);

        xfs_buf_relse(agbp);
        return error;
}

/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status.  More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping.  If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent.  If
 * a shared region begins partway into the mapping, trim the mapping to
 * end at the start of the shared extent.  If there are no shared regions
 * that overlap, just return the original extent.
 */
int
xfs_reflink_trim_around_shared(
        struct xfs_inode        *ip,
        struct xfs_bmbt_irec    *irec,
        bool                    *shared,
        bool                    *trimmed)
{
        xfs_agnumber_t          agno;
        xfs_agblock_t           agbno;
        xfs_extlen_t            aglen;
        xfs_agblock_t           fbno;
        xfs_extlen_t            flen;
        int                     error = 0;

        /* Holes, unwritten, and delalloc extents cannot be shared */
        if (!xfs_is_reflink_inode(ip) ||
            ISUNWRITTEN(irec) ||
            irec->br_startblock == HOLESTARTBLOCK ||
            irec->br_startblock == DELAYSTARTBLOCK) {
                *shared = false;
                return 0;
        }

        trace_xfs_reflink_trim_around_shared(ip, irec);

        agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
        agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
        aglen = irec->br_blockcount;

        error = xfs_reflink_find_shared(ip->i_mount, agno, agbno,
                        aglen, &fbno, &flen, true);
        if (error)
                return error;

        *shared = *trimmed = false;
        if (fbno == NULLAGBLOCK) {
                /* No shared blocks at all. */
                return 0;
        } else if (fbno == agbno) {
                /*
                 * The start of this extent is shared.  Truncate the
                 * mapping at the end of the shared region so that a
                 * subsequent iteration starts at the start of the
                 * unshared region.
                 */
                irec->br_blockcount = flen;
                *shared = true;
                if (flen != aglen)
                        *trimmed = true;
                return 0;
        } else {
                /*
                 * There's a shared extent midway through this extent.
                 * Truncate the mapping at the start of the shared
                 * extent so that a subsequent iteration starts at the
                 * start of the shared region.
                 */
                irec->br_blockcount = fbno - agbno;
                *trimmed = true;
                return 0;
        }
}

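/*
 * For example (illustrative numbers only): given a 100-block mapping
 * starting at agbno 50, the cases above play out as follows:
 *
 *      shared extent at [50, 70)  => mapping trimmed to 20 blocks,
 *                                    *shared = true, *trimmed = true
 *      shared extent at [80, 90)  => mapping trimmed to 30 blocks (ending
 *                                    at agbno 80), *shared = false,
 *                                    *trimmed = true
 *      no shared extent           => mapping returned unchanged
 */
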
/* Create a CoW reservation for a range of blocks within a file. */
static int
__xfs_reflink_reserve_cow(
        struct xfs_inode        *ip,
        xfs_fileoff_t           *offset_fsb,
        xfs_fileoff_t           end_fsb)
{
        struct xfs_bmbt_irec    got, prev, imap;
        xfs_fileoff_t           orig_end_fsb;
        int                     nimaps, eof = 0, error = 0;
        bool                    shared = false, trimmed = false;
        xfs_extnum_t            idx;

        /* Already reserved?  Skip the refcount btree access. */
        xfs_bmap_search_extents(ip, *offset_fsb, XFS_COW_FORK, &eof, &idx,
                        &got, &prev);
        if (!eof && got.br_startoff <= *offset_fsb) {
                end_fsb = orig_end_fsb = got.br_startoff + got.br_blockcount;
                trace_xfs_reflink_cow_found(ip, &got);
                goto done;
        }

        /* Read extent from the source file. */
        nimaps = 1;
        error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
                        &imap, &nimaps, 0);
        if (error)
                goto out_unlock;
        ASSERT(nimaps == 1);

        /* Trim the mapping to the nearest shared extent boundary. */
        error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
        if (error)
                goto out_unlock;

        end_fsb = orig_end_fsb = imap.br_startoff + imap.br_blockcount;

        /* Not shared?  Just report the (potentially capped) extent. */
        if (!shared)
                goto done;

        /*
         * Fork all the shared blocks from our write offset until the end of
         * the extent.
         */
        error = xfs_qm_dqattach_locked(ip, 0);
        if (error)
                goto out_unlock;

retry:
        error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, *offset_fsb,
                        end_fsb - *offset_fsb, &got,
                        &prev, &idx, eof);
        switch (error) {
        case 0:
                break;
        case -ENOSPC:
        case -EDQUOT:
                /* retry without any preallocation */
                trace_xfs_reflink_cow_enospc(ip, &imap);
                if (end_fsb != orig_end_fsb) {
                        end_fsb = orig_end_fsb;
                        goto retry;
                }
                /*FALLTHRU*/
        default:
                goto out_unlock;
        }

        trace_xfs_reflink_cow_alloc(ip, &got);
done:
        *offset_fsb = end_fsb;
out_unlock:
        return error;
}

/* Create a CoW reservation for part of a file. */
int
xfs_reflink_reserve_cow_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               count)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb, end_fsb;
        int                     error = 0;

        trace_xfs_reflink_reserve_cow_range(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        end_fsb = XFS_B_TO_FSB(mp, offset + count);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        while (offset_fsb < end_fsb) {
                error = __xfs_reflink_reserve_cow(ip, &offset_fsb, end_fsb);
                if (error) {
                        trace_xfs_reflink_reserve_cow_range_error(ip, error,
                                        _RET_IP_);
                        break;
                }
        }
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        return error;
}

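/*
 * For example (a sketch, not a quote from any caller): a buffered write
 * path that may touch shared blocks in [pos, pos + len) would reserve
 * CoW blocks before dirtying the pages:
 *
 *      error = xfs_reflink_reserve_cow_range(ip, pos, len);
 *      if (error)
 *              return error;
 *
 * so that writeback can later allocate out of the CoW fork.
 */
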
/*
 * Find the CoW reservation (and whether or not it needs block allocation)
 * for a given byte offset of a file.
 */
bool
xfs_reflink_find_cow_mapping(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        struct xfs_bmbt_irec    *imap,
        bool                    *need_alloc)
{
        struct xfs_bmbt_irec    irec;
        struct xfs_ifork        *ifp;
        struct xfs_bmbt_rec_host *gotp;
        xfs_fileoff_t           bno;
        xfs_extnum_t            idx;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
        ASSERT(xfs_is_reflink_inode(ip));

        /* Find the extent in the CoW fork. */
        ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        bno = XFS_B_TO_FSBT(ip->i_mount, offset);
        gotp = xfs_iext_bno_to_ext(ifp, bno, &idx);
        if (!gotp)
                return false;

        xfs_bmbt_get_all(gotp, &irec);
        if (bno >= irec.br_startoff + irec.br_blockcount ||
            bno < irec.br_startoff)
                return false;

        trace_xfs_reflink_find_cow_mapping(ip, offset, 1, XFS_IO_OVERWRITE,
                        &irec);

        /* If it's still delalloc, we must allocate later. */
        *imap = irec;
        *need_alloc = !!(isnullstartblock(irec.br_startblock));

        return true;
}

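/*
 * A writeback caller might use this as follows (a sketch only; the
 * ILOCK must already be held, per the asserts above):
 *
 *      if (xfs_reflink_find_cow_mapping(ip, offset, &imap, &need_alloc)) {
 *              ... write via the CoW reservation described by imap,
 *                  converting the delalloc extent first if need_alloc ...
 *      }
 */
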
/*
 * Trim an extent to end at the next CoW reservation past offset_fsb.
 */
int
xfs_reflink_trim_irec_to_next_cow(
        struct xfs_inode        *ip,
        xfs_fileoff_t           offset_fsb,
        struct xfs_bmbt_irec    *imap)
{
        struct xfs_bmbt_irec    irec;
        struct xfs_ifork        *ifp;
        struct xfs_bmbt_rec_host *gotp;
        xfs_extnum_t            idx;

        if (!xfs_is_reflink_inode(ip))
                return 0;

        /* Find the extent in the CoW fork. */
        ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        gotp = xfs_iext_bno_to_ext(ifp, offset_fsb, &idx);
        if (!gotp)
                return 0;
        xfs_bmbt_get_all(gotp, &irec);

        /* This is the extent before; try sliding up one. */
        if (irec.br_startoff < offset_fsb) {
                idx++;
                if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
                        return 0;
                gotp = xfs_iext_get_ext(ifp, idx);
                xfs_bmbt_get_all(gotp, &irec);
        }

        if (irec.br_startoff >= imap->br_startoff + imap->br_blockcount)
                return 0;

        imap->br_blockcount = irec.br_startoff - imap->br_startoff;
        trace_xfs_reflink_trim_irec(ip, imap);

        return 0;
}

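/*
 * For example (illustrative numbers only): if imap covers file blocks
 * [0, 100) and the first CoW reservation at or beyond offset_fsb starts
 * at block 40, imap is trimmed to [0, 40), so the caller handles the
 * non-CoW and CoW ranges in separate passes.
 */
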
/*
 * Cancel all pending CoW reservations for some block range of an inode.
 */
int
xfs_reflink_cancel_cow_blocks(
        struct xfs_inode        *ip,
        struct xfs_trans        **tpp,
        xfs_fileoff_t           offset_fsb,
        xfs_fileoff_t           end_fsb)
{
        struct xfs_bmbt_irec    irec;
        xfs_filblks_t           count_fsb;
        xfs_fsblock_t           firstfsb;
        struct xfs_defer_ops    dfops;
        int                     error = 0;
        int                     nimaps;

        if (!xfs_is_reflink_inode(ip))
                return 0;

        /* Go find the old extent in the CoW fork. */
        while (offset_fsb < end_fsb) {
                nimaps = 1;
                count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
                error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
                                &nimaps, XFS_BMAPI_COWFORK);
                if (error)
                        break;
                ASSERT(nimaps == 1);

                trace_xfs_reflink_cancel_cow(ip, &irec);

                if (irec.br_startblock == DELAYSTARTBLOCK) {
                        /* Free a delayed allocation. */
                        xfs_mod_fdblocks(ip->i_mount, irec.br_blockcount,
                                        false);
                        ip->i_delayed_blks -= irec.br_blockcount;

                        /* Remove the mapping from the CoW fork. */
                        error = xfs_bunmapi_cow(ip, &irec);
                        if (error)
                                break;
                } else if (irec.br_startblock == HOLESTARTBLOCK) {
                        /* empty */
                } else {
                        xfs_trans_ijoin(*tpp, ip, 0);
                        xfs_defer_init(&dfops, &firstfsb);

                        xfs_bmap_add_free(ip->i_mount, &dfops,
                                        irec.br_startblock, irec.br_blockcount,
                                        NULL);

                        /* Update quota accounting */
                        xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
                                        -(long)irec.br_blockcount);

                        /* Roll the transaction */
                        error = xfs_defer_finish(tpp, &dfops, ip);
                        if (error) {
                                xfs_defer_cancel(&dfops);
                                break;
                        }

                        /* Remove the mapping from the CoW fork. */
                        error = xfs_bunmapi_cow(ip, &irec);
                        if (error)
                                break;
                }

                /* Roll on... */
                offset_fsb = irec.br_startoff + irec.br_blockcount;
        }

        return error;
}

/*
 * Cancel all pending CoW reservations for some byte range of an inode.
 */
int
xfs_reflink_cancel_cow_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               count)
{
        struct xfs_trans        *tp;
        xfs_fileoff_t           offset_fsb;
        xfs_fileoff_t           end_fsb;
        int                     error;

        trace_xfs_reflink_cancel_cow_range(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        if (count == NULLFILEOFF)
                end_fsb = NULLFILEOFF;
        else
                end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

        /* Start a rolling transaction to remove the mappings */
        error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
                        0, 0, 0, &tp);
        if (error)
                goto out;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        /* Scrape out the old CoW reservations */
        error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
        if (error)
                goto out_cancel;

        error = xfs_trans_commit(tp);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
        trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
        return error;
}

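/*
 * For example (a sketch only): a caller backing out of a failed write
 * over [offset, offset + count) would drop the unused reservations with:
 *
 *      xfs_reflink_cancel_cow_range(ip, offset, count);
 *
 * Passing count == NULLFILEOFF cancels every reservation from offset to
 * the end of the file.
 */
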
/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               count)
{
        struct xfs_bmbt_irec    irec;
        struct xfs_bmbt_irec    uirec;
        struct xfs_trans        *tp;
        xfs_fileoff_t           offset_fsb;
        xfs_fileoff_t           end_fsb;
        xfs_filblks_t           count_fsb;
        xfs_fsblock_t           firstfsb;
        struct xfs_defer_ops    dfops;
        int                     error;
        unsigned int            resblks;
        xfs_filblks_t           ilen;
        xfs_filblks_t           rlen;
        int                     nimaps;

        trace_xfs_reflink_end_cow(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
        count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

        /* Start a rolling transaction to switch the mappings */
        resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
        error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
                        resblks, 0, 0, &tp);
        if (error)
                goto out;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        /* Go find the old extent in the CoW fork. */
        while (offset_fsb < end_fsb) {
                /* Read extent from the source file */
                nimaps = 1;
                count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
                error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
                                &nimaps, XFS_BMAPI_COWFORK);
                if (error)
                        goto out_cancel;
                ASSERT(nimaps == 1);

                ASSERT(irec.br_startblock != DELAYSTARTBLOCK);
                trace_xfs_reflink_cow_remap(ip, &irec);

                /*
                 * We can have a hole in the CoW fork if part of a direct I/O
                 * write is CoW but part of it isn't.
                 */
                rlen = ilen = irec.br_blockcount;
                if (irec.br_startblock == HOLESTARTBLOCK)
                        goto next_extent;

                /* Unmap the old blocks in the data fork. */
                while (rlen) {
                        xfs_defer_init(&dfops, &firstfsb);
                        error = __xfs_bunmapi(tp, ip, irec.br_startoff,
                                        &rlen, 0, 1, &firstfsb, &dfops);
                        if (error)
                                goto out_defer;

                        /*
                         * Trim the extent to whatever got unmapped.
                         * Remember, bunmapi works backwards.
                         */
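                        /*
                         * For example (illustrative numbers only): if irec
                         * covered 10 blocks and __xfs_bunmapi came back with
                         * rlen == 6, the last 4 blocks were unmapped in this
                         * pass; uirec below describes those 4 blocks, and the
                         * remaining 6 are retried on the next loop iteration.
                         */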
                        uirec.br_startblock = irec.br_startblock + rlen;
                        uirec.br_startoff = irec.br_startoff + rlen;
                        uirec.br_blockcount = irec.br_blockcount - rlen;
                        irec.br_blockcount = rlen;
                        trace_xfs_reflink_cow_remap_piece(ip, &uirec);

                        /* Map the new blocks into the data fork. */
                        error = xfs_bmap_map_extent(tp->t_mountp, &dfops,
                                        ip, &uirec);
                        if (error)
                                goto out_defer;

                        /* Remove the mapping from the CoW fork. */
                        error = xfs_bunmapi_cow(ip, &uirec);
                        if (error)
                                goto out_defer;

                        error = xfs_defer_finish(&tp, &dfops, ip);
                        if (error)
                                goto out_defer;
                }

next_extent:
                /* Roll on... */
                offset_fsb = irec.br_startoff + ilen;
        }

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                goto out;
        return 0;

out_defer:
        xfs_defer_cancel(&dfops);
out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
        trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
        return error;
}