/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
        xfs_mount_t     *mp,
        int             ag,
        int             flags)
{
        xfs_perag_t     *pag = &mp->m_perag[ag];
        int             nr_found;
        int             first_index = 0;
        int             error = 0;
        int             last_error = 0;
        int             fflag = XFS_B_ASYNC;
        int             lock_flags = XFS_ILOCK_SHARED;

        if (flags & SYNC_DELWRI)
                fflag = XFS_B_DELWRI;
        if (flags & SYNC_WAIT)
                fflag = 0;              /* synchronous overrides all */

        if (flags & (SYNC_DELWRI | SYNC_CLOSE)) {
                /*
                 * We need the I/O lock if we're going to call any of
                 * the flush/inval routines.
                 */
                lock_flags |= XFS_IOLOCK_SHARED;
        }

        do {
                struct inode    *inode;
                boolean_t       inode_refed;
                xfs_inode_t     *ip = NULL;

                /*
                 * Use a gang lookup to find the next inode in the tree,
                 * as the tree is sparse and a gang lookup walks the tree
                 * until it finds the requested number of objects.
                 */
                read_lock(&pag->pag_ici_lock);
                nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                (void**)&ip, first_index, 1);

                if (!nr_found) {
                        read_unlock(&pag->pag_ici_lock);
                        break;
                }

                /* update the index for the next lookup */
                first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);

                /*
                 * skip inodes in reclaim. Let xfs_syncsub do that for
                 * us so we don't need to worry.
                 */
                if (xfs_iflags_test(ip, (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
                        read_unlock(&pag->pag_ici_lock);
                        continue;
                }

                /* bad inodes are dealt with elsewhere */
                inode = VFS_I(ip);
                if (is_bad_inode(inode)) {
                        read_unlock(&pag->pag_ici_lock);
                        continue;
                }

                /* nothing to sync during shutdown */
                if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
                        read_unlock(&pag->pag_ici_lock);
                        return 0;
                }

                /*
                 * If we can't get a reference on the VFS_I, the inode must be
                 * in reclaim. If we can get the inode lock without blocking,
                 * it is safe to flush the inode because we hold the tree lock
                 * and xfs_iextract will block right now. Hence if we lock the
                 * inode while holding the tree lock, xfs_ireclaim() is
                 * guaranteed to block on the inode lock we now hold and hence
                 * it is safe to reference the inode until we drop the inode
                 * locks completely.
                 */
                inode_refed = B_FALSE;
                if (igrab(inode)) {
                        read_unlock(&pag->pag_ici_lock);
                        xfs_ilock(ip, lock_flags);
                        inode_refed = B_TRUE;
                } else {
                        if (!xfs_ilock_nowait(ip, lock_flags)) {
                                /* leave it to reclaim */
                                read_unlock(&pag->pag_ici_lock);
                                continue;
                        }
                        read_unlock(&pag->pag_ici_lock);
                }

                /*
                 * If we have to flush data or wait for I/O completion
                 * we need to drop the ilock that we currently hold.
                 * If we need to drop the lock, insert a marker if we
                 * have not already done so.
                 */
                if (flags & SYNC_CLOSE) {
                        xfs_iunlock(ip, XFS_ILOCK_SHARED);
                        if (XFS_FORCED_SHUTDOWN(mp))
                                xfs_tosspages(ip, 0, -1, FI_REMAPF);
                        else
                                error = xfs_flushinval_pages(ip, 0, -1,
                                                        FI_REMAPF);
                        /* wait for I/O on freeze */
                        if (flags & SYNC_IOWAIT)
                                vn_iowait(ip);

                        xfs_ilock(ip, XFS_ILOCK_SHARED);
                }

                if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
                        xfs_iunlock(ip, XFS_ILOCK_SHARED);
                        error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
                        if (flags & SYNC_IOWAIT)
                                vn_iowait(ip);
                        xfs_ilock(ip, XFS_ILOCK_SHARED);
                }

                if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
                        if (flags & SYNC_WAIT) {
                                xfs_iflock(ip);
                                if (!xfs_inode_clean(ip))
                                        error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
                                else
                                        xfs_ifunlock(ip);
                        } else if (xfs_iflock_nowait(ip)) {
                                if (!xfs_inode_clean(ip))
                                        error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
                                else
                                        xfs_ifunlock(ip);
                        }
                }

                if (lock_flags)
                        xfs_iunlock(ip, lock_flags);

                if (inode_refed) {
                        IRELE(ip);
                }

                if (error)
                        last_error = error;
                /*
                 * bail out if the filesystem is corrupted.
                 */
                if (error == EFSCORRUPTED)
                        return XFS_ERROR(error);

        } while (nr_found);

        return last_error;
}

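/*
 * Sync the inodes in every allocation group whose in-core inode radix tree
 * has been initialised, remembering the last error seen and bailing out
 * early on filesystem corruption.
 */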
int
xfs_sync_inodes(
        xfs_mount_t     *mp,
        int             flags)
{
        int             error;
        int             last_error;
        int             i;

        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return 0;
        error = 0;
        last_error = 0;

        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                if (!mp->m_perag[i].pag_ici_init)
                        continue;
                error = xfs_sync_inodes_ag(mp, i, flags);
                if (error)
                        last_error = error;
                if (error == EFSCORRUPTED)
                        break;
        }
        return XFS_ERROR(last_error);
}

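/*
 * Commit a dummy transaction that logs the root inode core and then force
 * the log.  This is used to "cover" an otherwise idle log (see the
 * xfs_log_need_covered() check in xfs_sync()) so that log recovery sees
 * an up-to-date record that everything else is clean.
 */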
STATIC int
xfs_commit_dummy_trans(
        struct xfs_mount *mp,
        uint            log_flags)
{
        struct xfs_inode *ip = mp->m_rootip;
        struct xfs_trans *tp;
        int             error;

        /*
         * Put a dummy transaction in the log to tell recovery
         * that all others are OK.
         */
        tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
        error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);

        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_ihold(tp, ip);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        /* XXX(hch): ignoring the error here.. */
        error = xfs_trans_commit(tp, 0);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        xfs_log_force(mp, 0, log_flags);
        return 0;
}

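/*
 * Write the superblock buffer out to disk.  When called from xfssyncd
 * (SYNC_BDFLUSH) the write is skipped unless the buffer can be locked
 * without sleeping, is dirty and is not pinned; otherwise the buffer is
 * written synchronously or asynchronously according to SYNC_WAIT.
 */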
STATIC int
xfs_sync_fsdata(
        struct xfs_mount        *mp,
        int                     flags)
{
        struct xfs_buf          *bp;
        struct xfs_buf_log_item *bip;
        int                     error = 0;

        /*
         * If this is xfssyncd() then only sync the superblock if we can
         * lock it without sleeping and it is not pinned.
         */
        if (flags & SYNC_BDFLUSH) {
                ASSERT(!(flags & SYNC_WAIT));

                bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
                if (!bp)
                        goto out;

                bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
                if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
                        goto out_brelse;
        } else {
                bp = xfs_getsb(mp, 0);

                /*
                 * If the buffer is pinned then push on the log so we won't
                 * get stuck waiting in the write for someone, maybe
                 * ourselves, to flush the log.
                 *
                 * Even though we just pushed the log above, we did not have
                 * the superblock buffer locked at that point so it can
                 * become pinned in between there and here.
                 */
                if (XFS_BUF_ISPINNED(bp))
                        xfs_log_force(mp, 0, XFS_LOG_FORCE);
        }


        if (flags & SYNC_WAIT)
                XFS_BUF_UNASYNC(bp);
        else
                XFS_BUF_ASYNC(bp);

        return xfs_bwrite(mp, bp);

 out_brelse:
        xfs_buf_relse(bp);
 out:
        return error;
}

/*
 * xfs_sync flushes any pending I/O to the file system.
 *
 * This routine is called by vfs_sync() to make sure that things make it
 * out to disk eventually, on sync() system calls to flush out everything,
 * and when the file system is unmounted.  For the vfs_sync() case, all
 * we really need to do is sync out the log to make all of our meta-data
 * updates permanent (except for timestamps).  For calls from pflushd(),
 * dirty pages are kept moving by calling pdflush() on the inodes
 * containing them.  We also flush the inodes that we can lock without
 * sleeping and the superblock if we can lock it without sleeping from
 * vfs_sync() so that items at the tail of the log are always moving out.
 *
 * Flags:
 *      SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want
 *                     to sleep if we can help it.  All we really need
 *                     to do is ensure that the log is synced at least
 *                     periodically.  We also push the inodes and
 *                     superblock if we can lock them without sleeping
 *                     and they are not pinned.
 *      SYNC_ATTR    - We need to flush the inodes.  If SYNC_BDFLUSH is not
 *                     set, then we really want to lock each inode and flush
 *                     it.
 *      SYNC_WAIT    - All the flushes that take place in this call should
 *                     be synchronous.
 *      SYNC_DELWRI  - This tells us to push dirty pages associated with
 *                     inodes.  SYNC_WAIT and SYNC_BDFLUSH are used to
 *                     determine if they should be flushed sync, async, or
 *                     delwri.
 *      SYNC_CLOSE   - This flag is passed when the system is being
 *                     unmounted.  We should sync and invalidate everything.
 *      SYNC_FSDATA  - This indicates that the caller would like to make
 *                     sure the superblock is safe on disk.  We can ensure
 *                     this by simply making sure the log gets flushed
 *                     if SYNC_BDFLUSH is set, and by actually writing it
 *                     out otherwise.
 *      SYNC_IOWAIT  - The caller wants us to wait for all data I/O to complete
 *                     before we return (including direct I/O). Forms the drain
 *                     side of the write barrier needed to safely quiesce the
 *                     filesystem.
 *
 */
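/*
 * For example, the periodic background sync issued by xfs_sync_worker()
 * below passes (SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR); a caller wanting
 * a full synchronous flush would typically combine SYNC_DELWRI, SYNC_ATTR,
 * SYNC_WAIT and SYNC_IOWAIT instead.
 */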
int
xfs_sync(
        xfs_mount_t     *mp,
        int             flags)
{
        int             error;
        int             last_error = 0;
        uint            log_flags = XFS_LOG_FORCE;

        /*
         * Get the Quota Manager to flush the dquots.
         *
         * If XFS quota support is not enabled or this filesystem
         * instance does not use quotas XFS_QM_DQSYNC will always
         * return zero.
         */
        error = XFS_QM_DQSYNC(mp, flags);
        if (error) {
                /*
                 * If we got an IO error, we will be shutting down.
                 * So, there's nothing more for us to do here.
                 */
                ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
                if (XFS_FORCED_SHUTDOWN(mp))
                        return XFS_ERROR(error);
        }

        if (flags & SYNC_IOWAIT)
                xfs_filestream_flush(mp);

        /*
         * Sync out the log.  This ensures that the log is periodically
         * flushed even if there is not enough activity to fill it up.
         */
        if (flags & SYNC_WAIT)
                log_flags |= XFS_LOG_SYNC;

        xfs_log_force(mp, (xfs_lsn_t)0, log_flags);

        if (flags & (SYNC_ATTR|SYNC_DELWRI)) {
                if (flags & SYNC_BDFLUSH)
                        xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
                else
                        error = xfs_sync_inodes(mp, flags);
        }

        /*
         * Flushing out dirty data above probably generated more
         * log activity, so if this isn't vfs_sync() then flush
         * the log again.
         */
        if (flags & SYNC_DELWRI)
                xfs_log_force(mp, 0, log_flags);

        if (flags & SYNC_FSDATA) {
                error = xfs_sync_fsdata(mp, flags);
                if (error)
                        last_error = error;
        }

        /*
         * Now check to see if the log needs a "dummy" transaction.
         */
        if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) {
                error = xfs_commit_dummy_trans(mp, log_flags);
                if (error)
                        return error;
        }

        /*
         * When shutting down, we need to ensure that the AIL is pushed
         * to disk or the filesystem can appear corrupt from the PROM.
         */
        if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) {
                XFS_bflush(mp->m_ddev_targp);
                if (mp->m_rtdev_targp) {
                        XFS_bflush(mp->m_rtdev_targp);
                }
        }

        return XFS_ERROR(last_error);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount *mp,
        void            *data,
        void            (*syncer)(struct xfs_mount *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        filemap_flush(inode->i_mapping);
        iput(inode);
}

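/*
 * Queue an asynchronous flush of the inode's dirty pages via xfssyncd and
 * give it a moment to make progress before returning to the ENOSPC retry
 * path.  The worker drops the inode reference taken here.
 */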
void
xfs_flush_inode(
        xfs_inode_t     *ip)
{
        struct inode    *inode = VFS_I(ip);

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        sync_blockdev(mp->m_super->s_bdev);
        iput(inode);
}

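/*
 * As above, but queue a sync of the whole block device and then force the
 * log synchronously; used when flushing a single inode was not enough to
 * free up reserved space.
 */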
void
xfs_flush_device(
        xfs_inode_t     *ip)
{
        struct inode    *inode = VFS_I(ip);

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

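/*
 * Default work item run on every xfssyncd timeout: sync the superblock and
 * push dirty inodes for read-write mounts, then wake anyone waiting for a
 * sync pass to complete.
 */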
STATIC void
xfs_sync_worker(
        struct xfs_mount *mp,
        void            *unused)
{
        int             error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY))
                error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

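/*
 * The per-mount sync daemon.  Sleeps for xfs_syncd_centisecs between passes,
 * requeues the default sync work item when the timeout expires, and runs any
 * work items queued by xfs_syncd_queue_work() before going back to sleep.
 */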
STATIC int
xfssyncd(
        void                    *arg)
{
        struct xfs_mount        *mp = arg;
        long                    timeleft;
        bhv_vfs_sync_work_t     *work, *n;
        LIST_HEAD               (tmp);

        set_freezable();
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
                        kmem_free(work);
                }
        }

        return 0;
}

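/*
 * Start the xfssyncd thread for this mount.
 */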
int
xfs_syncd_init(
        struct xfs_mount        *mp)
{
        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
        if (IS_ERR(mp->m_sync_task))
                return -PTR_ERR(mp->m_sync_task);
        return 0;
}

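/*
 * Stop the xfssyncd thread for this mount.
 */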
void
xfs_syncd_stop(
        struct xfs_mount        *mp)
{
        kthread_stop(mp->m_sync_task);
}
