/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * xfs_sync flushes any pending I/O to file system vfsp.
 *
 * This routine is called by vfs_sync() to make sure that things make it
 * out to disk eventually, on sync() system calls to flush out everything,
 * and when the file system is unmounted.  For the vfs_sync() case, all
 * we really need to do is sync out the log to make all of our meta-data
 * updates permanent (except for timestamps).  For calls from pflushd(),
 * dirty pages are kept moving by calling pdflush() on the inodes
 * containing them.  We also flush the inodes that we can lock without
 * sleeping and the superblock if we can lock it without sleeping from
 * vfs_sync() so that items at the tail of the log are always moving out.
 *
 * Flags:
 *      SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want
 *                     to sleep if we can help it.  All we really need
 *                     to do is ensure that the log is synced at least
 *                     periodically.  We also push the inodes and
 *                     superblock if we can lock them without sleeping
 *                     and they are not pinned.
 *      SYNC_ATTR    - We need to flush the inodes.  If SYNC_BDFLUSH is not
 *                     set, then we really want to lock each inode and flush
 *                     it.
 *      SYNC_WAIT    - All the flushes that take place in this call should
 *                     be synchronous.
 *      SYNC_DELWRI  - This tells us to push dirty pages associated with
 *                     inodes.  SYNC_WAIT and SYNC_BDFLUSH are used to
 *                     determine if they should be flushed sync, async, or
 *                     delwri.
 *      SYNC_CLOSE   - This flag is passed when the system is being
 *                     unmounted.  We should sync and invalidate everything.
 *      SYNC_FSDATA  - This indicates that the caller would like to make
 *                     sure the superblock is safe on disk.  We can ensure
 *                     this by simply making sure the log gets flushed
 *                     if SYNC_BDFLUSH is set, and by actually writing it
 *                     out otherwise.
 *      SYNC_IOWAIT  - The caller wants us to wait for all data I/O to complete
 *                     before we return (including direct I/O).  Forms the drain
 *                     side of the write barrier needed to safely quiesce the
 *                     filesystem.
 *
 */
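/*
 * Example of how these flags combine in practice: the periodic syncer in
 * xfs_sync_worker() below issues a non-blocking background pass as
 *
 *	xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
 *
 * while the SYNC_CLOSE | SYNC_WAIT combination checked at the bottom of
 * xfs_syncsub() makes everything synchronous for unmount-style callers.
 */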
int
xfs_sync(
        xfs_mount_t     *mp,
        int             flags)
{
        int             error;

        /*
         * Get the Quota Manager to flush the dquots.
         *
         * If XFS quota support is not enabled or this filesystem
         * instance does not use quotas XFS_QM_DQSYNC will always
         * return zero.
         */
        error = XFS_QM_DQSYNC(mp, flags);
        if (error) {
                /*
                 * If we got an IO error, we will be shutting down.
                 * So, there's nothing more for us to do here.
                 */
                ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
                if (XFS_FORCED_SHUTDOWN(mp))
                        return XFS_ERROR(error);
        }

        if (flags & SYNC_IOWAIT)
                xfs_filestream_flush(mp);

        return xfs_syncsub(mp, flags, NULL);
}

/*
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic vfs_sync
 * interface as explained above under xfs_sync.
 *
 */
int
xfs_sync_inodes(
        xfs_mount_t     *mp,
        int             flags,
        int             *bypassed)
{
        xfs_inode_t     *ip = NULL;
        struct inode    *vp = NULL;
        int             error;
        int             last_error;
        uint64_t        fflag;
        uint            lock_flags;
        uint            base_lock_flags;
        boolean_t       mount_locked;
        boolean_t       vnode_refed;
        int             preempt;
        xfs_iptr_t      *ipointer;
#ifdef DEBUG
        boolean_t       ipointer_in = B_FALSE;

#define IPOINTER_SET    ipointer_in = B_TRUE
#define IPOINTER_CLR    ipointer_in = B_FALSE
#else
#define IPOINTER_SET
#define IPOINTER_CLR
#endif


/* Insert a marker record into the inode list after inode ip. The list
 * must be locked when this is called. After the call the list will no
 * longer be locked.
 */
#define IPOINTER_INSERT(ip, mp) { \
                ASSERT(ipointer_in == B_FALSE); \
                ipointer->ip_mnext = ip->i_mnext; \
                ipointer->ip_mprev = ip; \
                ip->i_mnext = (xfs_inode_t *)ipointer; \
                ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \
                preempt = 0; \
                XFS_MOUNT_IUNLOCK(mp); \
                mount_locked = B_FALSE; \
                IPOINTER_SET; \
        }

/* Remove the marker from the inode list. If the marker was the only item
 * in the list then there are no remaining inodes and we should zero out
 * the whole list. If we are the current head of the list then move the head
 * past us.
 */
#define IPOINTER_REMOVE(ip, mp) { \
                ASSERT(ipointer_in == B_TRUE); \
                if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \
                        ip = ipointer->ip_mnext; \
                        ip->i_mprev = ipointer->ip_mprev; \
                        ipointer->ip_mprev->i_mnext = ip; \
                        if (mp->m_inodes == (xfs_inode_t *)ipointer) { \
                                mp->m_inodes = ip; \
                        } \
                } else { \
                        ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \
                        mp->m_inodes = NULL; \
                        ip = NULL; \
                } \
                IPOINTER_CLR; \
        }
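/*
 * Note: the marker is zero-allocated (kmem_zalloc below), so it is
 * recognised elsewhere in the scan by its NULL i_mount field — see the
 * "another sync thread marker" check in the main loop.  Concurrent sync
 * threads therefore simply step over each other's markers.
 */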

#define XFS_PREEMPT_MASK        0x7f
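/*
 * With a mask of 0x7f the preempt check at the bottom of the main loop
 * fires once every 128 inodes scanned, dropping the mount lock (via
 * IPOINTER_INSERT) so other threads are not starved of it during a
 * long sweep.
 */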

        ASSERT(!(flags & SYNC_BDFLUSH));

        if (bypassed)
                *bypassed = 0;
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return 0;
        error = 0;
        last_error = 0;
        preempt = 0;

        /* Allocate a reference marker */
        ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP);

        fflag = XFS_B_ASYNC;            /* default is don't wait */
        if (flags & SYNC_DELWRI)
                fflag = XFS_B_DELWRI;
        if (flags & SYNC_WAIT)
                fflag = 0;              /* synchronous overrides all */

        base_lock_flags = XFS_ILOCK_SHARED;
        if (flags & (SYNC_DELWRI | SYNC_CLOSE)) {
                /*
                 * We need the I/O lock if we're going to call any of
                 * the flush/inval routines.
                 */
                base_lock_flags |= XFS_IOLOCK_SHARED;
        }

        XFS_MOUNT_ILOCK(mp);

        ip = mp->m_inodes;

        mount_locked = B_TRUE;
        vnode_refed  = B_FALSE;

        IPOINTER_CLR;

        do {
                ASSERT(ipointer_in == B_FALSE);
                ASSERT(vnode_refed == B_FALSE);

                lock_flags = base_lock_flags;

                /*
                 * There were no inodes in the list, just break out
                 * of the loop.
                 */
                if (ip == NULL) {
                        break;
                }

                /*
                 * We found another sync thread marker - skip it
                 */
                if (ip->i_mount == NULL) {
                        ip = ip->i_mnext;
                        continue;
                }

                vp = VFS_I(ip);

                /*
                 * If the vnode is gone then this is being torn down,
                 * call reclaim if it is flushed, else let regular flush
                 * code deal with it later in the loop.
                 */

                if (vp == NULL) {
                        /* Skip ones already in reclaim */
                        if (ip->i_flags & XFS_IRECLAIM) {
                                ip = ip->i_mnext;
                                continue;
                        }
                        if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
                                ip = ip->i_mnext;
                        } else if ((xfs_ipincount(ip) == 0) &&
                                    xfs_iflock_nowait(ip)) {
                                IPOINTER_INSERT(ip, mp);

                                xfs_finish_reclaim(ip, 1,
                                                XFS_IFLUSH_DELWRI_ELSE_ASYNC);

                                XFS_MOUNT_ILOCK(mp);
                                mount_locked = B_TRUE;
                                IPOINTER_REMOVE(ip, mp);
                        } else {
                                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                                ip = ip->i_mnext;
                        }
                        continue;
                }

                if (VN_BAD(vp)) {
                        ip = ip->i_mnext;
                        continue;
                }

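                /*
                 * On a forced shutdown there is nothing useful left to
                 * flush, so bail out unless we are invalidating pages
                 * for unmount (SYNC_CLOSE).
                 */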
                if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
                        XFS_MOUNT_IUNLOCK(mp);
                        kmem_free(ipointer);
                        return 0;
                }

                /*
                 * Try to lock without sleeping.  We're out of order with
                 * the inode list lock here, so if we fail we need to drop
                 * the mount lock and try again.  If we're called from
                 * bdflush() here, then don't bother.
                 *
                 * The inode lock here actually coordinates with the
                 * almost spurious inode lock in xfs_ireclaim() to prevent
                 * the vnode we handle here without a reference from
                 * being freed while we reference it.  If we lock the inode
                 * while it's on the mount list here, then the spurious inode
                 * lock in xfs_ireclaim() after the inode is pulled from
                 * the mount list will sleep until we release it here.
                 * This keeps the vnode from being freed while we reference
                 * it.
                 */
                if (xfs_ilock_nowait(ip, lock_flags) == 0) {
                        if (vp == NULL) {
                                ip = ip->i_mnext;
                                continue;
                        }

                        vp = vn_grab(vp);
                        if (vp == NULL) {
                                ip = ip->i_mnext;
                                continue;
                        }

                        IPOINTER_INSERT(ip, mp);
                        xfs_ilock(ip, lock_flags);

                        ASSERT(vp == VFS_I(ip));
                        ASSERT(ip->i_mount == mp);

                        vnode_refed = B_TRUE;
                }

                /* From here on in the loop we may have a marker record
                 * in the inode list.
                 */

                /*
                 * If we have to flush data or wait for I/O completion
                 * we need to drop the ilock that we currently hold.
                 * If we need to drop the lock, insert a marker if we
                 * have not already done so.
                 */
                if ((flags & (SYNC_CLOSE|SYNC_IOWAIT)) ||
                    ((flags & SYNC_DELWRI) && VN_DIRTY(vp))) {
                        if (mount_locked) {
                                IPOINTER_INSERT(ip, mp);
                        }
                        xfs_iunlock(ip, XFS_ILOCK_SHARED);

                        if (flags & SYNC_CLOSE) {
                                /* Shutdown case. Flush and invalidate. */
                                if (XFS_FORCED_SHUTDOWN(mp))
                                        xfs_tosspages(ip, 0, -1,
                                                        FI_REMAPF);
                                else
                                        error = xfs_flushinval_pages(ip,
                                                        0, -1, FI_REMAPF);
                        } else if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) {
                                error = xfs_flush_pages(ip, 0,
                                                -1, fflag, FI_NONE);
                        }

                        /*
                         * When freezing, we need to wait for all I/O
                         * (including direct I/O) to complete to ensure
                         * that no further data modification can take
                         * place after this point.
                         */
                        if (flags & SYNC_IOWAIT)
                                vn_iowait(ip);

                        xfs_ilock(ip, XFS_ILOCK_SHARED);
                }

                if ((flags & SYNC_ATTR) &&
                    (ip->i_update_core ||
                     (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) {
                        if (mount_locked)
                                IPOINTER_INSERT(ip, mp);

                        if (flags & SYNC_WAIT) {
                                xfs_iflock(ip);
                                error = xfs_iflush(ip, XFS_IFLUSH_SYNC);

                        /*
                         * If we can't acquire the flush lock, then the inode
                         * is already being flushed so don't bother waiting.
                         *
                         * If we can lock it then do a delwri flush so we can
                         * combine multiple inode flushes in each disk write.
                         */
                        } else if (xfs_iflock_nowait(ip)) {
                                error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
                        } else if (bypassed) {
                                (*bypassed)++;
                        }
                }

                if (lock_flags != 0) {
                        xfs_iunlock(ip, lock_flags);
                }

                if (vnode_refed) {
                        /*
                         * If we had to take a reference on the vnode
                         * above, then wait until after we've unlocked
                         * the inode to release the reference.  This is
                         * because we can be already holding the inode
                         * lock when IRELE() calls xfs_inactive().
                         *
                         * Make sure to drop the mount lock before calling
                         * IRELE() so that we don't trip over ourselves if
                         * we have to go for the mount lock again in the
                         * inactive code.
                         */
                        if (mount_locked) {
                                IPOINTER_INSERT(ip, mp);
                        }

                        IRELE(ip);

                        vnode_refed = B_FALSE;
                }

                if (error) {
                        last_error = error;
                }

                /*
                 * bail out if the filesystem is corrupted.
                 */
                if (error == EFSCORRUPTED) {
                        if (!mount_locked) {
                                XFS_MOUNT_ILOCK(mp);
                                IPOINTER_REMOVE(ip, mp);
                        }
                        XFS_MOUNT_IUNLOCK(mp);
                        ASSERT(ipointer_in == B_FALSE);
                        kmem_free(ipointer);
                        return XFS_ERROR(error);
                }

                /* Let other threads have a chance at the mount lock
                 * if we have looped many times without dropping the
                 * lock.
                 */
                if ((++preempt & XFS_PREEMPT_MASK) == 0) {
                        if (mount_locked) {
                                IPOINTER_INSERT(ip, mp);
                        }
                }

                if (mount_locked == B_FALSE) {
                        XFS_MOUNT_ILOCK(mp);
                        mount_locked = B_TRUE;
                        IPOINTER_REMOVE(ip, mp);
                        continue;
                }

                ASSERT(ipointer_in == B_FALSE);
                ip = ip->i_mnext;

        } while (ip != mp->m_inodes);

        XFS_MOUNT_IUNLOCK(mp);

        ASSERT(ipointer_in == B_FALSE);

        kmem_free(ipointer);
        return XFS_ERROR(last_error);
}
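/*
 * Note the error convention above: the scan remembers only the most
 * recent per-inode error and keeps walking, returning it at the end.
 * EFSCORRUPTED is the one error that aborts the walk immediately.
 */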

/*
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic vfs_sync
 * interface as explained above under xfs_sync.
 *
 */
int
xfs_syncsub(
        xfs_mount_t     *mp,
        int             flags,
        int             *bypassed)
{
        int             error = 0;
        int             last_error = 0;
        uint            log_flags = XFS_LOG_FORCE;
        xfs_buf_t       *bp;
        xfs_buf_log_item_t      *bip;

        /*
         * Sync out the log.  This ensures that the log is periodically
         * flushed even if there is not enough activity to fill it up.
         */
        if (flags & SYNC_WAIT)
                log_flags |= XFS_LOG_SYNC;
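
        /*
         * Passing a zero LSN asks xfs_log_force() to push everything
         * currently in the in-core log out to disk rather than forcing
         * up to a specific LSN; XFS_LOG_SYNC additionally makes the
         * force wait for the writes to complete.
         */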
        xfs_log_force(mp, (xfs_lsn_t)0, log_flags);

        if (flags & (SYNC_ATTR|SYNC_DELWRI)) {
                if (flags & SYNC_BDFLUSH)
                        xfs_finish_reclaim_all(mp, 1);
                else
                        error = xfs_sync_inodes(mp, flags, bypassed);
        }

        /*
         * Flushing out dirty data above probably generated more
         * log activity, so if this isn't vfs_sync() then flush
         * the log again.
         */
        if (flags & SYNC_DELWRI) {
                xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
        }

        if (flags & SYNC_FSDATA) {
                /*
                 * If this is vfs_sync() then only sync the superblock
                 * if we can lock it without sleeping and it is not pinned.
                 */
                if (flags & SYNC_BDFLUSH) {
                        bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
                        if (bp != NULL) {
                                bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
                                if ((bip != NULL) &&
                                    xfs_buf_item_dirty(bip)) {
                                        if (!(XFS_BUF_ISPINNED(bp))) {
                                                XFS_BUF_ASYNC(bp);
                                                error = xfs_bwrite(mp, bp);
                                        } else {
                                                xfs_buf_relse(bp);
                                        }
                                } else {
                                        xfs_buf_relse(bp);
                                }
                        }
                } else {
                        bp = xfs_getsb(mp, 0);
                        /*
                         * If the buffer is pinned then push on the log so
                         * we won't get stuck waiting in the write for
                         * someone, maybe ourselves, to flush the log.
                         * Even though we just pushed the log above, we
                         * did not have the superblock buffer locked at
                         * that point so it can become pinned in between
                         * there and here.
                         */
                        if (XFS_BUF_ISPINNED(bp))
                                xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
                        if (flags & SYNC_WAIT)
                                XFS_BUF_UNASYNC(bp);
                        else
                                XFS_BUF_ASYNC(bp);
                        error = xfs_bwrite(mp, bp);
                }
                if (error) {
                        last_error = error;
                }
        }

        /*
         * Now check to see if the log needs a "dummy" transaction.
         */
        if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) {
                xfs_trans_t *tp;
                xfs_inode_t *ip;

                /*
                 * Put a dummy transaction in the log to tell
                 * recovery that all others are OK.
                 */
                tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
                if ((error = xfs_trans_reserve(tp, 0,
                                XFS_ICHANGE_LOG_RES(mp),
                                0, 0, 0))) {
                        xfs_trans_cancel(tp, 0);
                        return error;
                }

                ip = mp->m_rootip;
                xfs_ilock(ip, XFS_ILOCK_EXCL);

                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
                xfs_trans_ihold(tp, ip);
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                error = xfs_trans_commit(tp, 0);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
        }
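        /*
         * The dummy transaction simply re-logs the root inode core: it
         * costs almost nothing, but it gives the log an entry to write
         * so that an otherwise idle log can be covered and recovery
         * after a clean period finds nothing to replay.
         */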

        /*
         * When shutting down, we need to ensure that the AIL is pushed
         * to disk or the filesystem can appear corrupt from the PROM.
         */
        if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) {
                XFS_bflush(mp->m_ddev_targp);
                if (mp->m_rtdev_targp) {
                        XFS_bflush(mp->m_rtdev_targp);
                }
        }

        return XFS_ERROR(last_error);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount *mp,
        void            *data,
        void            (*syncer)(struct xfs_mount *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}
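
/*
 * Convention for the helpers below: a caller that passes an inode as the
 * work data takes a reference with igrab() before queueing, and the work
 * function drops it with iput() once the flush is done, so the inode
 * cannot go away while the work item is pending.
 */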

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        filemap_flush(inode->i_mapping);
        iput(inode);
}

void
xfs_flush_inode(
        xfs_inode_t     *ip)
{
        struct inode    *inode = VFS_I(ip);

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
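        /*
         * Wait briefly so xfssyncd has a chance to push the flush
         * through before the caller retries its failed allocation; the
         * 500ms figure is a heuristic throttle, not a guarantee that
         * the flush has completed.
         */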
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        sync_blockdev(mp->m_super->s_bdev);
        iput(inode);
}

void
xfs_flush_device(
        xfs_inode_t     *ip)
{
        struct inode    *inode = VFS_I(ip);

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
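        /*
         * Unlike the inode case, a synchronous log force follows the
         * block device flush here, so the log is on disk as well
         * before the caller retries its allocation.
         */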
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
xfs_sync_worker(
        struct xfs_mount *mp,
        void            *unused)
{
        int             error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY))
                error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
        void                    *arg)
{
        struct xfs_mount        *mp = arg;
        long                    timeleft;
        bhv_vfs_sync_work_t     *work, *n;
        LIST_HEAD(tmp);

        set_freezable();
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
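                        /*
                         * m_sync_work is embedded in the mount
                         * structure rather than allocated by
                         * xfs_syncd_queue_work(), so it must never
                         * be freed here.
                         */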
                        if (work == &mp->m_sync_work)
                                continue;
                        kmem_free(work);
                }
        }

        return 0;
}

int
xfs_syncd_init(
        struct xfs_mount        *mp)
{
        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
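        /*
         * kthread_run() returns an ERR_PTR() on failure; negating
         * PTR_ERR() converts it to the positive errno convention
         * used by XFS.
         */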
        if (IS_ERR(mp->m_sync_task))
                return -PTR_ERR(mp->m_sync_task);
        return 0;
}

void
xfs_syncd_stop(
        struct xfs_mount        *mp)
{
        kthread_stop(mp->m_sync_task);
}