Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: remove xfs_flush_space
  xfs: flush delayed allocation blocks on ENOSPC in create
  xfs: block callers of xfs_flush_inodes() correctly
  xfs: make inode flush at ENOSPC synchronous
  xfs: use xfs_sync_inodes() for device flushing
  xfs: inform the xfsaild of the push target before sleeping
  xfs: prevent unwritten extent conversion from blocking I/O completion
  xfs: fix double free of inode
  xfs: validate log feature fields correctly
torvalds committed Apr 13, 2009
2 parents 80a04d3 + dc2a553 commit 3c1795c
Showing 13 changed files with 180 additions and 161 deletions.
38 changes: 21 additions & 17 deletions fs/xfs/linux-2.6/xfs_aops.c
@@ -152,23 +152,6 @@ xfs_find_bdev_for_inode(
return mp->m_ddev_targp->bt_bdev;
}

/*
* Schedule IO completion handling on a xfsdatad if this was
* the final hold on this ioend. If we are asked to wait,
* flush the workqueue.
*/
STATIC void
xfs_finish_ioend(
xfs_ioend_t *ioend,
int wait)
{
if (atomic_dec_and_test(&ioend->io_remaining)) {
queue_work(xfsdatad_workqueue, &ioend->io_work);
if (wait)
flush_workqueue(xfsdatad_workqueue);
}
}

/*
* We're now finished for good with this ioend structure.
* Update the page state via the associated buffer_heads,
@@ -309,6 +292,27 @@ xfs_end_bio_read(
xfs_destroy_ioend(ioend);
}

/*
* Schedule IO completion handling on a xfsdatad if this was
* the final hold on this ioend. If we are asked to wait,
* flush the workqueue.
*/
STATIC void
xfs_finish_ioend(
xfs_ioend_t *ioend,
int wait)
{
if (atomic_dec_and_test(&ioend->io_remaining)) {
struct workqueue_struct *wq = xfsdatad_workqueue;
if (ioend->io_work.func == xfs_end_bio_unwritten)
wq = xfsconvertd_workqueue;

queue_work(wq, &ioend->io_work);
if (wait)
flush_workqueue(wq);
}
}

/*
* Allocate and initialise an IO completion structure.
* We need to track unwritten extent write completion here initially.
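For orientation: the point of re-adding xfs_finish_ioend() below xfs_end_bio_read() is that unwritten-extent conversion now runs on its own xfsconvertd workqueue, so waiting for ordinary data I/O completion can never stall behind a blocked conversion. A minimal sketch of that routing idea using only the generic workqueue API; the queue and handler names below are placeholders, not XFS's:

        #include <linux/workqueue.h>

        static struct workqueue_struct *data_wq;        /* ordinary I/O completion */
        static struct workqueue_struct *convert_wq;     /* conversions that may block on the log */

        static void data_done(struct work_struct *work)    { /* fast, non-blocking completion */ }
        static void convert_done(struct work_struct *work) { /* may sleep waiting on transactions */ }

        /* Pick the queue by handler so flushing data_wq never waits on a conversion. */
        static void finish_io(struct work_struct *work, int wait)
        {
                struct workqueue_struct *wq =
                        work->func == convert_done ? convert_wq : data_wq;

                queue_work(wq, work);
                if (wait)
                        flush_workqueue(wq);
        }

The real xfs_finish_ioend() additionally defers the queueing until the last reference on the ioend is dropped; only the routing is shown here.
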
1 change: 1 addition & 0 deletions fs/xfs/linux-2.6/xfs_aops.h
@@ -19,6 +19,7 @@
#define __XFS_AOPS_H__

extern struct workqueue_struct *xfsdatad_workqueue;
extern struct workqueue_struct *xfsconvertd_workqueue;
extern mempool_t *xfs_ioend_pool;

/*
9 changes: 9 additions & 0 deletions fs/xfs/linux-2.6/xfs_buf.c
@@ -51,6 +51,7 @@ static struct shrinker xfs_buf_shake = {

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_TRACE
void
@@ -1775,6 +1776,7 @@ xfs_flush_buftarg(
xfs_buf_t *bp, *n;
int pincount = 0;

xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
xfs_buf_runall_queues(xfslogd_workqueue);

@@ -1831,9 +1833,15 @@ xfs_buf_init(void)
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;

xfsconvertd_workqueue = create_workqueue("xfsconvertd");
if (!xfsconvertd_workqueue)
goto out_destroy_xfsdatad_workqueue;

register_shrinker(&xfs_buf_shake);
return 0;

out_destroy_xfsdatad_workqueue:
destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
@@ -1849,6 +1857,7 @@ void
xfs_buf_terminate(void)
{
unregister_shrinker(&xfs_buf_shake);
destroy_workqueue(xfsconvertd_workqueue);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
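The init changes above follow the usual create-in-order, unwind-in-reverse idiom for setup failures. A compact, generic sketch of that shape (the queue names here are arbitrary, not XFS's):

        #include <linux/errno.h>
        #include <linux/init.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *wq_a, *wq_b, *wq_c;

        static int __init setup_queues(void)
        {
                wq_a = create_workqueue("wq_a");
                if (!wq_a)
                        goto out;
                wq_b = create_workqueue("wq_b");
                if (!wq_b)
                        goto out_destroy_a;
                wq_c = create_workqueue("wq_c");
                if (!wq_c)
                        goto out_destroy_b;
                return 0;

        out_destroy_b:
                destroy_workqueue(wq_b);
        out_destroy_a:
                destroy_workqueue(wq_a);
        out:
                return -ENOMEM;
        }

        /* Teardown runs the same steps in reverse, mirroring xfs_buf_terminate(). */
        static void __exit teardown_queues(void)
        {
                destroy_workqueue(wq_c);
                destroy_workqueue(wq_b);
                destroy_workqueue(wq_a);
        }
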
14 changes: 7 additions & 7 deletions fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -74,14 +74,14 @@ xfs_flush_pages(

if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
xfs_iflags_clear(ip, XFS_ITRUNCATED);
ret = filemap_fdatawrite(mapping);
if (flags & XFS_B_ASYNC)
return -ret;
ret2 = filemap_fdatawait(mapping);
if (!ret)
ret = ret2;
ret = -filemap_fdatawrite(mapping);
}
return -ret;
if (flags & XFS_B_ASYNC)
return ret;
ret2 = xfs_wait_on_pages(ip, first, last);
if (!ret)
ret = ret2;
return ret;
}

int
18 changes: 17 additions & 1 deletion fs/xfs/linux-2.6/xfs_lrw.c
@@ -751,10 +751,26 @@ xfs_write(
goto relock;
}
} else {
int enospc = 0;
ssize_t ret2 = 0;

write_retry:
xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
*offset, ioflags);
ret = generic_file_buffered_write(iocb, iovp, segs,
ret2 = generic_file_buffered_write(iocb, iovp, segs,
pos, offset, count, ret);
/*
* if we just got an ENOSPC, flush the inode now we
* aren't holding any page locks and retry *once*
*/
if (ret2 == -ENOSPC && !enospc) {
error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE);
if (error)
goto out_unlock_internal;
enospc = 1;
goto write_retry;
}
ret = ret2;
}

current->backing_dev_info = NULL;
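The write path change above boils down to a "flush and retry once" shape: generic_file_buffered_write() is allowed to fail with ENOSPC, the delayed-allocation backlog is flushed while no page locks are held, and the write is retried exactly once. A condensed sketch of that control flow; do_buffered_write() and flush_delalloc() are hypothetical stand-ins, not XFS functions:

        #include <linux/errno.h>
        #include <linux/types.h>

        /* Hypothetical stand-ins for generic_file_buffered_write() and xfs_flush_pages(). */
        static ssize_t do_buffered_write(void *cookie)  { return -ENOSPC; /* placeholder */ }
        static int flush_delalloc(void *cookie)         { return 0; /* placeholder */ }

        static ssize_t write_retry_once(void *cookie)
        {
                ssize_t ret;
                int enospc = 0;

        retry:
                ret = do_buffered_write(cookie);
                if (ret == -ENOSPC && !enospc) {
                        /*
                         * No page locks are held here, so a blocking flush of
                         * delayed allocations is safe.  Retry exactly once; a
                         * second ENOSPC means the filesystem is genuinely full.
                         */
                        if (flush_delalloc(cookie))
                                return ret;     /* sketch only: the real code propagates the flush error */
                        enospc = 1;
                        goto retry;
                }
                return ret;
        }
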
78 changes: 33 additions & 45 deletions fs/xfs/linux-2.6/xfs_sync.c
@@ -62,12 +62,6 @@ xfs_sync_inodes_ag(
uint32_t first_index = 0;
int error = 0;
int last_error = 0;
int fflag = XFS_B_ASYNC;

if (flags & SYNC_DELWRI)
fflag = XFS_B_DELWRI;
if (flags & SYNC_WAIT)
fflag = 0; /* synchronous overrides all */

do {
struct inode *inode;
@@ -128,11 +122,23 @@
* If we have to flush data or wait for I/O completion
* we need to hold the iolock.
*/
if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
xfs_ilock(ip, XFS_IOLOCK_SHARED);
lock_flags |= XFS_IOLOCK_SHARED;
error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
if (flags & SYNC_IOWAIT)
if (flags & SYNC_DELWRI) {
if (VN_DIRTY(inode)) {
if (flags & SYNC_TRYLOCK) {
if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
lock_flags |= XFS_IOLOCK_SHARED;
} else {
xfs_ilock(ip, XFS_IOLOCK_SHARED);
lock_flags |= XFS_IOLOCK_SHARED;
}
if (lock_flags & XFS_IOLOCK_SHARED) {
error = xfs_flush_pages(ip, 0, -1,
(flags & SYNC_WAIT) ? 0
: XFS_B_ASYNC,
FI_NONE);
}
}
if (VN_CACHED(inode) && (flags & SYNC_IOWAIT))
xfs_ioend_wait(ip);
}
xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -398,15 +404,17 @@ STATIC void
xfs_syncd_queue_work(
struct xfs_mount *mp,
void *data,
void (*syncer)(struct xfs_mount *, void *))
void (*syncer)(struct xfs_mount *, void *),
struct completion *completion)
{
struct bhv_vfs_sync_work *work;
struct xfs_sync_work *work;

work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
INIT_LIST_HEAD(&work->w_list);
work->w_syncer = syncer;
work->w_data = data;
work->w_mount = mp;
work->w_completion = completion;
spin_lock(&mp->m_sync_lock);
list_add_tail(&work->w_list, &mp->m_sync_list);
spin_unlock(&mp->m_sync_lock);
@@ -420,49 +428,26 @@ xfs_syncd_queue_work(
* heads, looking about for more room...
*/
STATIC void
xfs_flush_inode_work(
struct xfs_mount *mp,
void *arg)
{
struct inode *inode = arg;
filemap_flush(inode->i_mapping);
iput(inode);
}

void
xfs_flush_inode(
xfs_inode_t *ip)
{
struct inode *inode = VFS_I(ip);

igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
delay(msecs_to_jiffies(500));
}

/*
* This is the "bigger hammer" version of xfs_flush_inode_work...
* (IOW, "If at first you don't succeed, use a Bigger Hammer").
*/
STATIC void
xfs_flush_device_work(
xfs_flush_inodes_work(
struct xfs_mount *mp,
void *arg)
{
struct inode *inode = arg;
sync_blockdev(mp->m_super->s_bdev);
xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK);
xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT);
iput(inode);
}

void
xfs_flush_device(
xfs_flush_inodes(
xfs_inode_t *ip)
{
struct inode *inode = VFS_I(ip);
DECLARE_COMPLETION_ONSTACK(completion);

igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
delay(msecs_to_jiffies(500));
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
wait_for_completion(&completion);
xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

@@ -497,7 +482,7 @@ xfssyncd(
{
struct xfs_mount *mp = arg;
long timeleft;
bhv_vfs_sync_work_t *work, *n;
xfs_sync_work_t *work, *n;
LIST_HEAD (tmp);

set_freezable();
@@ -532,6 +517,8 @@ xfssyncd(
list_del(&work->w_list);
if (work == &mp->m_sync_work)
continue;
if (work->w_completion)
complete(work->w_completion);
kmem_free(work);
}
}
@@ -545,6 +532,7 @@
{
mp->m_sync_work.w_syncer = xfs_sync_worker;
mp->m_sync_work.w_mount = mp;
mp->m_sync_work.w_completion = NULL;
mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
if (IS_ERR(mp->m_sync_task))
return -PTR_ERR(mp->m_sync_task);
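The key piece above is the completion handed from xfs_flush_inodes() through xfs_syncd_queue_work() to xfssyncd: the caller now blocks until the flush work has actually run, instead of sleeping for a fixed delay(msecs_to_jiffies(500)) and hoping. A minimal sketch of that producer/worker rendezvous using the stock completion API; every name below is illustrative rather than XFS's:

        #include <linux/completion.h>
        #include <linux/list.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct sync_item {
                struct list_head        list;
                void                    (*fn)(void *arg);
                void                    *arg;
                struct completion       *done;  /* NULL means fire-and-forget */
        };

        static LIST_HEAD(sync_list);
        static DEFINE_SPINLOCK(sync_lock);

        /* Producer: queue the item, then block until the worker has run it. */
        static void queue_and_wait(void (*fn)(void *), void *arg)
        {
                DECLARE_COMPLETION_ONSTACK(done);
                struct sync_item *w;

                w = kmalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
                w->fn = fn;
                w->arg = arg;
                w->done = &done;

                spin_lock(&sync_lock);
                list_add_tail(&w->list, &sync_list);
                spin_unlock(&sync_lock);
                /* ...wake the worker thread here... */

                wait_for_completion(&done);
        }

        /* Worker, for each item taken off sync_list: run it, then wake the waiter. */
        static void run_one(struct sync_item *w)
        {
                w->fn(w->arg);
                if (w->done)
                        complete(w->done);
                kfree(w);
        }

The on-stack completion is safe precisely because the producer cannot return before complete() has been called.
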
9 changes: 5 additions & 4 deletions fs/xfs/linux-2.6/xfs_sync.h
@@ -21,18 +21,20 @@
struct xfs_mount;
struct xfs_perag;

typedef struct bhv_vfs_sync_work {
typedef struct xfs_sync_work {
struct list_head w_list;
struct xfs_mount *w_mount;
void *w_data; /* syncer routine argument */
void (*w_syncer)(struct xfs_mount *, void *);
} bhv_vfs_sync_work_t;
struct completion *w_completion;
} xfs_sync_work_t;

#define SYNC_ATTR 0x0001 /* sync attributes */
#define SYNC_DELWRI 0x0002 /* look at delayed writes */
#define SYNC_WAIT 0x0004 /* wait for i/o to complete */
#define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */
#define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */
#define SYNC_TRYLOCK 0x0020 /* only try to lock inodes */

int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
@@ -43,8 +45,7 @@ int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);

void xfs_flush_inode(struct xfs_inode *ip);
void xfs_flush_device(struct xfs_inode *ip);
void xfs_flush_inodes(struct xfs_inode *ip);

int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode);
23 changes: 14 additions & 9 deletions fs/xfs/xfs_iget.c
@@ -69,15 +69,6 @@ xfs_inode_alloc(
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));

/*
* initialise the VFS inode here to get failures
* out of the way early.
*/
if (!inode_init_always(mp->m_super, VFS_I(ip))) {
kmem_zone_free(xfs_inode_zone, ip);
return NULL;
}

/* initialise the xfs inode */
ip->i_ino = ino;
ip->i_mount = mp;
@@ -113,6 +104,20 @@ xfs_inode_alloc(
#ifdef XFS_DIR2_TRACE
ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif
/*
* Now initialise the VFS inode. We do this after the xfs_inode
* initialisation as internal failures will result in ->destroy_inode
* being called and that will pass down through the reclaim path and
* free the XFS inode. This path requires the XFS inode to already be
* initialised. Hence if this call fails, the xfs_inode has already
* been freed and we should not reference it at all in the error
* handling.
*/
if (!inode_init_always(mp->m_super, VFS_I(ip)))
return NULL;

/* prevent anyone from using this yet */
VFS_I(ip)->i_state = I_NEW|I_LOCK;

return ip;
}
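The reordering in xfs_inode_alloc() exists because, per the comment above, a failure in inode_init_always() tears the inode down through ->destroy_inode() and the reclaim path, which both assumes the XFS-private state is already valid and frees the structure, so the error path must not touch it again. A generic sketch of that "initialise the part whose teardown frees everything last" shape; struct foo and its helpers are invented for illustration:

        #include <linux/slab.h>

        struct foo {
                int     private_ready;
                int     public_ready;
        };

        static void init_private(struct foo *f)
        {
                f->private_ready = 1;           /* cannot fail */
        }

        /*
         * On failure this helper frees 'f' itself via the private-state teardown,
         * mirroring how a failed inode_init_always() ends up freeing the xfs_inode.
         */
        static int init_public(struct foo *f)
        {
                if (0 /* failure case */) {
                        kfree(f);
                        return 0;
                }
                f->public_ready = 1;
                return 1;
        }

        static struct foo *alloc_foo(void)
        {
                struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

                if (!f)
                        return NULL;

                init_private(f);                /* must come first */

                if (!init_public(f))
                        return NULL;            /* 'f' is already gone: do not touch it */

                return f;
        }
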