xfs: refactor xfs_defer_finish_noroll

commit bb47d79750f1a68a75d4c7defc2da934ba31de14 upstream.

Split out a helper that operates on a single xfs_defer_pending structure
to untangle the code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Chandan Babu R <chandan.babu@oracle.com>
Acked-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -359,6 +359,53 @@ xfs_defer_cancel_list(
 	}
 }
 
+/*
+ * Log an intent-done item for the first pending intent, and finish the work
+ * items.
+ */
+static int
+xfs_defer_finish_one(
+	struct xfs_trans		*tp,
+	struct xfs_defer_pending	*dfp)
+{
+	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
+	void				*state = NULL;
+	struct list_head		*li, *n;
+	int				error;
+
+	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);
+
+	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
+	list_for_each_safe(li, n, &dfp->dfp_work) {
+		list_del(li);
+		dfp->dfp_count--;
+		error = ops->finish_item(tp, li, dfp->dfp_done, &state);
+		if (error == -EAGAIN) {
+			/*
+			 * Caller wants a fresh transaction; put the work item
+			 * back on the list and log a new log intent item to
+			 * replace the old one. See "Requesting a Fresh
+			 * Transaction while Finishing Deferred Work" above.
+			 */
+			list_add(li, &dfp->dfp_work);
+			dfp->dfp_count++;
+			dfp->dfp_done = NULL;
+			xfs_defer_create_intent(tp, dfp, false);
+		}
+
+		if (error)
+			goto out;
+	}
+
+	/* Done with the dfp, free it. */
+	list_del(&dfp->dfp_list);
+	kmem_free(dfp);
+out:
+	if (ops->finish_cleanup)
+		ops->finish_cleanup(tp, state, error);
+	return error;
+}
+
 /*
  * Finish all the pending work. This involves logging intent items for
  * any work items that wandered in since the last transaction roll (if
@@ -372,11 +419,7 @@ xfs_defer_finish_noroll(
 	struct xfs_trans		**tp)
 {
 	struct xfs_defer_pending	*dfp;
-	struct list_head		*li;
-	struct list_head		*n;
-	void				*state;
 	int				error = 0;
-	const struct xfs_defer_op_type *ops;
 	LIST_HEAD(dop_pending);
 
 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -385,83 +428,30 @@ xfs_defer_finish_noroll(
 	/* Until we run out of pending work to finish... */
 	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
 		/* log intents and pull in intake items */
 		xfs_defer_create_intents(*tp);
 		list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
 
-		/*
-		 * Roll the transaction.
-		 */
 		error = xfs_defer_trans_roll(tp);
 		if (error)
-			goto out;
+			goto out_shutdown;
 
-		/* Log an intent-done item for the first pending item. */
 		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
 				       dfp_list);
-		ops = defer_op_types[dfp->dfp_type];
-		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
-		dfp->dfp_done = ops->create_done(*tp, dfp->dfp_intent,
-				dfp->dfp_count);
-
-		/* Finish the work items. */
-		state = NULL;
-		list_for_each_safe(li, n, &dfp->dfp_work) {
-			list_del(li);
-			dfp->dfp_count--;
-			error = ops->finish_item(*tp, li, dfp->dfp_done,
-					&state);
-			if (error == -EAGAIN) {
-				/*
-				 * Caller wants a fresh transaction;
-				 * put the work item back on the list
-				 * and jump out.
-				 */
-				list_add(li, &dfp->dfp_work);
-				dfp->dfp_count++;
-				break;
-			} else if (error) {
-				/*
-				 * Clean up after ourselves and jump out.
-				 * xfs_defer_cancel will take care of freeing
-				 * all these lists and stuff.
-				 */
-				if (ops->finish_cleanup)
-					ops->finish_cleanup(*tp, state, error);
-				goto out;
-			}
-		}
-		if (error == -EAGAIN) {
-			/*
-			 * Caller wants a fresh transaction, so log a new log
-			 * intent item to replace the old one and roll the
-			 * transaction. See "Requesting a Fresh Transaction
-			 * while Finishing Deferred Work" above.
-			 */
-			dfp->dfp_done = NULL;
-			xfs_defer_create_intent(*tp, dfp, false);
-		} else {
-			/* Done with the dfp, free it. */
-			list_del(&dfp->dfp_list);
-			kmem_free(dfp);
-		}
-
-		if (ops->finish_cleanup)
-			ops->finish_cleanup(*tp, state, error);
-	}
-
-out:
-	if (error) {
-		xfs_defer_trans_abort(*tp, &dop_pending);
-		xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
-		trace_xfs_defer_finish_error(*tp, error);
-		xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
-		xfs_defer_cancel(*tp);
-		return error;
+		error = xfs_defer_finish_one(*tp, dfp);
+		if (error && error != -EAGAIN)
+			goto out_shutdown;
 	}
 
 	trace_xfs_defer_finish_done(*tp, _RET_IP_);
 	return 0;
+
+out_shutdown:
+	xfs_defer_trans_abort(*tp, &dop_pending);
+	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
+	trace_xfs_defer_finish_error(*tp, error);
+	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
+	xfs_defer_cancel(*tp);
+	return error;
 }
 
 int
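
For readers skimming the patch, the refactored loop reduces to: pick the first pending item, let xfs_defer_finish_one() process it, and treat -EAGAIN as a request to roll the transaction and retry. The short standalone C sketch below mirrors only that control-flow shape; every name in it (struct pending, finish_one, budget) is made up for illustration, and none of it is kernel code or part of this patch.

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for a pending deferred item: work units plus a per-pass budget. */
struct pending {
	int remaining;	/* work units left on this item */
	int budget;	/* units we may finish before asking for a fresh "transaction" */
};

/* Finish as much of one item as the budget allows; -EAGAIN means "roll and call again". */
static int finish_one(struct pending *p)
{
	int done = 0;

	while (p->remaining > 0) {
		if (done == p->budget)
			return -EAGAIN;
		p->remaining--;
		done++;
	}
	return 0;
}

int main(void)
{
	struct pending items[] = { { 5, 2 }, { 1, 2 } };
	int rolls = 0;

	for (int i = 0; i < 2; i++) {
		int error;

		/* The driver loop stays small: retry one item until it is fully finished. */
		while ((error = finish_one(&items[i])) == -EAGAIN)
			rolls++;	/* stands in for rolling the transaction */
		if (error)
			return 1;
	}
	printf("finished all items after %d rolls\n", rolls);
	return 0;
}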