if (ctx->fshandle)
free_handle(ctx->fshandle, ctx->fshandle_len);
- if (ctx->rtdev)
- disk_close(ctx->rtdev);
- if (ctx->logdev)
- disk_close(ctx->logdev);
- if (ctx->datadev)
- disk_close(ctx->datadev);
+ if (ctx->verify_disks[XFS_DEV_DATA])
+ disk_close(ctx->verify_disks[XFS_DEV_DATA]);
+ if (ctx->verify_disks[XFS_DEV_LOG])
+ disk_close(ctx->verify_disks[XFS_DEV_LOG]);
+ if (ctx->verify_disks[XFS_DEV_RT])
+ disk_close(ctx->verify_disks[XFS_DEV_RT]);
fshandle_destroy();
error = -xfd_close(&ctx->mnt);
if (error)
}
/* Open the raw devices. */
- ctx->datadev = disk_open(ctx->fsinfo.fs_name);
- if (!ctx->datadev) {
+ ctx->verify_disks[XFS_DEV_DATA] = disk_open(ctx->fsinfo.fs_name);
+ if (!ctx->verify_disks[XFS_DEV_DATA]) {
str_error(ctx, ctx->mntpoint, _("Unable to open data device."));
return ECANCELED;
}
- ctx->nr_io_threads = disk_heads(ctx->datadev);
+ ctx->nr_io_threads = disk_heads(ctx->verify_disks[XFS_DEV_DATA]);
if (verbose) {
fprintf(stdout, _("%s: using %d threads to scrub.\n"),
ctx->mntpoint, scrub_nproc(ctx));
}
if (ctx->fsinfo.fs_log) {
- ctx->logdev = disk_open(ctx->fsinfo.fs_log);
- if (!ctx->logdev) {
+ ctx->verify_disks[XFS_DEV_LOG] = disk_open(ctx->fsinfo.fs_log);
+ if (!ctx->verify_disks[XFS_DEV_LOG]) {
str_error(ctx, ctx->mntpoint,
_("Unable to open external log device."));
return ECANCELED;
}
}
if (ctx->fsinfo.fs_rt) {
- ctx->rtdev = disk_open(ctx->fsinfo.fs_rt);
- if (!ctx->rtdev) {
+ ctx->verify_disks[XFS_DEV_RT] = disk_open(ctx->fsinfo.fs_rt);
+ if (!ctx->verify_disks[XFS_DEV_RT]) {
str_error(ctx, ctx->mntpoint,
_("Unable to open realtime device."));
return ECANCELED;
}
}
-/* Find the device major/minor for a given file descriptor. */
-static dev_t
-disk_to_dev(
+/* Return fsmap device for XFS device index. */
+static uint32_t
+to_fsmap_dev(
struct scrub_ctx *ctx,
- struct disk *disk)
+ enum xfs_device dev)
{
- if (ctx->mnt.fsgeom.rtstart) {
- if (disk == ctx->datadev)
- return XFS_DEV_DATA;
- if (disk == ctx->logdev)
- return XFS_DEV_LOG;
- if (disk == ctx->rtdev)
- return XFS_DEV_RT;
- } else {
- if (disk == ctx->datadev)
- return ctx->fsinfo.fs_datadev;
- if (disk == ctx->logdev)
- return ctx->fsinfo.fs_logdev;
- if (disk == ctx->rtdev)
- return ctx->fsinfo.fs_rtdev;
+ if (ctx->mnt.fsgeom.rtstart)
+ return dev;
+
+ switch (dev) {
+ case XFS_DEV_DATA:
+ return ctx->fsinfo.fs_datadev;
+ case XFS_DEV_LOG:
+ return ctx->fsinfo.fs_logdev;
+ case XFS_DEV_RT:
+ return ctx->fsinfo.fs_rtdev;
+ default:
+ abort();
}
- abort();
}
/* Find the incore bad blocks bitmap for a given disk. */
static struct bitmap *
bitmap_for_disk(
- struct scrub_ctx *ctx,
- struct disk *disk,
+ enum xfs_device dev,
struct media_verify_state *vs)
{
- if (disk == ctx->datadev)
+ switch (dev) {
+ case XFS_DEV_DATA:
return vs->d_bad;
- if (disk == ctx->rtdev)
+ case XFS_DEV_RT:
return vs->r_bad;
- return NULL;
+ default:
+ return NULL;
+ }
}
struct disk_ioerr_report {
struct scrub_ctx *ctx;
- struct disk *disk;
+ enum xfs_device dev;
};
struct owner_decode {
struct disk_ioerr_report *dioerr = arg;
/* Go figure out which blocks are bad from the fsmap. */
- keys[0].fmr_device = disk_to_dev(dioerr->ctx, dioerr->disk);
+ keys[0].fmr_device = to_fsmap_dev(dioerr->ctx, dioerr->dev);
keys[0].fmr_physical = start;
keys[1].fmr_device = keys[0].fmr_device;
keys[1].fmr_physical = start + length - 1;
static int
report_disk_ioerrs(
struct scrub_ctx *ctx,
- struct disk *disk,
- struct media_verify_state *vs)
+ struct media_verify_state *vs,
+ enum xfs_device dev)
{
+ struct bitmap *tree = bitmap_for_disk(dev, vs);
struct disk_ioerr_report dioerr = {
.ctx = ctx,
- .disk = disk,
+ .dev = dev,
};
- struct bitmap *tree;
- if (!disk)
- return 0;
- tree = bitmap_for_disk(ctx, disk, vs);
if (!tree)
return 0;
return -bitmap_iterate(tree, report_ioerr, &dioerr);
if (vs->r_trunc)
str_corrupt(ctx, ctx->mntpoint, _("rt device truncated"));
- ret = report_disk_ioerrs(ctx, ctx->datadev, vs);
+ ret = report_disk_ioerrs(ctx, vs, XFS_DEV_DATA);
if (ret) {
str_liberror(ctx, ret, _("walking datadev io errors"));
return ret;
}
- ret = report_disk_ioerrs(ctx, ctx->rtdev, vs);
+ ret = report_disk_ioerrs(ctx, vs, XFS_DEV_RT);
if (ret) {
str_liberror(ctx, ret, _("walking rtdev io errors"));
return ret;
static void
remember_ioerr(
struct scrub_ctx *ctx,
- struct disk *disk,
+ enum xfs_device dev,
uint64_t start,
uint64_t length,
int error,
int ret;
if (!length) {
- if (disk == ctx->datadev)
+ switch (dev) {
+ case XFS_DEV_DATA:
vs->d_trunc = true;
- else if (disk == ctx->logdev)
+ break;
+ case XFS_DEV_LOG:
vs->l_trunc = true;
- else if (disk == ctx->rtdev)
+ break;
+ case XFS_DEV_RT:
vs->r_trunc = true;
+ break;
+ }
return;
}
- tree = bitmap_for_disk(ctx, disk, vs);
+ tree = bitmap_for_disk(dev, vs);
if (!tree) {
str_liberror(ctx, ENOENT, _("finding bad block bitmap"));
return;
goto out_dbad;
}
- ret = read_verify_pool_alloc(ctx, ctx->datadev, remember_ioerr, &vs,
+ ret = read_verify_pool_alloc(ctx, XFS_DEV_DATA, remember_ioerr, &vs,
&vs.rvp_data);
if (ret) {
str_liberror(ctx, ret, _("creating datadev media verifier"));
goto out_rbad;
}
- if (ctx->logdev) {
- ret = read_verify_pool_alloc(ctx, ctx->logdev, remember_ioerr,
+ if (ctx->fsinfo.fs_log) {
+ ret = read_verify_pool_alloc(ctx, XFS_DEV_LOG, remember_ioerr,
&vs, &vs.rvp_log);
if (ret) {
str_liberror(ctx, ret,
goto out_datapool;
}
}
- if (ctx->rtdev) {
- ret = read_verify_pool_alloc(ctx, ctx->rtdev, remember_ioerr,
+ if (ctx->fsinfo.fs_rt) {
+ ret = read_verify_pool_alloc(ctx, XFS_DEV_RT, remember_ioerr,
&vs, &vs.rvp_realtime);
if (ret) {
str_liberror(ctx, ret,
* can contribute to the progress counter. Hence we need to set
* nr_threads appropriately to handle that many threads.
*/
- *nr_threads = disk_heads(ctx->datadev);
- if (ctx->rtdev)
- *nr_threads += disk_heads(ctx->rtdev);
- if (ctx->logdev)
- *nr_threads += disk_heads(ctx->logdev);
+ *nr_threads = disk_heads(ctx->verify_disks[XFS_DEV_DATA]);
+ if (ctx->verify_disks[XFS_DEV_RT])
+ *nr_threads += disk_heads(ctx->verify_disks[XFS_DEV_RT]);
+ if (ctx->verify_disks[XFS_DEV_LOG])
+ *nr_threads += disk_heads(ctx->verify_disks[XFS_DEV_LOG]);
*rshift = 20;
return 0;
}
struct scrub_ctx *ctx; /* scrub context */
void *readbuf; /* read buffer */
struct ptcounter *verified_bytes;
- struct disk *disk; /* which disk? */
void *ioerr_arg;
read_verify_ioerr_fn_t ioerr_fn; /* io error callback */
size_t miniosz; /* minimum io size, bytes */
+ enum xfs_device dev; /* which device? */
/*
* Store a runtime error code here so that we can stop the pool and
/*
* Create a thread pool to run read verifiers.
*
- * @disk is the disk we want to verify.
* @ioerr_fn will be called when IO errors occur.
*/
int
read_verify_pool_alloc(
struct scrub_ctx *ctx,
- struct disk *disk,
+ enum xfs_device dev,
read_verify_ioerr_fn_t ioerr_fn,
void *ioerr_arg,
struct read_verify_pool **prvp)
{
struct read_verify_pool *rvp;
- unsigned int verifier_threads = disk_heads(disk);
+ unsigned int verifier_threads =
+ disk_heads(ctx->verify_disks[XFS_DEV_DATA]);
int ret;
if (rvp_io_max_size() % ctx->mnt.fsgeom.blocksize)
goto out_buf;
rvp->miniosz = ctx->mnt.fsgeom.blocksize;
rvp->ctx = ctx;
- rvp->disk = disk;
+ rvp->dev = dev;
rvp->ioerr_fn = ioerr_fn;
rvp->ioerr_arg = ioerr_arg;
ret = -workqueue_create(&rvp->wq, (struct xfs_mount *)rvp,
while (rv->io_length > 0) {
read_error = 0;
len = min(rv->io_length, io_max_size);
- dbg_printf("diskverify %d %"PRIu64" %zu\n", rvp->disk->d_fd,
+ dbg_printf("diskverify %u %"PRIu64" %zu\n", rvp->dev,
rv->io_start, len);
- sz = disk_read_verify(rvp->disk, rvp->readbuf, rv->io_start,
- len);
+ sz = disk_read_verify(rvp->ctx->verify_disks[rvp->dev],
+ rvp->readbuf, rv->io_start, len);
if (sz == len && io_max_size < rvp->miniosz) {
/*
* If the verify request was 100% successful and less
* io_start to the next miniosz block.
*/
sz = rvp->miniosz - (rv->io_start % rvp->miniosz);
- dbg_printf("IOERR %d @ %"PRIu64" %zu err %d\n",
- rvp->disk->d_fd, rv->io_start, sz,
- read_error);
- rvp->ioerr_fn(rvp->ctx, rvp->disk, rv->io_start, sz,
+ dbg_printf("IOERR %u @ %"PRIu64" %zu err %d\n",
+ rvp->dev, rv->io_start, sz, read_error);
+ rvp->ioerr_fn(rvp->ctx, rvp->dev, rv->io_start, sz,
read_error, rvp->ioerr_arg);
} else if (sz == 0) {
/* No bytes at all? Did we hit the end of the disk? */
- dbg_printf("EOF %d @ %"PRIu64" %zu err %d\n",
- rvp->disk->d_fd, rv->io_start, sz,
- read_error);
- rvp->ioerr_fn(rvp->ctx, rvp->disk, rv->io_start, sz,
+ dbg_printf("EOF %u @ %"PRIu64" %zu err %d\n",
+ rvp->dev, rv->io_start, sz, read_error);
+ rvp->ioerr_fn(rvp->ctx, rvp->dev, rv->io_start, sz,
read_error, rvp->ioerr_arg);
break;
} else if (sz < len) {
* next full block.
*/
io_max_size = rvp->miniosz - (sz % rvp->miniosz);
- dbg_printf("SHORT %d READ @ %"PRIu64" %zu try for %zd\n",
- rvp->disk->d_fd, rv->io_start, sz,
+ dbg_printf("SHORT %u READ @ %"PRIu64" %zu try for %zd\n",
+ rvp->dev, rv->io_start, sz,
io_max_size);
} else {
/* We should never get back more bytes than we asked. */
if (!rvp)
return 0;
- dbg_printf("verify fd %d start %"PRIu64" len %"PRIu64"\n",
- rvp->disk->d_fd, rs->io_start, rs->io_length);
+ dbg_printf("verify dev %u start %"PRIu64" len %"PRIu64"\n",
+ rvp->dev, rs->io_start, rs->io_length);
/* Worker thread saw a runtime error, don't queue more. */
if (rvp->runtime_error)
struct scrub_ctx;
struct read_verify_pool;
-struct disk;
struct read_verify_schedule {
struct read_verify_pool *rvp;
/* Function called when an IO error happens. */
typedef void (*read_verify_ioerr_fn_t)(struct scrub_ctx *ctx,
- struct disk *disk, uint64_t start, uint64_t length,
+ enum xfs_device dev, uint64_t start, uint64_t length,
int error, void *arg);
-int read_verify_pool_alloc(struct scrub_ctx *ctx, struct disk *disk,
+int read_verify_pool_alloc(struct scrub_ctx *ctx, enum xfs_device dev,
read_verify_ioerr_fn_t ioerr_fn, void *ioerr_arg,
struct read_verify_pool **prvp);
void read_verify_pool_abort(struct read_verify_pool *rvp);
struct statvfs mnt_sv;
struct statfs mnt_sf;
- /* Open block devices */
- struct disk *datadev;
- struct disk *logdev;
- struct disk *rtdev;
+ /* Open block devices for legacy verify */
+ struct disk *verify_disks[XFS_DEV_RT + 1];
/* What does the user want us to do? */
enum scrub_mode mode;