2016-05-13 04:28:10 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* This file is released under the GPL.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "dm-core.h"
|
|
|
|
#include "dm-rq.h"
|
|
|
|
|
|
|
|
#include <linux/blk-mq.h>
|
|
|
|
|
|
|
|
#define DM_MSG_PREFIX "core-rq"
|
|
|
|
|
2019-02-21 04:37:44 +08:00
|
|
|
/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;	/* owning mapped device */
	struct dm_target *ti;		/* target the request was mapped to */
	struct request *orig, *clone;	/* original request and its clone */
	struct kthread_work work;
	blk_status_t error;		/* first error seen on any clone bio */
	union map_info info;		/* target-specific per-io data */
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;	/* start stamp, then elapsed time */
	unsigned n_sectors;		/* sectors accounted for dm-stats */
	unsigned completed;		/* bytes completed so far on orig */
};
|
|
|
|
|
2016-05-13 04:28:10 +08:00
|
|
|
/* Defaults for the dm-mq tag set; both are overridable via module params. */
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
|
|
|
|
|
|
|
|
/*
 * Return the user-set reserved IO count, clamped to a sane range
 * (falls back to RESERVED_REQUEST_BASED_IOS when out of bounds).
 */
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
|
|
|
|
|
|
|
|
/* Number of blk-mq hardware queues to create, clamped to [1, 32]. */
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}
|
|
|
|
|
|
|
|
/* blk-mq queue depth, clamped to BLK_MQ_MAX_DEPTH. */
static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
|
|
|
|
|
|
|
|
/*
 * A mapped device is request-based iff its queue is managed by blk-mq
 * (bio-based devices do not use a blk-mq tag set).
 */
int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}
|
|
|
|
|
2018-10-11 10:49:26 +08:00
|
|
|
void dm_start_queue(struct request_queue *q)
{
	/* Lift the quiesce first, then kick any requests parked on the
	 * requeue list so they get dispatched. */
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}
|
|
|
|
|
2018-10-11 10:49:26 +08:00
|
|
|
/* Quiesce the queue: waits for in-flight .queue_rq calls to finish. */
void dm_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}
|
|
|
|
|
|
|
|
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	/* No further clone bios chained after this one? */
	bool is_last = !clone->bi_next;

	/* Drop the clone bio reference; only tio state is used below. */
	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 *
	 * Note: the "exit" label jumps into this if-body so that an error
	 * also flushes the bytes completed so far up to the original.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
|
|
|
|
|
|
|
|
/* The tio lives in the request's blk-mq per-request payload (pdu). */
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
|
|
|
|
|
|
|
|
static void rq_end_stats(struct mapped_device *md, struct request *orig)
|
|
|
|
{
|
|
|
|
if (unlikely(dm_stats_used(&md->stats))) {
|
|
|
|
struct dm_rq_target_io *tio = tio_from_request(orig);
|
|
|
|
tio->duration_jiffies = jiffies - tio->duration_jiffies;
|
|
|
|
dm_stats_account_io(&md->stats, rq_data_dir(orig),
|
|
|
|
blk_rq_pos(orig), tio->n_sectors, true,
|
|
|
|
tio->duration_jiffies, &tio->stats_aux);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}
|
|
|
|
|
|
|
|
/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	/* Tear down the clone before completing the original request. */
	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	/* Drops the md reference taken in dm_start_request(); md may be
	 * freed here, so it must be the last step. */
	rq_completed(md);
}
|
|
|
|
|
2016-09-14 22:36:39 +08:00
|
|
|
/* Kick the blk-mq requeue list, optionally after a delay of @msecs. */
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}
|
|
|
|
|
2016-09-14 22:36:39 +08:00
|
|
|
/* Immediately kick the requeue list of @md's queue (exported for targets). */
void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);
|
|
|
|
|
|
|
|
/* Put @rq back on the requeue list, then kick the list after @msecs. */
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	/* false: don't kick the requeue list yet — done explicitly below */
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}
|
|
|
|
|
2016-09-14 00:16:14 +08:00
|
|
|
/*
 * Undo any clone setup and put the original request back on the queue,
 * optionally after a 100ms delay (used for transient resource shortages).
 */
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	/* A clone may not exist if clone_and_map_rq() never produced one. */
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	/* May free md — must be last. */
	rq_completed(md);
}
|
|
|
|
|
2017-06-03 15:38:04 +08:00
|
|
|
/*
 * Finish a completed clone: let the target's rq_end_io() decide the
 * outcome (complete / requeue / leave pending), then act on it.
 * @mapped is false when the request never reached the target's map
 * function, in which case rq_end_io() is skipped.
 */
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		/*
		 * The underlying device rejected the operation; a zero
		 * max-sectors limit means it no longer supports it, so
		 * disable the operation on this mapped device too.
		 */
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMCRIT("unimplemented target endio return value: %d", r);
		BUG();
	}
}
|
|
|
|
|
|
|
|
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		/* No clone was ever created: complete the original directly. */
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	/* RQF_FAILED is set by dm_kill_unmapped_request(): the request was
	 * never mapped, so the target's rq_end_io must not be called. */
	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
|
|
|
|
|
|
|
|
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	/* blk_should_fake_timeout() is a fault-injection hook; normally
	 * the request is completed (dm_softirq_done runs via .complete). */
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
|
|
|
|
|
|
|
|
/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	/* RQF_FAILED tells dm_softirq_done() to skip the target's endio. */
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}
|
|
|
|
|
2022-09-22 05:19:54 +08:00
|
|
|
/* Clone's end_io: propagate completion status to the original request. */
static enum rq_end_io_ret end_clone_request(struct request *clone,
					    blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
	/* The clone is released later (dm_end_request / requeue paths). */
	return RQ_END_IO_NONE;
}
|
|
|
|
|
|
|
|
/*
 * blk_rq_prep_clone() callback: wire each clone bio back to its original
 * bio and owning tio, and install the partial-completion handler.
 */
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Prepare @clone as a request-level clone of @rq (bios cloned from the
 * md's bioset) and hook up the completion callbacks.
 * Returns 0 on success or the blk_rq_prep_clone() error.
 */
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}
|
|
|
|
|
|
|
|
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
|
|
|
|
struct mapped_device *md)
|
|
|
|
{
|
|
|
|
tio->md = md;
|
|
|
|
tio->ti = NULL;
|
|
|
|
tio->clone = NULL;
|
|
|
|
tio->orig = rq;
|
|
|
|
tio->error = 0;
|
2017-08-24 20:19:52 +08:00
|
|
|
tio->completed = 0;
|
2016-05-13 04:28:10 +08:00
|
|
|
/*
|
|
|
|
* Avoid initializing info for blk-mq; it passes
|
|
|
|
* target-specific data through info.ptr
|
|
|
|
* (see: dm_mq_init_request)
|
|
|
|
*/
|
|
|
|
if (!md->init_tio_pdu)
|
|
|
|
memset(&tio->info, 0, sizeof(tio->info));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = blk_insert_cloned_request(clone);
		switch (ret) {
		case BLK_STS_OK:
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			/* Lower queue is busy: unwind the clone setup so the
			 * original request can be safely requeued. */
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		default:
			/* must complete clone in terms of original request */
			dm_complete_request(rq, ret);
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMCRIT("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
|
|
|
|
|
2018-10-11 10:49:26 +08:00
|
|
|
/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	/* Attribute is kept only for userspace compatibility; always 0. */
	return sprintf(buf, "%u\n", 0);
}
|
|
|
|
|
|
|
|
/* DEPRECATED: writes are accepted and silently ignored (see _show above is
 * not referenced — this stub exists only for userspace compatibility). */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}
|
|
|
|
|
2016-05-13 04:28:10 +08:00
|
|
|
/*
 * Mark the original request started in blk-mq, record dm-stats start
 * state, and pin the md for the lifetime of the in-flight I/O.
 */
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		/* Stash the start stamp; rq_end_stats() converts it to a
		 * duration on completion. */
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
|
|
|
|
|
2018-10-11 10:49:26 +08:00
|
|
|
/*
 * blk-mq .init_request: runs once per request at tag-set allocation time,
 * seeding the tio's md pointer and the target's per-io data pointer.
 */
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}
|
|
|
|
|
2017-06-03 15:38:05 +08:00
|
|
|
/*
 * blk-mq .queue_rq: resolve the target (fast path uses the cached
 * immutable target, slow path consults the live table under SRCU),
 * then map and dispatch the request.
 */
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	/*
	 * blk-mq's unquiesce may come from outside events, such as
	 * elevator switch, updating nr_requests or others, and request may
	 * come during suspend, so simply ask for blk-mq to requeue it.
	 */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);
		if (unlikely(!map)) {
			/* No live table yet: let blk-mq retry later. */
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_RESOURCE;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
|
|
|
|
|
2017-03-31 04:39:16 +08:00
|
|
|
/* blk-mq operations for request-based dm-mq devices. */
static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
|
|
|
|
|
2016-05-25 09:16:51 +08:00
|
|
|
/*
 * Allocate and configure the blk-mq tag set for a request-based mapped
 * device and bind it to md->queue. Returns 0 or a negative errno; on any
 * failure md->tag_set is left NULL so a later cleanup won't double free.
 */
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (err)
		goto out_tag_set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	/* Clear the pointer so dm_mq_cleanup_mapped_device() won't free it
	 * a second time on device remove. */
	md->tag_set = NULL;

	return err;
}
|
|
|
|
|
|
|
|
/* Release the blk-mq tag set allocated by dm_mq_init_request_queue(). */
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		/* Guard against a repeated cleanup double-freeing the set. */
		md->tag_set = NULL;
	}
}
|
|
|
|
|
|
|
|
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
|