mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 11:46:19 +00:00
dm: remove queue next_ordered workaround for barriers
This patch removes DM's bio-based vs request-based conditional setting of next_ordered. For bio-based DM the next_ordered check is no longer a concern (as that check is now in the __make_request path). For request-based DM the default of QUEUE_ORDERED_NONE is now appropriate. bio-based DM was changed to work around the previously misplaced next_ordered check with this commit: 99360b4c18
request-based DM does not yet support barriers but reacted to the above bio-based DM change with this commit: 5d67aa2366
The above changes are no longer needed given Neil Brown's recent fix to put the next_ordered check in the __make_request path: db64f680ba
Signed-off-by: Mike Snitzer <snitzer@redhat.com> Cc: Jun'ichi Nomura <j-nomura@ce.jp.nec.com> Cc: NeilBrown <neilb@suse.de> Acked-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com> Acked-by: Mikulas Patocka <mpatocka@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com>
This commit is contained in:
parent
69885683d2
commit
a732c207d1
3 changed files with 0 additions and 16 deletions
|
@ -830,11 +830,6 @@ unsigned dm_table_get_type(struct dm_table *t)
|
||||||
return t->type;
|
return t->type;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool dm_table_bio_based(struct dm_table *t)
|
|
||||||
{
|
|
||||||
return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool dm_table_request_based(struct dm_table *t)
|
bool dm_table_request_based(struct dm_table *t)
|
||||||
{
|
{
|
||||||
return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
|
return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
|
||||||
|
|
|
@ -2203,16 +2203,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* It is enought that blk_queue_ordered() is called only once when
|
|
||||||
* the first bio-based table is bound.
|
|
||||||
*
|
|
||||||
* This setting should be moved to alloc_dev() when request-based dm
|
|
||||||
* supports barrier.
|
|
||||||
*/
|
|
||||||
if (!md->map && dm_table_bio_based(table))
|
|
||||||
blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
|
|
||||||
|
|
||||||
__unbind(md);
|
__unbind(md);
|
||||||
r = __bind(md, table, &limits);
|
r = __bind(md, table, &limits);
|
||||||
|
|
||||||
|
|
|
@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
|
||||||
int dm_table_any_busy_target(struct dm_table *t);
|
int dm_table_any_busy_target(struct dm_table *t);
|
||||||
int dm_table_set_type(struct dm_table *t);
|
int dm_table_set_type(struct dm_table *t);
|
||||||
unsigned dm_table_get_type(struct dm_table *t);
|
unsigned dm_table_get_type(struct dm_table *t);
|
||||||
bool dm_table_bio_based(struct dm_table *t);
|
|
||||||
bool dm_table_request_based(struct dm_table *t);
|
bool dm_table_request_based(struct dm_table *t);
|
||||||
int dm_table_alloc_md_mempools(struct dm_table *t);
|
int dm_table_alloc_md_mempools(struct dm_table *t);
|
||||||
void dm_table_free_md_mempools(struct dm_table *t);
|
void dm_table_free_md_mempools(struct dm_table *t);
|
||||||
|
|
Loading…
Reference in a new issue