Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2025-11-01 09:13:37 +00:00
block, bfq: give a better name to bfq_bfqq_may_idle
The actual goal of the function bfq_bfqq_may_idle is to tell whether it is better to perform device idling (more precisely: I/O-dispatch plugging) for the input bfq_queue, either to boost throughput or to preserve service guarantees. This commit improves the name of the function accordingly.

Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 9fae8dd59f
commit 277a4a9b56
1 changed file with 8 additions and 8 deletions
block/bfq-iosched.c
@@ -634,7 +634,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
  * The following function returns true if every queue must receive the
  * same share of the throughput (this condition is used when deciding
  * whether idling may be disabled, see the comments in the function
- * bfq_bfqq_may_idle()).
+ * bfq_better_to_idle()).
  *
  * Such a scenario occurs when:
  * 1) all active queues have the same weight,
@@ -3355,7 +3355,7 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
  * issues taken into account are not trivial. We discuss these issues
  * individually while introducing the variables.
  */
-static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 {
         struct bfq_data *bfqd = bfqq->bfqd;
         bool rot_without_queueing =
@@ -3588,19 +3588,19 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 }
 
 /*
- * If the in-service queue is empty but the function bfq_bfqq_may_idle
+ * If the in-service queue is empty but the function bfq_better_to_idle
  * returns true, then:
  * 1) the queue must remain in service and cannot be expired, and
  * 2) the device must be idled to wait for the possible arrival of a new
  * request for the queue.
- * See the comments on the function bfq_bfqq_may_idle for the reasons
+ * See the comments on the function bfq_better_to_idle for the reasons
  * why performing device idling is the best choice to boost the throughput
- * and preserve service guarantees when bfq_bfqq_may_idle itself
+ * and preserve service guarantees when bfq_better_to_idle itself
  * returns true.
  */
 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
 {
-        return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
+        return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
 }
 
 /*
@@ -3686,7 +3686,7 @@ check_queue:
          * may idle after their completion, then keep it anyway.
          */
         if (bfq_bfqq_wait_request(bfqq) ||
-            (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
+            (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
                 bfqq = NULL;
                 goto keep_queue;
         }
@@ -4734,7 +4734,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
                                 BFQQE_BUDGET_TIMEOUT);
         else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
                  (bfqq->dispatched == 0 ||
-                  !bfq_bfqq_may_idle(bfqq)))
+                  !bfq_better_to_idle(bfqq)))
                 bfq_bfqq_expire(bfqd, bfqq, false,
                                 BFQQE_NO_MORE_REQUESTS);
 }
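For orientation, below is a minimal user-space sketch of the relationship the hunks above encode: a queue "must idle" only when it has no queued requests and idling is judged better for throughput or service guarantees, the condition bfq_bfqq_must_idle now spells as RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq). This is not the kernel code; struct toy_bfq_queue, toy_better_to_idle, toy_must_idle and toy_expire_on_completion are simplified stand-ins invented here for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct bfq_queue: only the fields this sketch needs. */
struct toy_bfq_queue {
        int nr_queued;          /* requests still queued (sort_list in BFQ) */
        int dispatched;         /* requests already dispatched to the device */
        bool expects_more_io;   /* queue is likely to issue a new request soon */
};

/*
 * Stand-in for bfq_better_to_idle(): true when plugging I/O dispatch for this
 * queue is expected to pay off, for throughput or for service guarantees.
 * The real function weighs many more factors; one toy heuristic is enough here.
 */
static bool toy_better_to_idle(const struct toy_bfq_queue *bfqq)
{
        return bfqq->expects_more_io;
}

/*
 * Mirrors bfq_bfqq_must_idle() from the diff: keep the queue in service and
 * idle the device only if the queue is empty *and* idling is judged better.
 */
static bool toy_must_idle(const struct toy_bfq_queue *bfqq)
{
        return bfqq->nr_queued == 0 && toy_better_to_idle(bfqq);
}

/*
 * Mirrors the completion-path check in the last hunk: with no queued requests,
 * expire the queue unless requests are still in flight and idling is better.
 */
static bool toy_expire_on_completion(const struct toy_bfq_queue *bfqq)
{
        return bfqq->nr_queued == 0 &&
               (bfqq->dispatched == 0 || !toy_better_to_idle(bfqq));
}

int main(void)
{
        struct toy_bfq_queue q = { .nr_queued = 0, .dispatched = 1,
                                   .expects_more_io = true };

        printf("must_idle=%d expire_on_completion=%d\n",
               toy_must_idle(&q), toy_expire_on_completion(&q));
        return 0;
}

Built with any C99 compiler (e.g. gcc -std=c99 sketch.c), this prints must_idle=1 expire_on_completion=0 for the queue above: while a dispatched request is still in flight and more I/O is expected, idling is preferred over expiring the queue.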