diff --git a/src/engine/sched.c b/src/engine/sched.c
index c9c34e09150..3558902e101 100644
--- a/src/engine/sched.c
+++ b/src/engine/sched.c
@@ -771,7 +771,7 @@ check_space_pressure(struct dss_xstream *dx, struct sched_pool_info *spi)
 {
 	struct sched_info	*info = &dx->dx_sched_info;
 	struct vos_pool_space	 vps = { 0 };
-	uint64_t		 scm_left, nvme_left, ne_left, ne_sys;
+	uint64_t		 scm_left, nvme_left, ne_left;
 	struct pressure_ratio	*pr;
 	int			 orig_pressure, rc;
 
@@ -807,12 +807,8 @@ check_space_pressure(struct dss_xstream *dx, struct sched_pool_info *spi)
 	if (vps.vps_ne_total == 0) {
 		ne_left = UINT64_MAX;
 	} else {
-		D_ASSERT(vps.vps_ne_total < SCM_TOTAL(&vps));
-		ne_sys = SCM_SYS(&vps) * vps.vps_ne_total / SCM_TOTAL(&vps);
-		if (vps.vps_ne_free > ne_sys)
-			ne_left = vps.vps_ne_free - ne_sys;
-		else
-			ne_left = 0;
+		ne_left = vps.vps_ne_free;
+		D_ASSERT(ne_left <= vps.vps_ne_total);
 	}
 
 	if (NVME_TOTAL(&vps) == 0) /* NVMe not enabled */
diff --git a/src/vos/vos_aggregate.c b/src/vos/vos_aggregate.c
index 63f5603b0e5..af597ffb238 100644
--- a/src/vos/vos_aggregate.c
+++ b/src/vos/vos_aggregate.c
@@ -1,5 +1,6 @@
 /**
  * (C) Copyright 2019-2024 Intel Corporation.
+ * (C) Copyright 2025 Hewlett Packard Enterprise Development LP
  *
  * SPDX-License-Identifier: BSD-2-Clause-Patent
  */
@@ -177,10 +178,24 @@ struct vos_agg_param {
 };
 
 static inline void
-credits_set(struct vos_agg_credits *vac, bool tight)
+credits_set(struct vos_pool *pool, struct vos_agg_credits *vac, bool tight)
 {
-	vac->vac_creds_scan = tight ? AGG_CREDS_SCAN_TIGHT : AGG_CREDS_SCAN_SLACK;
-	vac->vac_creds_del = tight ? AGG_CREDS_DEL_TIGHT : AGG_CREDS_DEL_SLACK;
+	unsigned int multiplier = 1;
+
+	/*
+	 * When an md-on-ssd phase2 pool runs into space pressure, larger SCAN credits are
+	 * used to reduce yield & reprobe on iterating, and larger DEL credits are used to
+	 * drop more punched objects to GC in one batch, so that GC will likely reclaim
+	 * more objects when reclaiming a bucket.
+	 *
+	 * Though larger aggregation credits will lower front-end I/O performance, they can
+	 * greatly reduce page misses for GC when free space/pages are tight.
+	 */
+	if (tight && vos_pool_is_evictable(pool))
+		multiplier = 100;
+
+	vac->vac_creds_scan = (tight ? AGG_CREDS_SCAN_TIGHT : AGG_CREDS_SCAN_SLACK) * multiplier;
+	vac->vac_creds_del = (tight ? AGG_CREDS_DEL_TIGHT : AGG_CREDS_DEL_SLACK) * multiplier;
 	vac->vac_creds_merge = tight ? AGG_CREDS_MERGE_TIGHT : AGG_CREDS_MERGE_SLACK;
 }
 
@@ -323,7 +338,7 @@ vos_aggregate_yield(struct vos_agg_param *agg_param)
 
 	if (agg_param->ap_yield_func == NULL) {
 		bio_yield(agg_param->ap_umm);
-		credits_set(&agg_param->ap_credits, true);
+		credits_set(cont->vc_pool, &agg_param->ap_credits, true);
 		return false;
 	}
 
@@ -333,7 +348,7 @@ vos_aggregate_yield(struct vos_agg_param *agg_param)
 		return true;
 
 	/* rc == 0: tight mode; rc == 1: slack mode */
-	credits_set(&agg_param->ap_credits, rc == 0);
+	credits_set(cont->vc_pool, &agg_param->ap_credits, rc == 0);
 	return false;
 }
 
@@ -2702,7 +2717,7 @@ vos_aggregate(daos_handle_t coh, daos_epoch_range_t *epr,
 	/* Set aggregation parameters */
 	ad->ad_agg_param.ap_umm = &cont->vc_pool->vp_umm;
 	ad->ad_agg_param.ap_coh = coh;
-	credits_set(&ad->ad_agg_param.ap_credits, true);
+	credits_set(cont->vc_pool, &ad->ad_agg_param.ap_credits, true);
 	ad->ad_agg_param.ap_discard = 0;
 	ad->ad_agg_param.ap_yield_func = yield_func;
 	ad->ad_agg_param.ap_yield_arg = yield_arg;
@@ -2822,7 +2837,7 @@ vos_discard(daos_handle_t coh, daos_unit_oid_t *oidp, daos_epoch_range_t *epr,
 	/* Set aggregation parameters */
 	ad->ad_agg_param.ap_umm = &cont->vc_pool->vp_umm;
 	ad->ad_agg_param.ap_coh = coh;
-	credits_set(&ad->ad_agg_param.ap_credits, true);
+	credits_set(cont->vc_pool, &ad->ad_agg_param.ap_credits, true);
 	ad->ad_agg_param.ap_discard = 1;
 	ad->ad_agg_param.ap_yield_func = yield_func;
 	ad->ad_agg_param.ap_yield_arg = yield_arg;
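
For reviewers, below is a minimal standalone sketch of the credit-scaling behavior introduced in credits_set(). The AGG_CREDS_* values here are placeholders (the real constants live in vos_aggregate.c and are not shown in this diff), and vos_pool_is_evictable() is replaced by a plain bool flag; only the tight/evictable multiplier logic mirrors the patch.

/*
 * Standalone sketch of the new credit scaling in credits_set().
 * Placeholder values only; not the actual VOS constants.
 */
#include <stdbool.h>
#include <stdio.h>

#define AGG_CREDS_SCAN_TIGHT	32	/* placeholder */
#define AGG_CREDS_SCAN_SLACK	64	/* placeholder */
#define AGG_CREDS_DEL_TIGHT	16	/* placeholder */
#define AGG_CREDS_DEL_SLACK	32	/* placeholder */

struct demo_credits {
	unsigned int	creds_scan;
	unsigned int	creds_del;
};

/*
 * Mirrors the patched logic: credits are scaled by 100 only when the pool
 * is an md-on-ssd phase2 (evictable) pool *and* aggregation runs in tight
 * mode, i.e. under space pressure.
 */
static void
demo_credits_set(struct demo_credits *c, bool tight, bool pool_evictable)
{
	unsigned int multiplier = (tight && pool_evictable) ? 100 : 1;

	c->creds_scan = (tight ? AGG_CREDS_SCAN_TIGHT : AGG_CREDS_SCAN_SLACK) * multiplier;
	c->creds_del  = (tight ? AGG_CREDS_DEL_TIGHT : AGG_CREDS_DEL_SLACK) * multiplier;
}

int
main(void)
{
	struct demo_credits c;

	demo_credits_set(&c, true, true);	/* tight mode, evictable pool: 100x credits */
	printf("evictable:     scan=%u del=%u\n", c.creds_scan, c.creds_del);

	demo_credits_set(&c, true, false);	/* tight mode, non-evictable pool: unchanged */
	printf("non-evictable: scan=%u del=%u\n", c.creds_scan, c.creds_del);
	return 0;
}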