xprtrdma: Add a "max_payload" op for each memreg mode
The max_payload computation is generalized to ensure that the payload
maximum is the lesser of RPCRDMA_MAX_DATA_SEGS and the number of data
segments that can be transmitted in an inline buffer.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Devesh Sharma <Devesh.Sharma@Emulex.Com>
Tested-by: Meghana Cheripady <Meghana.Cheripady@Emulex.Com>
Tested-by: Veeresh U. Kokatnur <veereshuk@chelsio.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent a0ce85f595
commit 1c9351ee0e
6 changed files with 59 additions and 36 deletions
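Editor's note: to make the arithmetic concrete before reading the diffs, here is a minimal standalone C sketch of the generalized computation. The constants (28-byte minimal header, 16-byte chunk segment descriptor, 64 maximum data segments, 4KB pages) are illustrative stand-ins for the kernel's RPCRDMA_* definitions, and every function name below is invented for the example:

/* Standalone sketch of the generalized max_payload logic.
 * Constants are stand-ins for the kernel's RPCRDMA_* values.
 */
#include <stdio.h>

#define MAX_DATA_SEGS	64	/* stand-in for RPCRDMA_MAX_DATA_SEGS */
#define HDRLEN_MIN	28	/* stand-in for RPCRDMA_HDRLEN_MIN */
#define SEGMENT_SIZE	16	/* stand-in for sizeof(struct rpcrdma_segment) */

/* Round down to a power of two, as 1 << (fls(n) - 1) does. */
static unsigned int pow2_floor(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* How many chunk list items fit within the inline buffers? */
static unsigned int max_segments(unsigned int wsize, unsigned int rsize)
{
	int bytes = (int)(wsize < rsize ? wsize : rsize) - HDRLEN_MIN;

	if (bytes < SEGMENT_SIZE * 2)
		return 0;	/* inline threshold too small */
	return pow2_floor(bytes / SEGMENT_SIZE);
}

int main(void)
{
	unsigned int pages_per_seg = 64;	/* e.g. FMR: 64 pages/segment */
	unsigned int maxpages = max_segments(1024, 1024) * pages_per_seg;

	if (maxpages > MAX_DATA_SEGS)
		maxpages = MAX_DATA_SEGS;	/* the min_t() clamp */
	printf("max_payload = %u bytes\n", maxpages * 4096);	/* 4KB pages */
	return 0;
}

With these numbers the sketch prints max_payload = 262144 bytes: 32 segments fit inline, the FMR multiplier overshoots, and the clamp to 64 pages wins.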
net/sunrpc/xprtrdma/fmr_ops.c
@@ -17,6 +17,19 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
+/* Maximum scatter/gather per FMR */
+#define RPCRDMA_MAX_FMR_SGES	(64)
+
+/* FMR mode conveys up to 64 pages of payload per chunk segment.
+ */
+static size_t
+fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+		     rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
+}
+
 const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
+	.ro_maxpages	= fmr_op_maxpages,
 	.ro_displayname	= "fmr",
 };
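Editor's note on the clamp: assuming RPCRDMA_MAX_DATA_SEGS is 64 (its value at the time), any nonzero result from rpcrdma_max_segments() multiplied by 64 SGEs already exceeds the ceiling (for instance, 32 inline segments × 64 pages = 2048), so FMR mode in practice always reports the full 64-page maximum.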
net/sunrpc/xprtrdma/frwr_ops.c
@@ -17,6 +17,19 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
+/* FRWR mode conveys a list of pages per chunk segment. The
+ * maximum length of that list is the FRWR page list depth.
+ */
+static size_t
+frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
+}
+
 const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
+	.ro_maxpages	= frwr_op_maxpages,
 	.ro_displayname	= "frwr",
 };
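Editor's note: unlike FMR's fixed 64-SGE constant, ri_max_frmr_depth is per-device; it is set when the IA is opened, from the adapter's reported fast-register page-list depth, so the multiplier here (and the point at which min_t() clamps) varies with the hardware.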
net/sunrpc/xprtrdma/physical_ops.c
@@ -19,6 +19,16 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
+/* PHYSICAL memory registration conveys one page per chunk segment.
+ */
+static size_t
+physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+		     rpcrdma_max_segments(r_xprt));
+}
+
 const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
+	.ro_maxpages	= physical_op_maxpages,
 	.ro_displayname	= "physical",
 };
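Editor's note: physical registration is the one mode where the inline-buffer limit can bind directly. Each chunk segment conveys exactly one page, so with the illustrative numbers from the sketch above (32 segments, 4KB pages) this mode would advertise a 128KB maximum payload where the other two modes reach the full 256KB.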
net/sunrpc/xprtrdma/transport.c
@@ -406,7 +406,10 @@ xprt_setup_rdma(struct xprt_create *args)
 			  xprt_rdma_connect_worker);
 
 	xprt_rdma_format_addresses(xprt);
-	xprt->max_payload = rpcrdma_max_payload(new_xprt);
+	xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
+	if (xprt->max_payload == 0)
+		goto out4;
+	xprt->max_payload <<= PAGE_SHIFT;
 	dprintk("RPC:       %s: transport data payload maximum: %zu bytes\n",
 		__func__, xprt->max_payload);
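Editor's note: this call site is the point of the series. The transport no longer switches on a registration-strategy enum; it dispatches through the mode's ops vector. The following standalone sketch illustrates the pattern; the type and function names are invented for the example and the page counts are placeholders, not kernel API:

#include <stdio.h>

struct xprt;	/* forward declaration, as the xprt_rdma.h hunk below adds */

/* Per-mode ops vector: one maxpages callback per registration mode */
struct memreg_ops {
	unsigned int (*maxpages)(struct xprt *);
	const char *displayname;
};

struct xprt {
	const struct memreg_ops *ops;
};

/* Placeholder callbacks; real values would come from the mode logic */
static unsigned int fmr_maxpages(struct xprt *x)  { (void)x; return 64; }
static unsigned int frwr_maxpages(struct xprt *x) { (void)x; return 64; }

static const struct memreg_ops fmr_ops  = { fmr_maxpages, "fmr" };
static const struct memreg_ops frwr_ops = { frwr_maxpages, "frwr" };

int main(void)
{
	struct xprt x = { &fmr_ops };
	struct xprt y = { &frwr_ops };

	/* No switch on a strategy enum: dispatch through the vector */
	printf("%s: %u pages\n", x.ops->displayname, x.ops->maxpages(&x));
	printf("%s: %u pages\n", y.ops->displayname, y.ops->maxpages(&y));
	return 0;
}

Adding a new registration mode then means adding one ops table and its callbacks, with no edits to the callers.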
net/sunrpc/xprtrdma/verbs.c
@@ -2212,43 +2212,24 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
 	return rc;
 }
 
-/* Physical mapping means one Read/Write list entry per-page.
- * All list entries must fit within an inline buffer
- *
- * NB: The server must return a Write list for NFS READ,
- * which has the same constraint. Factor in the inline
- * rsize as well.
+/* How many chunk list items fit within our inline buffers?
  */
-static size_t
-rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
+unsigned int
+rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-	unsigned int inline_size, pages;
+	int bytes, segments;
 
-	inline_size = min_t(unsigned int,
-			    cdata->inline_wsize, cdata->inline_rsize);
-	inline_size -= RPCRDMA_HDRLEN_MIN;
-	pages = inline_size / sizeof(struct rpcrdma_segment);
-	return pages << PAGE_SHIFT;
-}
-
-static size_t
-rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
-{
-	return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
-}
-
-size_t
-rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
-{
-	size_t result;
-
-	switch (r_xprt->rx_ia.ri_memreg_strategy) {
-	case RPCRDMA_ALLPHYSICAL:
-		result = rpcrdma_physical_max_payload(r_xprt);
-		break;
-	default:
-		result = rpcrdma_mr_max_payload(r_xprt);
+	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
+	bytes -= RPCRDMA_HDRLEN_MIN;
+	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
+		pr_warn("RPC:       %s: inline threshold too small\n",
+			__func__);
+		return 0;
 	}
-	return result;
+
+	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
+	dprintk("RPC:       %s: max chunk list size = %d segments\n",
+		__func__, segments);
+	return segments;
 }
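Editor's worked example, assuming 1024-byte inline thresholds, a 28-byte RPCRDMA_HDRLEN_MIN, and a 16-byte struct rpcrdma_segment: bytes = 1024 - 28 = 996; 996 / 16 = 62 descriptors; and 1 << (fls(62) - 1) = 32, the nearest power of two below 62. If fewer than two descriptors fit, the function warns and returns 0, which xprt_setup_rdma() above converts into a setup failure via goto out4.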
net/sunrpc/xprtrdma/xprt_rdma.h
@@ -334,7 +334,9 @@ struct rpcrdma_stats {
 /*
  * Per-registration mode operations
  */
+struct rpcrdma_xprt;
 struct rpcrdma_memreg_ops {
+	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
 	const char	*ro_displayname;
 };
 
@@ -411,6 +413,8 @@ struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
 void rpcrdma_free_regbuf(struct rpcrdma_ia *,
 			 struct rpcrdma_regbuf *);
 
+unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
+
 /*
  * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
  */
@@ -422,7 +426,6 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
 int rpcrdma_marshal_req(struct rpc_rqst *);
-size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
 
 /* Temporary NFS request map cache. Created in svc_rdma.c */
 extern struct kmem_cache *svc_rdma_map_cachep;