net/smc/smc_core.c (+63 −51)

@@ -487,51 +487,17 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size)
 	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
 }
 
-static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
+static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
+					       bool is_rmb, int bufsize)
 {
-	struct smc_connection *conn = &smc->conn;
-	struct smc_link_group *lgr = conn->lgr;
-	struct smc_buf_desc *buf_desc = NULL;
-	struct list_head *buf_list;
-	int bufsize, bufsize_short;
+	struct smc_buf_desc *buf_desc;
 	struct smc_link *lnk;
-	int sk_buf_size;
-	rwlock_t *lock;
 	int rc;
 
-	lnk = &lgr->lnk[SMC_SINGLE_LINK];
-	if (is_rmb)
-		/* use socket recv buffer size (w/o overhead) as start value */
-		sk_buf_size = smc->sk.sk_rcvbuf / 2;
-	else
-		/* use socket send buffer size (w/o overhead) as start value */
-		sk_buf_size = smc->sk.sk_sndbuf / 2;
-
-	for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
-	     bufsize_short >= 0; bufsize_short--) {
-		if (is_rmb) {
-			lock = &lgr->rmbs_lock;
-			buf_list = &lgr->rmbs[bufsize_short];
-		} else {
-			lock = &lgr->sndbufs_lock;
-			buf_list = &lgr->sndbufs[bufsize_short];
-		}
-		bufsize = smc_uncompress_bufsize(bufsize_short);
-		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
-			continue;
-
-		/* check for reusable slot in the link group */
-		buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list);
-		if (buf_desc) {
-			memset(buf_desc->cpu_addr, 0, bufsize);
-			break; /* found reusable slot */
-		}
-
-		/* try to allocate the determined number of pages */
+	/* try to alloc a new buffer */
 	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
 	if (!buf_desc)
-		break; /* give up with -ENOMEM */
+		return ERR_PTR(-ENOMEM);
 
 	buf_desc->cpu_addr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
@@ -540,28 +506,28 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 						      get_order(bufsize));
 	if (!buf_desc->cpu_addr) {
 		kfree(buf_desc);
-		buf_desc = NULL;
-		continue;
+		return ERR_PTR(-EAGAIN);
 	}
 	buf_desc->order = get_order(bufsize);
 
 	/* build the sg table from the pages */
+	lnk = &lgr->lnk[SMC_SINGLE_LINK];
 	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
 			    GFP_KERNEL);
 	if (rc) {
 		smc_buf_free(buf_desc, lnk, is_rmb);
-		buf_desc = NULL;
-		continue;
+		return ERR_PTR(rc);
 	}
 	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
 		   buf_desc->cpu_addr, bufsize);
 
 	/* map sg table to DMA address */
-	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
-			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc, is_rmb ?
+			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	/* SMC protocol depends on mapping to one DMA address only */
 	if (rc != 1) {
 		smc_buf_free(buf_desc, lnk, is_rmb);
-		buf_desc = NULL;
-		continue; /* if mapping failed, try smaller one */
+		return ERR_PTR(-EAGAIN);
 	}
 
 	/* create a new memory region for the RMB */
@@ -572,10 +538,56 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 					      buf_desc);
 		if (rc) {
 			smc_buf_free(buf_desc, lnk, is_rmb);
-			buf_desc = NULL;
-			continue;
+			return ERR_PTR(rc);
 		}
 	}
 
+	return buf_desc;
+}
+
+static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
+{
+	struct smc_connection *conn = &smc->conn;
+	struct smc_link_group *lgr = conn->lgr;
+	struct smc_buf_desc *buf_desc = NULL;
+	struct list_head *buf_list;
+	int bufsize, bufsize_short;
+	int sk_buf_size;
+	rwlock_t *lock;
+
+	if (is_rmb)
+		/* use socket recv buffer size (w/o overhead) as start value */
+		sk_buf_size = smc->sk.sk_rcvbuf / 2;
+	else
+		/* use socket send buffer size (w/o overhead) as start value */
+		sk_buf_size = smc->sk.sk_sndbuf / 2;
+
+	for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
+	     bufsize_short >= 0; bufsize_short--) {
+		if (is_rmb) {
+			lock = &lgr->rmbs_lock;
+			buf_list = &lgr->rmbs[bufsize_short];
+		} else {
+			lock = &lgr->sndbufs_lock;
+			buf_list = &lgr->sndbufs[bufsize_short];
+		}
+		bufsize = smc_uncompress_bufsize(bufsize_short);
+		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
+			continue;
+
+		/* check for reusable slot in the link group */
+		buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list);
+		if (buf_desc) {
+			memset(buf_desc->cpu_addr, 0, bufsize);
+			break; /* found reusable slot */
+		}
+
+		buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize);
+		if (PTR_ERR(buf_desc) == -ENOMEM)
+			break;
+		if (IS_ERR(buf_desc))
+			continue;
+
 		buf_desc->used = 1;
 		write_lock_bh(lock);
@@ -584,7 +596,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 		break; /* found */
 	}
 
-	if (!buf_desc || !buf_desc->cpu_addr)
+	if (IS_ERR(buf_desc))
 		return -ENOMEM;
 
 	if (is_rmb) {
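
Note on the refactoring above: the core of the cleanup is the calling convention of the new helper. smc_new_buf_create() either returns a usable struct smc_buf_desc * or an errno encoded with ERR_PTR(), and the size-fallback loop in __smc_buf_create() distinguishes a hard failure (-ENOMEM from the descriptor allocation, which ends the search) from a soft one (-EAGAIN from the page allocation or the DMA mapping, which retries with the next smaller buffer size). Returning an ERR_PTR lets the caller tell these cases apart without an extra output parameter. The sketch below is a minimal user-space model of that pattern, not kernel code: the ERR_PTR/IS_ERR/PTR_ERR macros are simplified re-implementations of the <linux/err.h> helpers, and try_alloc() is a hypothetical stand-in for smc_new_buf_create().

/* Minimal user-space model of the ERR_PTR-based fallback loop; for
 * illustration only, not the kernel implementation.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct buf_desc {
	size_t size;
};

/* Stand-in for smc_new_buf_create(): -ENOMEM is fatal for the caller,
 * -EAGAIN means "this size did not work, a smaller one might".
 */
static struct buf_desc *try_alloc(size_t size)
{
	struct buf_desc *desc;

	if (size > 1024 * 1024)
		return ERR_PTR(-EAGAIN);	/* pretend large sizes fail */

	desc = malloc(sizeof(*desc));
	if (!desc)
		return ERR_PTR(-ENOMEM);
	desc->size = size;
	return desc;
}

int main(void)
{
	struct buf_desc *desc = ERR_PTR(-ENOMEM);
	size_t size;

	/* walk down through the candidate sizes, largest first */
	for (size = 8 * 1024 * 1024; size >= 4096; size /= 2) {
		desc = try_alloc(size);
		if (PTR_ERR(desc) == -ENOMEM)
			break;			/* give up completely */
		if (IS_ERR(desc))
			continue;		/* try the next smaller size */
		break;				/* success */
	}

	if (IS_ERR(desc))
		return 1;
	printf("allocated %zu bytes\n", desc->size);
	free(desc);
	return 0;
}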