本文整理汇总了C++中ib_dma_map_single函数的典型用法代码示例。如果您正苦于以下问题:C++ ib_dma_map_single函数的具体用法?C++ ib_dma_map_single怎么用?C++ ib_dma_map_single使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ib_dma_map_single函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: post_recv
/* Post a single receive work request for an expected 9P reply. */
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_sge sge;
	struct ib_recv_wr wr, *bad_wr;

	/* Map the reply buffer so the HCA can DMA the incoming message. */
	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		p9_debug(P9_DEBUG_ERROR, "EIO\n");
		return -EIO;
	}

	c->wc_op = IB_WC_RECV;

	sge.addr   = c->busa;
	sge.length = client->msize;
	sge.lkey   = rdma->lkey;

	wr.next    = NULL;
	wr.wr_id   = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	return ib_post_recv(rdma->qp, &wr, &bad_wr);
}
开发者ID:Seagate,项目名称:SMR_FS-EXT4,代码行数:28,代码来源:trans_rdma.c
示例2: rdma_set_ctxt_sge
/*
 * rdma_set_ctxt_sge - fill the scatter/gather list of a receive op context
 *
 * Maps each kvec element for DMA (unless a fast-register MR is in use, in
 * which case the iovec addresses are used as offsets within the FRMR) and
 * advances *sgl_offset by the number of bytes described.
 *
 * Returns 0 on success, or -EINVAL if a DMA mapping fails.  The failed
 * entry's length is pre-set to 0 — presumably so the caller's unmap path
 * stops there; confirm against the transport's unmap routine.
 */
static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
			     struct svc_rdma_op_ctxt *ctxt,
			     struct svc_rdma_fastreg_mr *frmr,
			     struct kvec *vec,
			     u64 *sgl_offset,
			     int count)
{
	int i;

	ctxt->count = count;
	ctxt->direction = DMA_FROM_DEVICE;
	for (i = 0; i < count; i++) {
		ctxt->sge[i].length = 0; /* in case map fails */
		if (!frmr) {
			/* No fast-register MR: DMA-map this iovec element. */
			ctxt->sge[i].addr =
				ib_dma_map_single(xprt->sc_cm_id->device,
						  vec[i].iov_base,
						  vec[i].iov_len,
						  DMA_FROM_DEVICE);
			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
						 ctxt->sge[i].addr))
				return -EINVAL;
			ctxt->sge[i].lkey = xprt->sc_dma_lkey;
			/* Account this mapping on the transport. */
			atomic_inc(&xprt->sc_dma_used);
		} else {
			/* FRMR path: address is relative to frmr->mr. */
			ctxt->sge[i].addr = (unsigned long)vec[i].iov_base;
			ctxt->sge[i].lkey = frmr->mr->lkey;
		}
		ctxt->sge[i].length = vec[i].iov_len;
		*sgl_offset = *sgl_offset + vec[i].iov_len;
	}
	return 0;
}
开发者ID:12rafael,项目名称:jellytimekernel,代码行数:33,代码来源:svc_rdma_recvfrom.c
示例3: post_recv
/* Post one receive work request carrying the 9P reply buffer. */
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_sge sge;
	struct ib_recv_wr wr;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		p9_debug(P9_DEBUG_ERROR, "EIO\n");
		return -EIO;
	}

	/* Completion is dispatched to recv_done via the embedded cqe. */
	c->cqe.done = recv_done;

	sge.addr   = c->busa;
	sge.length = client->msize;
	sge.lkey   = rdma->pd->local_dma_lkey;

	wr.next    = NULL;
	wr.wr_cqe  = &c->cqe;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	return ib_post_recv(rdma->qp, &wr, NULL);
}
开发者ID:AlexShiLucky,项目名称:linux,代码行数:29,代码来源:trans_rdma.c
示例4: iser_alloc_rx_descriptors
/**
 * iser_alloc_rx_descriptors - allocate and map the connection RX resources
 * @iser_conn: iser connection
 * @session:   iscsi session supplying cmds_max / scsi_cmds_max
 *
 * Allocates the RDMA registration resources, the login buffer and one
 * DMA-mapped RX descriptor per outstanding command, wiring each
 * descriptor's single SGE to its mapped payload.  On any failure all
 * previously acquired resources are released in reverse order.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	/* kcalloc checks the n * size product for overflow, unlike the
	 * open-coded kmalloc(n * size) it replaces. */
	iser_conn->rx_descs = kcalloc(iser_conn->num_rx_descs,
				      sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->mr->lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	/* Unmap only the descriptors mapped so far (indices 0..i-1). */
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
开发者ID:antiguru,项目名称:ofed-compat-rdma,代码行数:60,代码来源:iser_initiator.c
示例5: VNIC_BUF_SIZE
/*
 * Allocate an skb for one RX ring slot and DMA-map its data buffer.
 * Returns the skb on success, NULL on allocation or mapping failure.
 */
struct sk_buff *vnic_alloc_rx_skb(struct vnic_rx_ring *ring, int buf_ind,
				  gfp_t gfp_flag)
{
	struct ib_device *ca = ring->port->dev->ca;
	int buf_size = VNIC_BUF_SIZE(ring->port);
	struct sk_buff *skb;
	u64 mapping;

	skb = alloc_skb(buf_size, gfp_flag);
	if (!skb) {
		vnic_dbg_data(ring->port->name,
			      "alloc_skb for size %d failed\n", buf_size);
		return NULL;
	}

	mapping = ib_dma_map_single(ca, skb->data, buf_size, DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(ca, mapping))) {
		vnic_dbg_data(ring->port->name,
			      "ib_dma_map_single len %d failed\n", buf_size);
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* Publish the buffer in the ring slot. */
	ring->rx_info[buf_ind].skb = skb;
	ring->rx_info[buf_ind].dma_addr[0] = mapping;

	return skb;
}
开发者ID:u9621071,项目名称:kernel-uek-UEK3,代码行数:32,代码来源:vnic_data_rx.c
示例6: iser_alloc_login_buf
/*
 * iser_alloc_login_buf - allocate and DMA-map the login request/response
 * buffers of a connection.
 *
 * A single allocation backs both regions: the request buffer
 * (ISCSI_DEF_MAX_RECV_SEG_LEN bytes) followed by the response buffer
 * (ISER_RX_LOGIN_SIZE bytes).
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	int req_err, resp_err;

	BUG_ON(device == NULL);

	iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				       ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!iser_conn->login_buf)
		goto out_err;

	/* Carve the single buffer: request first, response after it. */
	iser_conn->login_req_buf = iser_conn->login_buf;
	iser_conn->login_resp_buf = iser_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;

	/* Request is sent to the target; response is received from it. */
	iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
						     iser_conn->login_req_buf,
						     ISCSI_DEF_MAX_RECV_SEG_LEN,
						     DMA_TO_DEVICE);
	iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
						      iser_conn->login_resp_buf,
						      ISER_RX_LOGIN_SIZE,
						      DMA_FROM_DEVICE);

	req_err = ib_dma_mapping_error(device->ib_device,
				       iser_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device,
					iser_conn->login_resp_dma);

	if (req_err || resp_err) {
		/* Zero the failed cookie(s) — presumably so that
		 * iser_free_login_buf() skips unmapping them; confirm
		 * against that routine. */
		if (req_err)
			iser_conn->login_req_dma = 0;
		if (resp_err)
			iser_conn->login_resp_dma = 0;
		goto free_login_buf;
	}
	return 0;

free_login_buf:
	iser_free_login_buf(iser_conn);
out_err:
	iser_err("unable to alloc or map login buf\n");
	return -ENOMEM;
}
开发者ID:BORETS24,项目名称:common.git-android-4.4,代码行数:47,代码来源:iser_initiator.c
示例7: vnic_alloc_frag
/*
* allocate a single fragment on a single ring entry and map it
* to HW address.
*/
static int vnic_alloc_frag(struct vnic_rx_ring *ring,
struct vnic_frag_data *frags_data, int i)
{
struct vnic_frag_info *frag_info = &ring->frag_info[i];
struct vnic_rx_alloc *page_alloc = &ring->page_alloc[i];
struct skb_frag_struct *skb_frags = &frags_data->frags[i];
struct skb_frag_struct skbf = *skb_frags;
struct page *page;
struct ib_device *ib_device = ring->port->dev->ca;
u64 dma;
int decision;
if (vnic_rx_linear)
return 0;
if (page_alloc->offset >= frag_info->last_offset) {
decision = 0;
/* Allocate new page */
page = alloc_pages(GFP_ATOMIC | __GFP_COMP, VNIC_ALLOC_ORDER);
if (!page) {
/*frags_data->dma_addr[i] = NULL;
ring->rx_info[wr_id].info = VNIC_FRAG_ALLOC_FAIL;
ring->need_refill = 1; */
return -ENOMEM;
}
skbf.page.p = page_alloc->page;
skbf.page_offset = page_alloc->offset;
} else {
decision = 1;
page = page_alloc->page;
get_page(page);
skbf.page.p = page;
skbf.page_offset = page_alloc->offset;
}
skbf.size = frag_info->frag_size;
dma = ib_dma_map_single(ib_device, page_address(skbf.page.p) +
skbf.page_offset, frag_info->frag_size,
PCI_DMA_FROMDEVICE);
if (unlikely(ib_dma_mapping_error(ib_device, dma))) {
vnic_dbg_data(ring->port->name,
"ib_dma_map_single len %d failed\n",
frag_info->frag_size);
put_page(page);
return -ENOMEM;
}
if (!decision) {
page_alloc->page = page;
page_alloc->offset = frag_info->frag_align;
} else
page_alloc->offset += frag_info->frag_stride;
*skb_frags = skbf;
frags_data->dma_addr[i] = dma;
return 0;
}
开发者ID:u9621071,项目名称:kernel-uek-UEK3,代码行数:62,代码来源:vnic_data_rx.c
示例8: iser_alloc_login_buf
/*
 * Allocate and DMA-map the login request and response buffers of a
 * connection.  Returns 0 on success, -ENOMEM otherwise; on failure
 * nothing is left allocated or mapped.
 */
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct iser_device *device = iser_conn->ib_conn.device;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device, desc->req_dma))
		goto err_free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto err_unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					  ISER_RX_LOGIN_SIZE,
					  DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device, desc->rsp_dma))
		goto err_free_rsp;

	return 0;

/* Unwind in reverse acquisition order. */
err_free_rsp:
	kfree(desc->rsp);
err_unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
err_free_req:
	kfree(desc->req);
	return -ENOMEM;
}
开发者ID:Krostazino,项目名称:linux,代码行数:40,代码来源:iser_initiator.c
示例9: netdev_priv
/*
 * ipoib_cm_alloc_rx_skb - allocate and DMA-map a CM receive skb
 *
 * Maps the linear head with ib_dma_map_single() and each of @frags page
 * fragments with ib_dma_map_page(), recording the bus addresses in
 * @mapping.  Returns the skb, or NULL on allocation/mapping failure with
 * everything unwound.
 */
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	/*
	 * Fragment mappings were created with ib_dma_map_page(), so they
	 * must be released with ib_dma_unmap_page(); the old code wrongly
	 * paired them with ib_dma_unmap_single().
	 */
	for (; i > 0; --i)
		ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}
示例10: rdma_map_address
/*
 * DMA-map a kernel virtual address for bidirectional RDMA access.
 * Returns the bus address, or 0 if the mapping failed.
 */
u64 rdma_map_address(void* addr, int length)
{
	u64 bus_addr;

	LOG_KERN(LOG_INFO, ("Mapping addr\n"));

	bus_addr = ib_dma_map_single(rdma_ib_device.dev, addr, length,
				     DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(rdma_ib_device.dev, bus_addr) != 0) {
		LOG_KERN(LOG_INFO, ("Error mapping myaddr"));
		return 0; /* error */
	}

	return bus_addr;
}
开发者ID:jcarreira,项目名称:disag-firebox,代码行数:14,代码来源:rdma_library.c
示例11: svc_rdma_post_recv
/*
 * svc_rdma_post_recv - post one receive WR covering sc_max_req_size bytes
 *
 * Builds a receive context whose SGE list is filled one page at a time
 * until the transport's maximum request size is covered, then posts it
 * on the receive queue.  Takes a transport reference for the posted WR
 * and drops it again if the post fails.
 *
 * Return: the ib_post_recv() status, or -ENOMEM if a page could not be
 * DMA-mapped.
 */
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	/* One SGE per page until the request-size budget is reached. */
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_single(xprt->sc_cm_id->device,
				       page_address(page), PAGE_SIZE,
				       DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		buflen += PAGE_SIZE;
	}
	ctxt->count = sge_no;
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	/* Hold a transport reference while the WR is outstanding. */
	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

err_put_ctxt:
	/* NOTE(review): pages mapped in earlier iterations are not
	 * unmapped here and sc_dma_used is not decremented for them —
	 * this looks like a DMA-mapping leak on this path; confirm
	 * against svc_rdma_put_context() semantics. */
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}
开发者ID:325116067,项目名称:semc-qsd8x50,代码行数:47,代码来源:svc_rdma_transport.c
示例12: iser_reg_single
/**
 * iser_reg_single - fills registered buffer descriptor with
 * registration information
 */
void iser_reg_single(struct iser_device *device,
		     struct iser_regd_buf *regd_buf,
		     enum dma_data_direction direction)
{
	u64 mapped;

	mapped = ib_dma_map_single(device->ib_device,
				   regd_buf->virt_addr,
				   regd_buf->data_size, direction);
	/* A mapping failure here is treated as fatal. */
	BUG_ON(ib_dma_mapping_error(device->ib_device, mapped));

	regd_buf->reg.lkey   = device->mr->lkey;
	regd_buf->reg.len    = regd_buf->data_size;
	regd_buf->reg.va     = mapped;
	regd_buf->reg.is_fmr = 0;

	regd_buf->dma_addr  = mapped;
	regd_buf->direction = direction;
}
开发者ID:smx-smx,项目名称:dsl-n55u,代码行数:23,代码来源:iser_memory.c
示例13: iser_alloc_rx_descriptors
/**
 * iser_alloc_rx_descriptors - allocate and DMA-map the RX descriptor ring
 * @ib_conn: iser connection
 *
 * Allocates ISER_QP_MAX_RECV_DTOS descriptors, maps each one for device
 * access and points its single SGE at the mapped payload.  On mapping
 * failure all previously mapped descriptors are unmapped and the array
 * is freed.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct iser_device *device = ib_conn->device;

	/* kcalloc checks the n * size product for overflow, unlike the
	 * open-coded kmalloc(n * size) it replaces. */
	ib_conn->rx_descs = kcalloc(ISER_QP_MAX_RECV_DTOS,
				    sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!ib_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = ib_conn->rx_descs;
	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->mr->lkey;
	}

	ib_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	/* Unmap only the descriptors mapped so far (indices 0..i-1). */
	rx_desc = ib_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
开发者ID:bond-os,项目名称:linux,代码行数:43,代码来源:iser_initiator.c
示例14: iser_initialize_task_headers
/* Map the iSER header region of a TX descriptor and seed its first SGE. */
int iser_initialize_task_headers(struct iscsi_task *task,
				 struct iser_tx_desc *tx_desc)
{
	struct iser_conn *ib_conn = task->conn->dd_data;
	struct iser_device *device = ib_conn->device;
	struct iscsi_iser_task *iser_task = task->dd_data;
	u64 hdr_dma;

	hdr_dma = ib_dma_map_single(device->ib_device, (void *)tx_desc,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device, hdr_dma))
		return -ENOMEM;

	tx_desc->dma_addr = hdr_dma;
	tx_desc->tx_sg[0].addr   = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey   = device->mr->lkey;

	iser_task->ib_conn = ib_conn;
	return 0;
}
开发者ID:7799,项目名称:linux,代码行数:21,代码来源:iscsi_iser.c
示例15: iser_initialize_task_headers
/**
 * iser_initialize_task_headers() - Initialize task headers
 * @task: iscsi task
 * @tx_desc: iser tx descriptor
 *
 * Notes:
 * This routine may race with iser teardown flow for scsi
 * error handling TMFs. So for TMF we should acquire the
 * state mutex to avoid dereferencing the IB device which
 * may have already been terminated.
 *
 * Return: 0 on success, -ENODEV if the connection is not up,
 * -ENOMEM if the header DMA mapping fails.
 */
int
iser_initialize_task_headers(struct iscsi_task *task,
			     struct iser_tx_desc *tx_desc)
{
	struct iser_conn *iser_conn = task->conn->dd_data;
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iscsi_iser_task *iser_task = task->dd_data;
	u64 dma_addr;
	/* Management tasks carry no scsi command and run in process
	 * context, so they can take the state mutex (see Notes above). */
	const bool mgmt_task = !task->sc && !in_interrupt();
	int ret = 0;

	if (unlikely(mgmt_task))
		mutex_lock(&iser_conn->state_mutex);

	/* Bail out if the connection already began tearing down. */
	if (unlikely(iser_conn->state != ISER_CONN_UP)) {
		ret = -ENODEV;
		goto out;
	}

	/* Map the iSER headers portion of the descriptor for the HCA. */
	dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
		ret = -ENOMEM;
		goto out;
	}

	tx_desc->wr_idx = 0;
	tx_desc->mapped = true;		/* mark the header as DMA-mapped */
	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	iser_task->iser_conn = iser_conn;
out:
	if (unlikely(mgmt_task))
		mutex_unlock(&iser_conn->state_mutex);

	return ret;
}
示例16: svc_rdma_recv_ctxt_alloc
/*
 * Allocate one receive context with a DMA-mapped buffer of
 * sc_max_req_size bytes and pre-build its recv WR, SGE and CQE.
 * Returns NULL if any allocation or the DMA mapping fails.
 */
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	void *buffer;
	dma_addr_t addr;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return NULL;

	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto err_free_ctxt;

	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto err_free_buffer;

	/* Wire the receive WR to its single SGE and its completion entry. */
	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

err_free_buffer:
	kfree(buffer);
err_free_ctxt:
	kfree(ctxt);
	return NULL;
}
开发者ID:AlexShiLucky,项目名称:linux,代码行数:37,代码来源:svc_rdma_recvfrom.c
示例17: rdma_setup
/*
 * rdma_setup - finish bringing up an RDMA context.
 *
 * Allocates the receive buffer, creates a DMA memory region with
 * local-write and remote read/write access, maps the buffer, and
 * transitions the QP to RTS.  Returns 0 on success, -1 on failure.
 */
static int rdma_setup(rdma_ctx_t ctx)
{
	// create receive buffer
	ctx->rdma_recv_buffer = kmalloc(RDMA_BUFFER_SIZE, GFP_KERNEL);
	CHECK_MSG_RET(ctx->rdma_recv_buffer != 0, "Error kmalloc", -1);

	// create memory region
	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_LOCAL_WRITE);
	// ib_get_dma_mr() reports failure with ERR_PTR(), never NULL, so
	// the previous "!= 0" test could never detect an error here.
	CHECK_MSG_RET(!IS_ERR(ctx->mr), "Error creating MR", -1);
	ctx->rkey = ctx->mr->rkey;

	// get dma_addr
	ctx->dma_addr = ib_dma_map_single(rdma_ib_device.dev, ctx->rdma_recv_buffer,
					  RDMA_BUFFER_SIZE, DMA_BIDIRECTIONAL);
	CHECK_MSG_RET(ib_dma_mapping_error(rdma_ib_device.dev, ctx->dma_addr) == 0,
		      "Error ib_dma_map_single", -1);

	// modify QP until RTS
	modify_qp(ctx);

	return 0;
}
开发者ID:jcarreira,项目名称:disag-firebox,代码行数:24,代码来源:rdma_library.c
示例18: sdp_post_send
void
sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb)
{
struct sdp_buf *tx_req;
struct sdp_bsdh *h;
unsigned long mseq;
struct ib_device *dev;
struct ib_send_wr *bad_wr;
struct ib_sge ibsge[SDP_MAX_SEND_SGES];
struct ib_sge *sge;
struct ib_send_wr tx_wr = { NULL };
int i, rc;
u64 addr;
SDPSTATS_COUNTER_MID_INC(post_send, h->mid);
SDPSTATS_HIST(send_size, mb->len);
if (!ssk->qp_active) {
m_freem(mb);
return;
}
mseq = ring_head(ssk->tx_ring);
h = mtod(mb, struct sdp_bsdh *);
ssk->tx_packets++;
ssk->tx_bytes += mb->m_pkthdr.len;
#ifdef SDP_ZCOPY
if (unlikely(h->mid == SDP_MID_SRCAVAIL)) {
struct tx_srcavail_state *tx_sa = TX_SRCAVAIL_STATE(mb);
if (ssk->tx_sa != tx_sa) {
sdp_dbg_data(ssk->socket, "SrcAvail cancelled "
"before being sent!\n");
WARN_ON(1);
m_freem(mb);
return;
}
TX_SRCAVAIL_STATE(mb)->mseq = mseq;
}
#endif
if (unlikely(mb->m_flags & M_URG))
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
mb->m_flags |= M_RDONLY; /* Don't allow compression once sent. */
h->bufs = htons(rx_ring_posted(ssk));
h->len = htonl(mb->m_pkthdr.len);
h->mseq = htonl(mseq);
h->mseq_ack = htonl(mseq_ack(ssk));
sdp_prf1(ssk->socket, mb, "TX: %s bufs: %d mseq:%ld ack:%d",
mid2str(h->mid), rx_ring_posted(ssk), mseq,
ntohl(h->mseq_ack));
SDP_DUMP_PACKET(ssk->socket, "TX", mb, h);
tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
tx_req->mb = mb;
dev = ssk->ib_device;
sge = &ibsge[0];
for (i = 0; mb != NULL; i++, mb = mb->m_next, sge++) {
addr = ib_dma_map_single(dev, mb->m_data, mb->m_len,
DMA_TO_DEVICE);
/* TODO: proper error handling */
BUG_ON(ib_dma_mapping_error(dev, addr));
BUG_ON(i >= SDP_MAX_SEND_SGES);
tx_req->mapping[i] = addr;
sge->addr = addr;
sge->length = mb->m_len;
sge->lkey = ssk->sdp_dev->mr->lkey;
}
tx_wr.next = NULL;
tx_wr.wr_id = mseq | SDP_OP_SEND;
tx_wr.sg_list = ibsge;
tx_wr.num_sge = i;
tx_wr.opcode = IB_WR_SEND;
tx_wr.send_flags = IB_SEND_SIGNALED;
if (unlikely(tx_req->mb->m_flags & M_URG))
tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);
if (unlikely(rc)) {
sdp_dbg(ssk->socket,
"ib_post_send failed with status %d.\n", rc);
sdp_cleanup_sdp_buf(ssk, tx_req, DMA_TO_DEVICE);
sdp_notify(ssk, ECONNRESET);
m_freem(tx_req->mb);
return;
}
atomic_inc(&ssk->tx_ring.head);
atomic_dec(&ssk->tx_ring.credits);
atomic_set(&ssk->remote_credits, rx_ring_posted(ssk));
return;
}
开发者ID:AhmadTux,项目名称:freebsd,代码行数:100,代码来源:sdp_tx.c
示例19: rdma_request
/*
 * rdma_request - send a 9P request over the RDMA transport
 *
 * Posts a receive buffer for the eventual reply (stealing req->rc when
 * present, otherwise allocating a fresh fcall), then DMA-maps the
 * request payload and posts a SEND work request.
 *
 * Returns the ib_post_send() result on the normal path, -EIO on
 * mapping/semaphore failure, or err (0 unless an earlier post_recv
 * failed) after scheduling a connection close.
 */
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
	if (!rpl_context)
		goto err_close;

	/*
	 * If the request has a buffer, steal it, otherwise
	 * allocate a new one. Typically, requests should already
	 * have receive buffers allocated and just swap them around
	 */
	if (!req->rc) {
		req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
				  GFP_KERNEL);
		if (req->rc) {
			/* sdata lives immediately after the p9_fcall header */
			req->rc->sdata = (char *) req->rc +
						sizeof(struct p9_fcall);
			req->rc->capacity = client->msize;
		}
	}
	rpl_context->rc = req->rc;
	if (!rpl_context->rc) {
		kfree(rpl_context);
		goto err_close;
	}

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
		err = post_recv(client, rpl_context);
		if (err) {
			kfree(rpl_context->rc);
			kfree(rpl_context);
			goto err_close;
		}
	} else
		/* RQ already full enough; undo the speculative increment. */
		atomic_dec(&rdma->rq_count);

	/* remove posted receive buffer from request structure */
	req->rc = NULL;

	/* Post the request */
	c = kmalloc(sizeof *c, GFP_KERNEL);
	if (!c)
		goto err_close;
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	/* Throttle against the send-queue depth. */
	if (down_interruptible(&rdma->sq_sem))
		goto error;
	return ib_post_send(rdma->qp, &wr, &bad_wr);

error:
	/* NOTE(review): 'c' (and, after a successful map, its DMA
	 * mapping) is not released on this path — confirm whether
	 * completion/teardown code cleans it up. */
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;

err_close:
	/* Move the transport toward CLOSING exactly once. */
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}
示例20: send_reply
static int send_reply(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
struct page *page,
struct rpcrdma_msg *rdma_resp,
struct svc_rdma_op_ctxt *ctxt,
struct svc_rdma_req_map *vec,
int byte_count)
{
struct ib_send_wr send_wr;
struct ib_send_wr inv_wr;
int sge_no;
int sge_bytes;
int page_no;
int ret;
/* Post a recv buffer to handle another request. */
ret = svc_rdma_post_recv(rdma);
if (ret) {
printk(KERN_INFO
"svcrdma: could not post a receive buffer, err=%d."
"Closing transport %p.\n", ret, rdma);
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
svc_rdma_put_frmr(rdma, vec->frmr);
svc_rdma_put_context(ctxt, 0);
return -ENOTCONN;
}
/* Prepare the context */
ctxt->pages[0] = page;
ctxt->count = 1;
ctxt->frmr = vec->frmr;
if (vec->frmr)
set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
else
clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
/* Prepare the SGE for the RPCRDMA Header */
ctxt->sge[0].lkey = rdma->sc_dma_lkey;
ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
ctxt->sge[0].addr =
ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
ctxt->sge[0].length, DMA_TO_DEVICE);
if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
goto err;
atomic_inc(&rdma->sc_dma_used);
ctxt->direction = DMA_TO_DEVICE;
/* Determine how many of our SGE are to be transmitted */
for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
byte_count -= sge_bytes;
if (!vec->frmr) {
ctxt->sge[sge_no].addr =
ib_dma_map_single(rdma->sc_cm_id->device,
vec->sge[sge_no].iov_base,
sge_bytes, DMA_TO_DEVICE);
if (ib_dma_mapping_error(rdma->sc_cm_id->device,
ctxt->sge[sge_no].addr))
goto err;
atomic_inc(&rdma->sc_dma_used);
ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
} else {
ctxt->sge[sge_no].addr = (unsigned long)
vec->sge[sge_no].iov_base;
ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
}
ctxt->sge[sge_no].length = sge_bytes;
}
BUG_ON(byte_count != 0);
/* Save all respages in the ctxt and remove them from the
* respages array. They are our pages until the I/O
* completes.
*/
for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
ctxt->count++;
rqstp->rq_respages[page_no] = NULL;
/*
* If there are more pages than SGE, terminate SGE
* list so that svc_rdma_unmap_dma doesn't attempt to
* unmap garbage.
*/
if (page_no+1 >= sge_no)
ctxt->sge[page_no+1].length = 0;
}
BUG_ON(sge_no > rdma->sc_max_sge);
memset(&send_wr, 0, sizeof send_wr);
ctxt->wr_op = IB_WR_SEND;
send_wr.wr_id = (unsigned long)ctxt;
send_wr.sg_list = ctxt->sge;
send_wr.num_sge = sge_no;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
if (vec->frmr) {
/* Prepare INVALIDATE WR */
memset(&inv_wr, 0, sizeof inv_wr);
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.send_flags = IB_SEND_SIGNALED;
//.........这里部分代码省略.........
开发者ID:flwh,项目名称:Alcatel_OT_985_kernel,代码行数:101,代码来源:svc_rdma_sendto.c
注:本文中的ib_dma_map_single函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论