Documentation/infiniband/user_mad.txt (+9 −4)

@@ -26,6 +26,11 @@ Creating MAD agents
   ioctl.  Also, all agents registered through a file descriptor will
   be unregistered when the descriptor is closed.

+  2014 -- a new registration ioctl is now provided which allows additional
+       fields to be provided during registration.
+       Users of this registration call are implicitly setting the use of
+       pkey_index (see below).
+
 Receiving MADs

   MADs are received using read().  The receive side now supports

@@ -104,10 +109,10 @@ P_Key Index Handling

   The old ib_umad interface did not allow setting the P_Key index for
   MADs that are sent and did not provide a way for obtaining the P_Key
   index of received MADs.  A new layout for struct ib_user_mad_hdr
-  with a pkey_index member has been defined; however, to preserve
-  binary compatibility with older applications, this new layout will
-  not be used unless the IB_USER_MAD_ENABLE_PKEY ioctl is called
-  before a file descriptor is used for anything else.
+  with a pkey_index member has been defined; however, to preserve binary
+  compatibility with older applications, this new layout will not be used
+  unless one of IB_USER_MAD_ENABLE_PKEY or IB_USER_MAD_REGISTER_AGENT2 ioctl's
+  are called before a file descriptor is used for anything else.

   In September 2008, the IB_USER_MAD_ABI_VERSION will be incremented
   to 6, the new layout of struct ib_user_mad_hdr will be used by
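As the documentation above notes, the pkey_index-aware struct ib_user_mad_hdr layout is only activated when one of the two ioctls is issued before the file descriptor is used for anything else. A minimal user-space sketch of that step follows; the device path and the uapi header location are assumptions for a typical system, not something this patch defines:

/*
 * Sketch only: enable the pkey_index-aware header layout as the very first
 * operation on a umad file descriptor.  "/dev/infiniband/umad0" and
 * <rdma/ib_user_mad.h> are illustrative assumptions.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>   /* IB_USER_MAD_ENABLE_PKEY */

int open_umad_with_pkey(const char *path)   /* e.g. "/dev/infiniband/umad0" */
{
    int fd = open(path, O_RDWR);

    if (fd < 0)
        return -1;

    /* Must be the first ioctl on the fd; afterwards every MAD read and
     * write uses the header layout that carries pkey_index. */
    if (ioctl(fd, IB_USER_MAD_ENABLE_PKEY) < 0) {
        perror("IB_USER_MAD_ENABLE_PKEY");
        close(fd);
        return -1;
    }
    return fd;
}

An application that registers through the new IB_USER_MAD_REGISTER_AGENT2 call gets the same layout implicitly, so it does not need the explicit enable step.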
drivers/infiniband/core/agent.c (+8 −8)

@@ -94,14 +94,14 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
     port_priv = ib_get_agent_port(device, port_num);
     if (!port_priv) {
-        printk(KERN_ERR SPFX "Unable to find port agent\n");
+        dev_err(&device->dev, "Unable to find port agent\n");
         return;
     }

     agent = port_priv->agent[qpn];
     ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
     if (IS_ERR(ah)) {
-        printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
+        dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
             PTR_ERR(ah));
         return;
     }

@@ -110,7 +110,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
                       IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                       GFP_KERNEL);
     if (IS_ERR(send_buf)) {
-        printk(KERN_ERR SPFX "ib_create_send_mad error\n");
+        dev_err(&device->dev, "ib_create_send_mad error\n");
         goto err1;
     }

@@ -125,7 +125,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
     }

     if (ib_post_send_mad(send_buf, NULL)) {
-        printk(KERN_ERR SPFX "ib_post_send_mad error\n");
+        dev_err(&device->dev, "ib_post_send_mad error\n");
         goto err2;
     }
     return;

@@ -151,7 +151,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
     /* Create new device info */
     port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
     if (!port_priv) {
-        printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
+        dev_err(&device->dev, "No memory for ib_agent_port_private\n");
         ret = -ENOMEM;
         goto error1;
     }

@@ -161,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
     port_priv->agent[0] = ib_register_mad_agent(device, port_num,
                              IB_QPT_SMI, NULL, 0,
                              &agent_send_handler,
-                             NULL, NULL);
+                             NULL, NULL, 0);
     if (IS_ERR(port_priv->agent[0])) {
         ret = PTR_ERR(port_priv->agent[0]);
         goto error2;

@@ -172,7 +172,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
     port_priv->agent[1] = ib_register_mad_agent(device, port_num,
                              IB_QPT_GSI, NULL, 0,
                              &agent_send_handler,
-                             NULL, NULL);
+                             NULL, NULL, 0);
     if (IS_ERR(port_priv->agent[1])) {
         ret = PTR_ERR(port_priv->agent[1]);
         goto error3;

@@ -202,7 +202,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
     port_priv = __ib_get_agent_port(device, port_num);
     if (port_priv == NULL) {
         spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
-        printk(KERN_ERR SPFX "Port %d not found\n", port_num);
+        dev_err(&device->dev, "Port %d not found\n", port_num);
         return -ENODEV;
     }
     list_del(&port_priv->port_list);

drivers/infiniband/core/cm.c (+3 −2)

@@ -3753,7 +3753,7 @@ static void cm_add_one(struct ib_device *ib_device)
     struct cm_port *port;
     struct ib_mad_reg_req reg_req = {
         .mgmt_class = IB_MGMT_CLASS_CM,
-        .mgmt_class_version = IB_CM_CLASS_VERSION
+        .mgmt_class_version = IB_CM_CLASS_VERSION,
     };
     struct ib_port_modify port_modify = {
         .set_port_cap_mask = IB_PORT_CM_SUP

@@ -3801,7 +3801,8 @@ static void cm_add_one(struct ib_device *ib_device)
                             0,
                             cm_send_handler,
                             cm_recv_handler,
-                            port);
+                            port,
+                            0);
     if (IS_ERR(port->mad_agent))
         goto error2;

drivers/infiniband/core/iwcm.c (+27 −0)

@@ -46,6 +46,7 @@
 #include <linux/completion.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>

 #include <rdma/iw_cm.h>
 #include <rdma/ib_addr.h>

@@ -65,6 +66,20 @@ struct iwcm_work {
     struct list_head free_list;
 };

+static unsigned int default_backlog = 256;
+
+static struct ctl_table_header *iwcm_ctl_table_hdr;
+static struct ctl_table iwcm_ctl_table[] = {
+    {
+        .procname     = "default_backlog",
+        .data         = &default_backlog,
+        .maxlen       = sizeof(default_backlog),
+        .mode         = 0644,
+        .proc_handler = proc_dointvec,
+    },
+    { }
+};
+
 /*
  * The following services provide a mechanism for pre-allocating iwcm_work
  * elements. The design pre-allocates them based on the cm_id type:

@@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)

     cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

+    if (!backlog)
+        backlog = default_backlog;
+
     ret = alloc_work_entries(cm_id_priv, backlog);
     if (ret)
         return ret;

@@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void)
     if (!iwcm_wq)
         return -ENOMEM;

+    iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
+                                             iwcm_ctl_table);
+    if (!iwcm_ctl_table_hdr) {
+        pr_err("iw_cm: couldn't register sysctl paths\n");
+        destroy_workqueue(iwcm_wq);
+        return -ENOMEM;
+    }
+
     return 0;
 }

 static void __exit iw_cm_cleanup(void)
 {
+    unregister_net_sysctl_table(iwcm_ctl_table_hdr);
     destroy_workqueue(iwcm_wq);
 }
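The iwcm.c change above only affects callers that pass a backlog of 0 to iw_cm_listen(): they now inherit a tunable default (256) instead of a fixed value. Based on the register_net_sysctl() call in the hunk, the knob should surface as /proc/sys/net/iw_cm/default_backlog; a small user-space sketch of adjusting it (assuming sysctl is mounted at /proc/sys):

/* Sketch only: raise the iw_cm default listen backlog from user space. */
#include <stdio.h>

int set_iwcm_default_backlog(unsigned int backlog)
{
    FILE *f = fopen("/proc/sys/net/iw_cm/default_backlog", "w");

    if (!f)
        return -1;
    fprintf(f, "%u\n", backlog);    /* kernel parses it with proc_dointvec */
    return fclose(f);
}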
drivers/infiniband/core/mad.c (+182 −101)

@@ -33,6 +33,9 @@
  * SOFTWARE.
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>

@@ -195,7 +198,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                        u8 rmpp_version,
                        ib_mad_send_handler send_handler,
                        ib_mad_recv_handler recv_handler,
-                       void *context)
+                       void *context,
+                       u32 registration_flags)
 {
     struct ib_mad_port_private *port_priv;
     struct ib_mad_agent *ret = ERR_PTR(-EINVAL);

@@ -211,68 +215,109 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     /* Validate parameters */
     qpn = get_spl_qp_index(qp_type);
-    if (qpn == -1)
+    if (qpn == -1) {
+        dev_notice(&device->dev,
+            "ib_register_mad_agent: invalid QP Type %d\n", qp_type);
         goto error1;
+    }

-    if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
+    if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
+        dev_notice(&device->dev,
+            "ib_register_mad_agent: invalid RMPP Version %u\n", rmpp_version);
         goto error1;
+    }

     /* Validate MAD registration request if supplied */
     if (mad_reg_req) {
-        if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
+        if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
+            dev_notice(&device->dev,
+                "ib_register_mad_agent: invalid Class Version %u\n",
+                mad_reg_req->mgmt_class_version);
             goto error1;
-        if (!recv_handler)
+        }
+        if (!recv_handler) {
+            dev_notice(&device->dev,
+                "ib_register_mad_agent: no recv_handler\n");
             goto error1;
+        }
         if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
             /*
              * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
              * one in this range currently allowed
              */
             if (mad_reg_req->mgmt_class !=
-                IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+                IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+                dev_notice(&device->dev,
+                    "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
+                    mad_reg_req->mgmt_class);
                 goto error1;
+            }
         } else if (mad_reg_req->mgmt_class == 0) {
             /*
              * Class 0 is reserved in IBA and is used for
              * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
              */
+            dev_notice(&device->dev,
+                "ib_register_mad_agent: Invalid Mgmt Class 0\n");
             goto error1;
         } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
             /*
              * If class is in "new" vendor range,
              * ensure supplied OUI is not zero
              */
-            if (!is_vendor_oui(mad_reg_req->oui))
+            if (!is_vendor_oui(mad_reg_req->oui)) {
+                dev_notice(&device->dev,
+                    "ib_register_mad_agent: No OUI specified for class 0x%x\n",
+                    mad_reg_req->mgmt_class);
                 goto error1;
+            }
         }
         /* Make sure class supplied is consistent with RMPP */
         if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
-            if (rmpp_version)
+            if (rmpp_version) {
+                dev_notice(&device->dev,
+                    "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
+                    mad_reg_req->mgmt_class);
                 goto error1;
+            }
         }

         /* Make sure class supplied is consistent with QP type */
         if (qp_type == IB_QPT_SMI) {
             if ((mad_reg_req->mgmt_class !=
                     IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                 (mad_reg_req->mgmt_class !=
-                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+                dev_notice(&device->dev,
+                    "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
+                    mad_reg_req->mgmt_class);
                 goto error1;
+            }
         } else {
             if ((mad_reg_req->mgmt_class ==
                     IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                 (mad_reg_req->mgmt_class ==
-                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+                dev_notice(&device->dev,
+                    "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
+                    mad_reg_req->mgmt_class);
                 goto error1;
+            }
         }
     } else {
         /* No registration request supplied */
         if (!send_handler)
             goto error1;
+        if (registration_flags & IB_MAD_USER_RMPP)
+            goto error1;
     }

     /* Validate device and port */
     port_priv = ib_get_mad_port(device, port_num);
     if (!port_priv) {
+        dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
         ret = ERR_PTR(-ENODEV);
         goto error1;
     }

@@ -280,6 +325,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     /* Verify the QP requested is supported.  For example, Ethernet devices
      * will not have QP0 */
     if (!port_priv->qp_info[qpn].qp) {
+        dev_notice(&device->dev,
+            "ib_register_mad_agent: QP %d not supported\n", qpn);
         ret = ERR_PTR(-EPROTONOSUPPORT);
         goto error1;
     }

@@ -316,6 +363,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     mad_agent_priv->agent.context = context;
     mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
     mad_agent_priv->agent.port_num = port_num;
+    mad_agent_priv->agent.flags = registration_flags;
     spin_lock_init(&mad_agent_priv->lock);
     INIT_LIST_HEAD(&mad_agent_priv->send_list);
     INIT_LIST_HEAD(&mad_agent_priv->wait_list);

@@ -706,7 +754,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
         IB_SMI_DISCARD) {
         ret = -EINVAL;
-        printk(KERN_ERR PFX "Invalid directed route\n");
+        dev_err(&device->dev, "Invalid directed route\n");
         goto out;
     }

@@ -718,7 +766,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
     local = kmalloc(sizeof *local, GFP_ATOMIC);
     if (!local) {
         ret = -ENOMEM;
-        printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
+        dev_err(&device->dev, "No memory for ib_mad_local_private\n");
         goto out;
     }
     local->mad_priv = NULL;

@@ -726,7 +774,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
     mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
     if (!mad_priv) {
         ret = -ENOMEM;
-        printk(KERN_ERR PFX "No memory for local response MAD\n");
+        dev_err(&device->dev, "No memory for local response MAD\n");
         kfree(local);
         goto out;
     }

@@ -837,8 +885,8 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
     for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
         seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
         if (!seg) {
-            printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
-                   "alloc failed for len %zd, gfp %#x\n",
+            dev_err(&send_buf->mad_agent->device->dev,
+                "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
                    sizeof (*seg) + seg_size, gfp_mask);
             free_send_rmpp_list(send_wr);
             return -ENOMEM;

@@ -862,6 +910,12 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
     return 0;
 }

+int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
+{
+    return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
+}
+EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
+
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
                         u32 remote_qpn, u16 pkey_index,
                         int rmpp_active,

@@ -878,9 +932,11 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
     pad = get_pad_size(hdr_len, data_len);
     message_size = hdr_len + data_len + pad;

-    if ((!mad_agent->rmpp_version &&
-         (rmpp_active || message_size > sizeof(struct ib_mad))) ||
-        (!rmpp_active && message_size > sizeof(struct ib_mad)))
+    if (ib_mad_kernel_rmpp_agent(mad_agent)) {
+        if (!rmpp_active && message_size > sizeof(struct ib_mad))
+            return ERR_PTR(-EINVAL);
+    } else
+        if (rmpp_active || message_size > sizeof(struct ib_mad))
             return ERR_PTR(-EINVAL);

     size = rmpp_active ? hdr_len : sizeof(struct ib_mad);

@@ -1135,7 +1191,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                   &mad_agent_priv->send_list);
         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

-        if (mad_agent_priv->agent.rmpp_version) {
+        if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
             ret = ib_send_rmpp_mad(mad_send_wr);
             if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
                 ret = ib_send_mad(mad_send_wr);

@@ -1199,7 +1255,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp);
 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
               struct ib_wc *wc)
 {
-    printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
+    dev_err(&mad_agent->device->dev,
+        "ib_process_mad_wc() not implemented yet\n");
     return 0;
 }
 EXPORT_SYMBOL(ib_process_mad_wc);

@@ -1211,7 +1268,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
     for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
         if ((*method)->agent[i]) {
-            printk(KERN_ERR PFX "Method %d already in use\n", i);
+            pr_err("Method %d already in use\n", i);
             return -EINVAL;
         }
     }

@@ -1223,8 +1280,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
     /* Allocate management method table */
     *method = kzalloc(sizeof **method, GFP_ATOMIC);
     if (!*method) {
-        printk(KERN_ERR PFX "No memory for "
-               "ib_mad_mgmt_method_table\n");
+        pr_err("No memory for ib_mad_mgmt_method_table\n");
         return -ENOMEM;
     }

@@ -1319,8 +1375,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
     /* Allocate management class table for "new" class version */
     *class = kzalloc(sizeof **class, GFP_ATOMIC);
     if (!*class) {
-        printk(KERN_ERR PFX "No memory for "
-               "ib_mad_mgmt_class_table\n");
+        dev_err(&agent_priv->agent.device->dev,
+            "No memory for ib_mad_mgmt_class_table\n");
         ret = -ENOMEM;
         goto error1;
     }

@@ -1386,8 +1442,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
     /* Allocate mgmt vendor class table for "new" class version */
     vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
     if (!vendor) {
-        printk(KERN_ERR PFX "No memory for "
-               "ib_mad_mgmt_vendor_class_table\n");
+        dev_err(&agent_priv->agent.device->dev,
+            "No memory for ib_mad_mgmt_vendor_class_table\n");
         goto error1;
     }

@@ -1397,8 +1453,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
     /* Allocate table for this management vendor class */
     vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
     if (!vendor_class) {
-        printk(KERN_ERR PFX "No memory for "
-               "ib_mad_mgmt_vendor_class\n");
+        dev_err(&agent_priv->agent.device->dev,
+            "No memory for ib_mad_mgmt_vendor_class\n");
         goto error2;
     }

@@ -1429,7 +1485,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
             goto check_in_use;
         }
     }
-    printk(KERN_ERR PFX "All OUI slots in use\n");
+    dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
     goto error3;

 check_in_use:

@@ -1640,8 +1696,8 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
         if (mad_agent->agent.recv_handler)
             atomic_inc(&mad_agent->refcount);
         else {
-            printk(KERN_NOTICE PFX "No receive handler for client "
-                   "%p on port %d\n",
+            dev_notice(&port_priv->device->dev,
+                   "No receive handler for client %p on port %d\n",
                    &mad_agent->agent, port_priv->port_num);
             mad_agent = NULL;
         }

@@ -1658,8 +1714,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num)
     /* Make sure MAD base version is understood */
     if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
-        printk(KERN_ERR PFX "MAD received with unsupported base "
-               "version %d\n", mad->mad_hdr.base_version);
+        pr_err("MAD received with unsupported base version %d\n",
+               mad->mad_hdr.base_version);
         goto out;
     }

@@ -1685,6 +1741,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
     rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
     return !mad_agent_priv->agent.rmpp_version ||
+        !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
         !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
         (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);

@@ -1812,7 +1869,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
     INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
     list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
-    if (mad_agent_priv->agent.rmpp_version) {
+    if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
         mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
                               mad_recv_wc);
         if (!mad_recv_wc) {

@@ -1827,10 +1884,25 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
-        mad_send_wr = ib_find_send_mad(mad_agent_priv,
-                           mad_recv_wc);
+        mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
         if (!mad_send_wr) {
             spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-            ib_free_recv_mad(mad_recv_wc);
-            deref_mad_agent(mad_agent_priv);
-            return;
-        }
-        ib_mark_mad_done(mad_send_wr);
-        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+            if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
+               && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
+               && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
+                    & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+                /* user rmpp is in effect
+                 * and this is an active RMPP MAD
+                 */
+                mad_recv_wc->wc->wr_id = 0;
+                mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+                                   mad_recv_wc);
+                atomic_dec(&mad_agent_priv->refcount);
+            } else {
+                /* not user rmpp, revert to normal behavior and
+                 * drop the mad */
+                ib_free_recv_mad(mad_recv_wc);
+                deref_mad_agent(mad_agent_priv);
+                return;
+            }
+        } else {
+            ib_mark_mad_done(mad_send_wr);
+            spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

@@ -1844,6 +1916,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
             mad_send_wc.vendor_err = 0;
             mad_send_wc.send_buf = &mad_send_wr->send_buf;
             ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+        }
     } else {
         mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
                            mad_recv_wc);

@@ -1911,8 +1984,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
     response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
     if (!response) {
-        printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
-               "for response buffer\n");
+        dev_err(&port_priv->device->dev,
+            "ib_mad_recv_done_handler no memory for response buffer\n");
         goto out;
     }

@@ -2083,7 +2156,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
     mad_agent_priv = mad_send_wr->mad_agent_priv;
     spin_lock_irqsave(&mad_agent_priv->lock, flags);
-    if (mad_agent_priv->agent.rmpp_version) {
+    if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
         ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
         if (ret == IB_RMPP_RESULT_CONSUMED)
             goto done;

@@ -2176,7 +2249,8 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
         ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
                    &bad_send_wr);
         if (ret) {
-            printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
+            dev_err(&port_priv->device->dev,
+                "ib_post_send failed: %d\n", ret);
             mad_send_wr = queued_send_wr;
             wc->status = IB_WC_LOC_QP_OP_ERR;
             goto retry;

@@ -2248,8 +2322,9 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
                    IB_QP_STATE | IB_QP_CUR_STATE);
             kfree(attr);
             if (ret)
-                printk(KERN_ERR PFX "mad_error_handler - "
-                       "ib_modify_qp to RTS : %d\n", ret);
+                dev_err(&port_priv->device->dev,
+                    "mad_error_handler - ib_modify_qp to RTS : %d\n", ret);
             else
                 mark_sends_for_retry(qp_info);

@@ -2408,7 +2483,8 @@ static void local_completions(struct work_struct *work)
         if (local->mad_priv) {
             recv_mad_agent = local->recv_mad_agent;
             if (!recv_mad_agent) {
-                printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+                dev_err(&mad_agent_priv->agent.device->dev,
+                    "No receive MAD agent for local completion\n");
                 free_mad = 1;
                 goto local_send_completion;
             }

@@ -2476,7 +2552,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
     mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

-    if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
+    if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
         ret = ib_retry_rmpp(mad_send_wr);
         switch (ret) {
         case IB_RMPP_RESULT_UNHANDLED:

@@ -2589,7 +2665,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
         } else {
             mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
             if (!mad_priv) {
-                printk(KERN_ERR PFX "No memory for receive buffer\n");
+                dev_err(&qp_info->port_priv->device->dev,
+                    "No memory for receive buffer\n");
                 ret = -ENOMEM;
                 break;
             }

@@ -2625,7 +2702,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                         sizeof mad_priv->header,
                         DMA_FROM_DEVICE);
             kmem_cache_free(ib_mad_cache, mad_priv);
-            printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
+            dev_err(&qp_info->port_priv->device->dev,
+                "ib_post_recv failed: %d\n", ret);
             break;
         }
     } while (post);

@@ -2681,7 +2759,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
     attr = kmalloc(sizeof *attr, GFP_KERNEL);
     if (!attr) {
-        printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
+        dev_err(&port_priv->device->dev,
+            "Couldn't kmalloc ib_qp_attr\n");
         return -ENOMEM;
     }

@@ -2705,16 +2784,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
         ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                          IB_QP_PKEY_INDEX | IB_QP_QKEY);
         if (ret) {
-            printk(KERN_ERR PFX "Couldn't change QP%d state to "
-                   "INIT: %d\n", i, ret);
+            dev_err(&port_priv->device->dev,
+                "Couldn't change QP%d state to INIT: %d\n", i, ret);
             goto out;
         }

         attr->qp_state = IB_QPS_RTR;
         ret = ib_modify_qp(qp, attr, IB_QP_STATE);
         if (ret) {
-            printk(KERN_ERR PFX "Couldn't change QP%d state to "
-                   "RTR: %d\n", i, ret);
+            dev_err(&port_priv->device->dev,
+                "Couldn't change QP%d state to RTR: %d\n", i, ret);
             goto out;
         }

@@ -2722,16 +2803,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
         attr->sq_psn = IB_MAD_SEND_Q_PSN;
         ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
         if (ret) {
-            printk(KERN_ERR PFX "Couldn't change QP%d state to "
-                   "RTS: %d\n", i, ret);
+            dev_err(&port_priv->device->dev,
+                "Couldn't change QP%d state to RTS: %d\n", i, ret);
             goto out;
         }
     }

     ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
     if (ret) {
-        printk(KERN_ERR PFX "Failed to request completion "
-               "notification: %d\n", ret);
+        dev_err(&port_priv->device->dev,
+            "Failed to request completion notification: %d\n", ret);
         goto out;
     }

@@ -2741,7 +2824,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
         ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
         if (ret) {
-            printk(KERN_ERR PFX "Couldn't post receive WRs\n");
+            dev_err(&port_priv->device->dev,
+                "Couldn't post receive WRs\n");
             goto out;
         }
     }

@@ -2755,7 +2839,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
     struct ib_mad_qp_info *qp_info = qp_context;

     /* It's worse than that! He's dead, Jim! */
-    printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
+    dev_err(&qp_info->port_priv->device->dev,
+        "Fatal error (%d) on MAD QP (%d)\n",
            event->event, qp_info->qp->qp_num);
 }

@@ -2801,7 +2886,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
     qp_init_attr.event_handler = qp_event_handler;
     qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
     if (IS_ERR(qp_info->qp)) {
-        printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
+        dev_err(&qp_info->port_priv->device->dev,
+            "Couldn't create ib_mad QP%d\n",
                get_spl_qp_index(qp_type));
         ret = PTR_ERR(qp_info->qp);
         goto error;

@@ -2840,7 +2926,7 @@ static int ib_mad_port_open(struct ib_device *device,
     /* Create new device info */
     port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
     if (!port_priv) {
-        printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
+        dev_err(&device->dev, "No memory for ib_mad_port_private\n");
         return -ENOMEM;
     }

@@ -2860,21 +2946,21 @@ static int ib_mad_port_open(struct ib_device *device,
                     ib_mad_thread_completion_handler,
                     NULL, port_priv, cq_size, 0);
     if (IS_ERR(port_priv->cq)) {
-        printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
+        dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
         ret = PTR_ERR(port_priv->cq);
         goto error3;
     }

     port_priv->pd = ib_alloc_pd(device);
     if (IS_ERR(port_priv->pd)) {
-        printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
+        dev_err(&device->dev, "Couldn't create ib_mad PD\n");
         ret = PTR_ERR(port_priv->pd);
         goto error4;
     }

     port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
     if (IS_ERR(port_priv->mr)) {
-        printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
+        dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
         ret = PTR_ERR(port_priv->mr);
         goto error5;
     }

@@ -2902,7 +2988,7 @@ static int ib_mad_port_open(struct ib_device *device,
     ret = ib_mad_port_start(port_priv);
     if (ret) {
-        printk(KERN_ERR PFX "Couldn't start port\n");
+        dev_err(&device->dev, "Couldn't start port\n");
         goto error9;
     }

@@ -2946,7 +3032,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
     port_priv = __ib_get_mad_port(device, port_num);
     if (port_priv == NULL) {
         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
-        printk(KERN_ERR PFX "Port %d not found\n", port_num);
+        dev_err(&device->dev, "Port %d not found\n", port_num);
         return -ENODEV;
     }
     list_del_init(&port_priv->port_list);

@@ -2984,14 +3070,12 @@ static void ib_mad_init_device(struct ib_device *device)
     for (i = start; i <= end; i++) {
         if (ib_mad_port_open(device, i)) {
-            printk(KERN_ERR PFX "Couldn't open %s port %d\n",
-                   device->name, i);
+            dev_err(&device->dev, "Couldn't open port %d\n", i);
             goto error;
         }
         if (ib_agent_port_open(device, i)) {
-            printk(KERN_ERR PFX "Couldn't open %s port %d "
-                   "for agents\n",
-                   device->name, i);
+            dev_err(&device->dev,
+                "Couldn't open port %d for agents\n", i);
             goto error_agent;
         }
     }

@@ -2999,20 +3083,17 @@ static void ib_mad_init_device(struct ib_device *device)
 error_agent:
     if (ib_mad_port_close(device, i))
-        printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-               device->name, i);
+        dev_err(&device->dev, "Couldn't close port %d\n", i);

 error:
     i--;
     while (i >= start) {
         if (ib_agent_port_close(device, i))
-            printk(KERN_ERR PFX "Couldn't close %s port %d "
-                   "for agents\n",
-                   device->name, i);
+            dev_err(&device->dev,
+                "Couldn't close port %d for agents\n", i);
         if (ib_mad_port_close(device, i))
-            printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-                   device->name, i);
+            dev_err(&device->dev, "Couldn't close port %d\n", i);
         i--;
     }
 }

@@ -3033,12 +3114,12 @@ static void ib_mad_remove_device(struct ib_device *device)
     }
     for (i = 0; i < num_ports; i++, cur_port++) {
         if (ib_agent_port_close(device, cur_port))
-            printk(KERN_ERR PFX "Couldn't close %s port %d "
-                   "for agents\n",
-                   device->name, cur_port);
+            dev_err(&device->dev,
+                "Couldn't close port %d for agents\n",
+                cur_port);
         if (ib_mad_port_close(device, cur_port))
-            printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-                   device->name, cur_port);
+            dev_err(&device->dev, "Couldn't close port %d\n",
+                cur_port);
     }
 }

@@ -3064,7 +3145,7 @@ static int __init ib_mad_init_module(void)
                      SLAB_HWCACHE_ALIGN,
                      NULL);
     if (!ib_mad_cache) {
-        printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
+        pr_err("Couldn't create ib_mad cache\n");
         ret = -ENOMEM;
         goto error1;
     }

@@ -3072,7 +3153,7 @@ static int __init ib_mad_init_module(void)
     INIT_LIST_HEAD(&ib_mad_port_list);

     if (ib_register_client(&mad_client)) {
-        printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
+        pr_err("Couldn't register ib_mad client\n");
         ret = -EINVAL;
         goto error2;
     }
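For in-kernel MAD clients, the only source-level impact of the mad.c changes above is the extra registration_flags argument to ib_register_mad_agent(); existing kernel users pass 0, as the agent.c and cm.c hunks show, and IB_MAD_USER_RMPP is intended for user-space agents registered through umad. A hedged sketch of an updated caller (the function name, handlers, and reg_req contents below are placeholders, not part of the patch):

/*
 * Sketch only: adapting an in-kernel MAD client to the new
 * ib_register_mad_agent() prototype by passing registration_flags = 0.
 */
#include <rdma/ib_mad.h>

static struct ib_mad_agent *example_register(struct ib_device *device,
                                             u8 port_num,
                                             struct ib_mad_reg_req *reg_req,
                                             ib_mad_send_handler send_handler,
                                             ib_mad_recv_handler recv_handler,
                                             void *context)
{
    return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
                                 reg_req, 0,        /* no RMPP */
                                 send_handler, recv_handler,
                                 context, 0);       /* registration_flags */
}

Agents registered this way report false from the new ib_mad_kernel_rmpp_agent() helper only if rmpp_version is 0 or IB_MAD_USER_RMPP is set, so kernel RMPP handling keeps its previous behaviour.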
Documentation/infiniband/user_mad.txt +9 −4 Original line number Original line Diff line number Diff line Loading @@ -26,6 +26,11 @@ Creating MAD agents ioctl. Also, all agents registered through a file descriptor will ioctl. Also, all agents registered through a file descriptor will be unregistered when the descriptor is closed. be unregistered when the descriptor is closed. 2014 -- a new registration ioctl is now provided which allows additional fields to be provided during registration. Users of this registration call are implicitly setting the use of pkey_index (see below). Receiving MADs Receiving MADs MADs are received using read(). The receive side now supports MADs are received using read(). The receive side now supports Loading Loading @@ -104,10 +109,10 @@ P_Key Index Handling The old ib_umad interface did not allow setting the P_Key index for The old ib_umad interface did not allow setting the P_Key index for MADs that are sent and did not provide a way for obtaining the P_Key MADs that are sent and did not provide a way for obtaining the P_Key index of received MADs. A new layout for struct ib_user_mad_hdr index of received MADs. A new layout for struct ib_user_mad_hdr with a pkey_index member has been defined; however, to preserve with a pkey_index member has been defined; however, to preserve binary binary compatibility with older applications, this new layout will compatibility with older applications, this new layout will not be used not be used unless the IB_USER_MAD_ENABLE_PKEY ioctl is called unless one of IB_USER_MAD_ENABLE_PKEY or IB_USER_MAD_REGISTER_AGENT2 ioctl's before a file descriptor is used for anything else. are called before a file descriptor is used for anything else. In September 2008, the IB_USER_MAD_ABI_VERSION will be incremented In September 2008, the IB_USER_MAD_ABI_VERSION will be incremented to 6, the new layout of struct ib_user_mad_hdr will be used by to 6, the new layout of struct ib_user_mad_hdr will be used by Loading
drivers/infiniband/core/agent.c +8 −8 Original line number Original line Diff line number Diff line Loading @@ -94,14 +94,14 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, port_priv = ib_get_agent_port(device, port_num); port_priv = ib_get_agent_port(device, port_num); if (!port_priv) { if (!port_priv) { printk(KERN_ERR SPFX "Unable to find port agent\n"); dev_err(&device->dev, "Unable to find port agent\n"); return; return; } } agent = port_priv->agent[qpn]; agent = port_priv->agent[qpn]; ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); if (IS_ERR(ah)) { if (IS_ERR(ah)) { printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n", dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n", PTR_ERR(ah)); PTR_ERR(ah)); return; return; } } Loading @@ -110,7 +110,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_KERNEL); GFP_KERNEL); if (IS_ERR(send_buf)) { if (IS_ERR(send_buf)) { printk(KERN_ERR SPFX "ib_create_send_mad error\n"); dev_err(&device->dev, "ib_create_send_mad error\n"); goto err1; goto err1; } } Loading @@ -125,7 +125,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, } } if (ib_post_send_mad(send_buf, NULL)) { if (ib_post_send_mad(send_buf, NULL)) { printk(KERN_ERR SPFX "ib_post_send_mad error\n"); dev_err(&device->dev, "ib_post_send_mad error\n"); goto err2; goto err2; } } return; return; Loading @@ -151,7 +151,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) /* Create new device info */ /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) { if (!port_priv) { printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n"); dev_err(&device->dev, "No memory for ib_agent_port_private\n"); ret = -ENOMEM; ret = -ENOMEM; goto error1; goto error1; } } Loading @@ -161,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) port_priv->agent[0] = ib_register_mad_agent(device, port_num, port_priv->agent[0] = ib_register_mad_agent(device, port_num, IB_QPT_SMI, NULL, 0, IB_QPT_SMI, NULL, 0, &agent_send_handler, &agent_send_handler, NULL, NULL); NULL, NULL, 0); if (IS_ERR(port_priv->agent[0])) { if (IS_ERR(port_priv->agent[0])) { ret = PTR_ERR(port_priv->agent[0]); ret = PTR_ERR(port_priv->agent[0]); goto error2; goto error2; Loading @@ -172,7 +172,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) port_priv->agent[1] = ib_register_mad_agent(device, port_num, port_priv->agent[1] = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0, IB_QPT_GSI, NULL, 0, &agent_send_handler, &agent_send_handler, NULL, NULL); NULL, NULL, 0); if (IS_ERR(port_priv->agent[1])) { if (IS_ERR(port_priv->agent[1])) { ret = PTR_ERR(port_priv->agent[1]); ret = PTR_ERR(port_priv->agent[1]); goto error3; goto error3; Loading Loading @@ -202,7 +202,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num) port_priv = __ib_get_agent_port(device, port_num); port_priv = __ib_get_agent_port(device, port_num); if (port_priv == NULL) { if (port_priv == NULL) { spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); printk(KERN_ERR SPFX "Port %d not found\n", port_num); dev_err(&device->dev, "Port %d not found\n", port_num); return -ENODEV; return -ENODEV; } } list_del(&port_priv->port_list); 
list_del(&port_priv->port_list); Loading
drivers/infiniband/core/cm.c +3 −2 Original line number Original line Diff line number Diff line Loading @@ -3753,7 +3753,7 @@ static void cm_add_one(struct ib_device *ib_device) struct cm_port *port; struct cm_port *port; struct ib_mad_reg_req reg_req = { struct ib_mad_reg_req reg_req = { .mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class_version = IB_CM_CLASS_VERSION .mgmt_class_version = IB_CM_CLASS_VERSION, }; }; struct ib_port_modify port_modify = { struct ib_port_modify port_modify = { .set_port_cap_mask = IB_PORT_CM_SUP .set_port_cap_mask = IB_PORT_CM_SUP Loading Loading @@ -3801,7 +3801,8 @@ static void cm_add_one(struct ib_device *ib_device) 0, 0, cm_send_handler, cm_send_handler, cm_recv_handler, cm_recv_handler, port); port, 0); if (IS_ERR(port->mad_agent)) if (IS_ERR(port->mad_agent)) goto error2; goto error2; Loading
drivers/infiniband/core/iwcm.c +27 −0 Original line number Original line Diff line number Diff line Loading @@ -46,6 +46,7 @@ #include <linux/completion.h> #include <linux/completion.h> #include <linux/slab.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/module.h> #include <linux/sysctl.h> #include <rdma/iw_cm.h> #include <rdma/iw_cm.h> #include <rdma/ib_addr.h> #include <rdma/ib_addr.h> Loading @@ -65,6 +66,20 @@ struct iwcm_work { struct list_head free_list; struct list_head free_list; }; }; static unsigned int default_backlog = 256; static struct ctl_table_header *iwcm_ctl_table_hdr; static struct ctl_table iwcm_ctl_table[] = { { .procname = "default_backlog", .data = &default_backlog, .maxlen = sizeof(default_backlog), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; /* /* * The following services provide a mechanism for pre-allocating iwcm_work * The following services provide a mechanism for pre-allocating iwcm_work * elements. The design pre-allocates them based on the cm_id type: * elements. The design pre-allocates them based on the cm_id type: Loading Loading @@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); if (!backlog) backlog = default_backlog; ret = alloc_work_entries(cm_id_priv, backlog); ret = alloc_work_entries(cm_id_priv, backlog); if (ret) if (ret) return ret; return ret; Loading Loading @@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void) if (!iwcm_wq) if (!iwcm_wq) return -ENOMEM; return -ENOMEM; iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm", iwcm_ctl_table); if (!iwcm_ctl_table_hdr) { pr_err("iw_cm: couldn't register sysctl paths\n"); destroy_workqueue(iwcm_wq); return -ENOMEM; } return 0; return 0; } } static void __exit iw_cm_cleanup(void) static void __exit iw_cm_cleanup(void) { { unregister_net_sysctl_table(iwcm_ctl_table_hdr); destroy_workqueue(iwcm_wq); destroy_workqueue(iwcm_wq); } } Loading
drivers/infiniband/core/mad.c +182 −101 Original line number Original line Diff line number Diff line Loading @@ -33,6 +33,9 @@ * SOFTWARE. * SOFTWARE. * * */ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/dma-mapping.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/module.h> Loading Loading @@ -195,7 +198,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, u8 rmpp_version, u8 rmpp_version, ib_mad_send_handler send_handler, ib_mad_send_handler send_handler, ib_mad_recv_handler recv_handler, ib_mad_recv_handler recv_handler, void *context) void *context, u32 registration_flags) { { struct ib_mad_port_private *port_priv; struct ib_mad_port_private *port_priv; struct ib_mad_agent *ret = ERR_PTR(-EINVAL); struct ib_mad_agent *ret = ERR_PTR(-EINVAL); Loading @@ -211,68 +215,109 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, /* Validate parameters */ /* Validate parameters */ qpn = get_spl_qp_index(qp_type); qpn = get_spl_qp_index(qp_type); if (qpn == -1) if (qpn == -1) { dev_notice(&device->dev, "ib_register_mad_agent: invalid QP Type %d\n", qp_type); goto error1; goto error1; } if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { dev_notice(&device->dev, "ib_register_mad_agent: invalid RMPP Version %u\n", rmpp_version); goto error1; goto error1; } /* Validate MAD registration request if supplied */ /* Validate MAD registration request if supplied */ if (mad_reg_req) { if (mad_reg_req) { if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { dev_notice(&device->dev, "ib_register_mad_agent: invalid Class Version %u\n", mad_reg_req->mgmt_class_version); goto error1; goto error1; if (!recv_handler) } if (!recv_handler) { dev_notice(&device->dev, "ib_register_mad_agent: no recv_handler\n"); goto error1; goto error1; } if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { /* /* * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only * one in this range currently allowed * one in this range currently allowed */ */ if (mad_reg_req->mgmt_class != if (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { dev_notice(&device->dev, "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n", mad_reg_req->mgmt_class); goto error1; goto error1; } } else if (mad_reg_req->mgmt_class == 0) { } else if (mad_reg_req->mgmt_class == 0) { /* /* * Class 0 is reserved in IBA and is used for * Class 0 is reserved in IBA and is used for * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */ */ dev_notice(&device->dev, "ib_register_mad_agent: Invalid Mgmt Class 0\n"); goto error1; goto error1; } else if (is_vendor_class(mad_reg_req->mgmt_class)) { } else if (is_vendor_class(mad_reg_req->mgmt_class)) { /* /* * If class is in "new" vendor range, * If class is in "new" vendor range, * ensure supplied OUI is not zero * ensure supplied OUI is not zero */ */ if (!is_vendor_oui(mad_reg_req->oui)) if (!is_vendor_oui(mad_reg_req->oui)) { dev_notice(&device->dev, "ib_register_mad_agent: No OUI specified for class 0x%x\n", mad_reg_req->mgmt_class); goto error1; goto error1; } } } /* Make sure class supplied is consistent with RMPP */ /* Make sure class supplied is consistent with RMPP */ if 
(!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { if (rmpp_version) if (rmpp_version) { dev_notice(&device->dev, "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n", mad_reg_req->mgmt_class); goto error1; goto error1; } } } /* Make sure class supplied is consistent with QP type */ /* Make sure class supplied is consistent with QP type */ if (qp_type == IB_QPT_SMI) { if (qp_type == IB_QPT_SMI) { if ((mad_reg_req->mgmt_class != if ((mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) && IB_MGMT_CLASS_SUBN_LID_ROUTED) && (mad_reg_req->mgmt_class != (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { dev_notice(&device->dev, "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n", mad_reg_req->mgmt_class); goto error1; goto error1; } } else { } else { if ((mad_reg_req->mgmt_class == if ((mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_reg_req->mgmt_class == (mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { dev_notice(&device->dev, "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n", mad_reg_req->mgmt_class); goto error1; goto error1; } } } } else { } else { /* No registration request supplied */ /* No registration request supplied */ if (!send_handler) if (!send_handler) goto error1; goto error1; if (registration_flags & IB_MAD_USER_RMPP) goto error1; } } /* Validate device and port */ /* Validate device and port */ port_priv = ib_get_mad_port(device, port_num); port_priv = ib_get_mad_port(device, port_num); if (!port_priv) { if (!port_priv) { dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n"); ret = ERR_PTR(-ENODEV); ret = ERR_PTR(-ENODEV); goto error1; goto error1; } } Loading @@ -280,6 +325,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, /* Verify the QP requested is supported. For example, Ethernet devices /* Verify the QP requested is supported. 
For example, Ethernet devices * will not have QP0 */ * will not have QP0 */ if (!port_priv->qp_info[qpn].qp) { if (!port_priv->qp_info[qpn].qp) { dev_notice(&device->dev, "ib_register_mad_agent: QP %d not supported\n", qpn); ret = ERR_PTR(-EPROTONOSUPPORT); ret = ERR_PTR(-EPROTONOSUPPORT); goto error1; goto error1; } } Loading Loading @@ -316,6 +363,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, mad_agent_priv->agent.context = context; mad_agent_priv->agent.context = context; mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; mad_agent_priv->agent.port_num = port_num; mad_agent_priv->agent.port_num = port_num; mad_agent_priv->agent.flags = registration_flags; spin_lock_init(&mad_agent_priv->lock); spin_lock_init(&mad_agent_priv->lock); INIT_LIST_HEAD(&mad_agent_priv->send_list); INIT_LIST_HEAD(&mad_agent_priv->send_list); INIT_LIST_HEAD(&mad_agent_priv->wait_list); INIT_LIST_HEAD(&mad_agent_priv->wait_list); Loading Loading @@ -706,7 +754,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, smi_handle_dr_smp_send(smp, device->node_type, port_num) == smi_handle_dr_smp_send(smp, device->node_type, port_num) == IB_SMI_DISCARD) { IB_SMI_DISCARD) { ret = -EINVAL; ret = -EINVAL; printk(KERN_ERR PFX "Invalid directed route\n"); dev_err(&device->dev, "Invalid directed route\n"); goto out; goto out; } } Loading @@ -718,7 +766,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, local = kmalloc(sizeof *local, GFP_ATOMIC); local = kmalloc(sizeof *local, GFP_ATOMIC); if (!local) { if (!local) { ret = -ENOMEM; ret = -ENOMEM; printk(KERN_ERR PFX "No memory for ib_mad_local_private\n"); dev_err(&device->dev, "No memory for ib_mad_local_private\n"); goto out; goto out; } } local->mad_priv = NULL; local->mad_priv = NULL; Loading @@ -726,7 +774,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC); mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC); if (!mad_priv) { if (!mad_priv) { ret = -ENOMEM; ret = -ENOMEM; printk(KERN_ERR PFX "No memory for local response MAD\n"); dev_err(&device->dev, "No memory for local response MAD\n"); kfree(local); kfree(local); goto out; goto out; } } Loading Loading @@ -837,8 +885,8 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); if (!seg) { if (!seg) { printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem " dev_err(&send_buf->mad_agent->device->dev, "alloc failed for len %zd, gfp %#x\n", "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n", sizeof (*seg) + seg_size, gfp_mask); sizeof (*seg) + seg_size, gfp_mask); free_send_rmpp_list(send_wr); free_send_rmpp_list(send_wr); return -ENOMEM; return -ENOMEM; Loading @@ -862,6 +910,12 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, return 0; return 0; } } int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent) { return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); } EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, u32 
@@ -878,9 +932,11 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	pad = get_pad_size(hdr_len, data_len);
 	message_size = hdr_len + data_len + pad;

-	if ((!mad_agent->rmpp_version &&
-	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
-	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
-		return ERR_PTR(-EINVAL);
+	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
+		if (!rmpp_active && message_size > sizeof(struct ib_mad))
+			return ERR_PTR(-EINVAL);
+	} else
+		if (rmpp_active || message_size > sizeof(struct ib_mad))
+			return ERR_PTR(-EINVAL);

 	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);

@@ -1135,7 +1191,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 				&mad_agent_priv->send_list);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

-		if (mad_agent_priv->agent.rmpp_version) {
+		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 			ret = ib_send_rmpp_mad(mad_send_wr);
 			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
 				ret = ib_send_mad(mad_send_wr);

@@ -1199,7 +1255,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp);
 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
 		      struct ib_wc *wc)
 {
-	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
+	dev_err(&mad_agent->device->dev,
+		"ib_process_mad_wc() not implemented yet\n");
 	return 0;
 }
 EXPORT_SYMBOL(ib_process_mad_wc);

@@ -1211,7 +1268,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
 		if ((*method)->agent[i]) {
-			printk(KERN_ERR PFX "Method %d already in use\n", i);
+			pr_err("Method %d already in use\n", i);
 			return -EINVAL;
 		}
 	}

@@ -1223,8 +1280,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 	/* Allocate management method table */
 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
 	if (!*method) {
-		printk(KERN_ERR PFX "No memory for "
-		       "ib_mad_mgmt_method_table\n");
+		pr_err("No memory for ib_mad_mgmt_method_table\n");
 		return -ENOMEM;
 	}

@@ -1319,8 +1375,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate management class table for "new" class version */
 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}
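Note: with the helper available, the size check in ib_create_send_mad() above becomes two cases: a kernel-RMPP agent only has to fit non-RMPP sends into a single MAD, while any other agent is refused both RMPP sends and oversized messages. The sketch below is a userspace model of that decision only; MAD_SIZE is a stand-in for sizeof(struct ib_mad) and send_size_ok() is not a kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAD_SIZE 256	/* stand-in for sizeof(struct ib_mad) */

/* Returns true when the modelled check would accept the request. */
static bool send_size_ok(bool kernel_rmpp_agent, bool rmpp_active,
			 size_t message_size)
{
	if (kernel_rmpp_agent) {
		/* Kernel RMPP: only a non-RMPP message too large for one MAD is rejected. */
		return !(!rmpp_active && message_size > MAD_SIZE);
	}
	/* No RMPP, or user-space RMPP: RMPP sends and oversized messages are rejected. */
	return !(rmpp_active || message_size > MAD_SIZE);
}

int main(void)
{
	printf("%d\n", send_size_ok(true,  true,  4096));	/* 1: kernel segments it   */
	printf("%d\n", send_size_ok(true,  false, 4096));	/* 0: too big for one MAD  */
	printf("%d\n", send_size_ok(false, false, 200));	/* 1: fits in one MAD      */
	printf("%d\n", send_size_ok(false, true,  200));	/* 0: RMPP not done here   */
	return 0;
}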
@@ -1386,8 +1442,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate mgmt vendor class table for "new" class version */
 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
 		if (!vendor) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class_table\n");
 			goto error1;
 		}

@@ -1397,8 +1453,8 @@
 		/* Allocate table for this management vendor class */
 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
 		if (!vendor_class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class\n");
 			goto error2;
 		}

@@ -1429,7 +1485,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			goto check_in_use;
 		}
 	}
-	printk(KERN_ERR PFX "All OUI slots in use\n");
+	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
 	goto error3;

 check_in_use:

@@ -1640,8 +1696,8 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 		if (mad_agent->agent.recv_handler)
 			atomic_inc(&mad_agent->refcount);
 		else {
-			printk(KERN_NOTICE PFX "No receive handler for client "
-			       "%p on port %d\n",
+			dev_notice(&port_priv->device->dev,
+				   "No receive handler for client %p on port %d\n",
 			       &mad_agent->agent, port_priv->port_num);
 			mad_agent = NULL;
 		}

@@ -1658,8 +1714,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num)
 	/* Make sure MAD base version is understood */
 	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
-		printk(KERN_ERR PFX "MAD received with unsupported base "
-		       "version %d\n", mad->mad_hdr.base_version);
+		pr_err("MAD received with unsupported base version %d\n",
+		       mad->mad_hdr.base_version);
 		goto out;
 	}

@@ -1685,6 +1741,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
 	return !mad_agent_priv->agent.rmpp_version ||
+		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
 		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
 		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);

@@ -1812,7 +1869,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
-	if (mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
 						      mad_recv_wc);
 		if (!mad_recv_wc) {
@@ -1827,10 +1884,25 @@
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
+			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
+			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
+					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
+				/* user rmpp is in effect
+				 * and this is an active RMPP MAD
+				 */
+				mad_recv_wc->wc->wr_id = 0;
+				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+								   mad_recv_wc);
+				atomic_dec(&mad_agent_priv->refcount);
+			} else {
+				/* not user rmpp, revert to normal behavior and
+				 * drop the mad */
 				ib_free_recv_mad(mad_recv_wc);
 				deref_mad_agent(mad_agent_priv);
 				return;
 			}
+		} else {
 			ib_mark_mad_done(mad_send_wr);
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

@@ -1844,6 +1916,7 @@
 			mad_send_wc.vendor_err = 0;
 			mad_send_wc.send_buf = &mad_send_wr->send_buf;
 			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+		}
 	} else {
 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
 						   mad_recv_wc);

@@ -1911,8 +1984,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 	if (!response) {
-		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
-		       "for response buffer\n");
+		dev_err(&port_priv->device->dev,
+			"ib_mad_recv_done_handler no memory for response buffer\n");
 		goto out;
 	}

@@ -2083,7 +2156,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 	mad_agent_priv = mad_send_wr->mad_agent_priv;
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
-	if (mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
 		if (ret == IB_RMPP_RESULT_CONSUMED)
 			goto done;

@@ -2176,7 +2249,8 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
 				   &bad_send_wr);
 		if (ret) {
-			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
+			dev_err(&port_priv->device->dev,
+				"ib_post_send failed: %d\n", ret);
 			mad_send_wr = queued_send_wr;
 			wc->status = IB_WC_LOC_QP_OP_ERR;
 			goto retry;
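Note: the ib_mad_complete_recv() hunks above change what happens to a response MAD that matches no outstanding send. The sketch below models just that decision in userspace terms (all types and flag values are stand-ins, not kernel definitions): when the agent handles RMPP itself and the MAD is an active RMPP segment, the buffer is handed to the agent's recv_handler (with wr_id cleared) instead of being freed; otherwise the old behaviour of dropping it is kept.

#include <stdbool.h>
#include <stdio.h>

#define RMPP_FLAG_ACTIVE (1 << 0)	/* stand-in for IB_MGMT_RMPP_FLAG_ACTIVE */

struct unmatched_recv {
	bool kernel_rmpp_agent;		/* modelled ib_mad_kernel_rmpp_agent() result */
	bool class_uses_rmpp;		/* modelled ib_is_mad_class_rmpp() result */
	unsigned int rmpp_flags;	/* RMPP header flags of the received MAD */
};

/* true: hand the unmatched response to the agent's recv_handler;
 * false: drop it, as the code did before this change. */
static bool deliver_to_user(const struct unmatched_recv *r)
{
	return !r->kernel_rmpp_agent &&
	       r->class_uses_rmpp &&
	       (r->rmpp_flags & RMPP_FLAG_ACTIVE);
}

int main(void)
{
	struct unmatched_recv user_rmpp_segment = { false, true, RMPP_FLAG_ACTIVE };
	struct unmatched_recv kernel_rmpp_mad   = { true,  true, RMPP_FLAG_ACTIVE };
	struct unmatched_recv plain_mad         = { false, false, 0 };

	printf("%d %d %d\n",
	       deliver_to_user(&user_rmpp_segment),	/* 1: delivered */
	       deliver_to_user(&kernel_rmpp_mad),	/* 0: dropped   */
	       deliver_to_user(&plain_mad));		/* 0: dropped   */
	return 0;
}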
"mad_error_handler - ib_modify_qp to RTS : %d\n", ret); else else mark_sends_for_retry(qp_info); mark_sends_for_retry(qp_info); } } Loading Loading @@ -2408,7 +2483,8 @@ static void local_completions(struct work_struct *work) if (local->mad_priv) { if (local->mad_priv) { recv_mad_agent = local->recv_mad_agent; recv_mad_agent = local->recv_mad_agent; if (!recv_mad_agent) { if (!recv_mad_agent) { printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); dev_err(&mad_agent_priv->agent.device->dev, "No receive MAD agent for local completion\n"); free_mad = 1; free_mad = 1; goto local_send_completion; goto local_send_completion; } } Loading Loading @@ -2476,7 +2552,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { ret = ib_retry_rmpp(mad_send_wr); ret = ib_retry_rmpp(mad_send_wr); switch (ret) { switch (ret) { case IB_RMPP_RESULT_UNHANDLED: case IB_RMPP_RESULT_UNHANDLED: Loading Loading @@ -2589,7 +2665,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } else { } else { mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); if (!mad_priv) { if (!mad_priv) { printk(KERN_ERR PFX "No memory for receive buffer\n"); dev_err(&qp_info->port_priv->device->dev, "No memory for receive buffer\n"); ret = -ENOMEM; ret = -ENOMEM; break; break; } } Loading Loading @@ -2625,7 +2702,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, sizeof mad_priv->header, sizeof mad_priv->header, DMA_FROM_DEVICE); DMA_FROM_DEVICE); kmem_cache_free(ib_mad_cache, mad_priv); kmem_cache_free(ib_mad_cache, mad_priv); printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; break; } } } while (post); } while (post); Loading Loading @@ -2681,7 +2759,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) attr = kmalloc(sizeof *attr, GFP_KERNEL); attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) { if (!attr) { printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); dev_err(&port_priv->device->dev, "Couldn't kmalloc ib_qp_attr\n"); return -ENOMEM; return -ENOMEM; } } Loading @@ -2705,16 +2784,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) ret = ib_modify_qp(qp, attr, IB_QP_STATE | ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY); IB_QP_PKEY_INDEX | IB_QP_QKEY); if (ret) { if (ret) { printk(KERN_ERR PFX "Couldn't change QP%d state to " dev_err(&port_priv->device->dev, "INIT: %d\n", i, ret); "Couldn't change QP%d state to INIT: %d\n", i, ret); goto out; goto out; } } attr->qp_state = IB_QPS_RTR; attr->qp_state = IB_QPS_RTR; ret = ib_modify_qp(qp, attr, IB_QP_STATE); ret = ib_modify_qp(qp, attr, IB_QP_STATE); if (ret) { if (ret) { printk(KERN_ERR PFX "Couldn't change QP%d state to " dev_err(&port_priv->device->dev, "RTR: %d\n", i, ret); "Couldn't change QP%d state to RTR: %d\n", i, ret); goto out; goto out; } } Loading @@ -2722,16 +2803,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) attr->sq_psn = IB_MAD_SEND_Q_PSN; attr->sq_psn = IB_MAD_SEND_Q_PSN; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "RTS: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to RTS: %d\n",
+				i, ret);
 			goto out;
 		}
 	}

 	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 	if (ret) {
-		printk(KERN_ERR PFX "Failed to request completion "
-		       "notification: %d\n", ret);
+		dev_err(&port_priv->device->dev,
+			"Failed to request completion notification: %d\n", ret);
 		goto out;
 	}

@@ -2741,7 +2824,8 @@
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
+			dev_err(&port_priv->device->dev,
+				"Couldn't post receive WRs\n");
 			goto out;
 		}
 	}

@@ -2755,7 +2839,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
 	struct ib_mad_qp_info *qp_info = qp_context;

 	/* It's worse than that! He's dead, Jim! */
-	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
+	dev_err(&qp_info->port_priv->device->dev,
+		"Fatal error (%d) on MAD QP (%d)\n",
 	       event->event, qp_info->qp->qp_num);
 }

@@ -2801,7 +2886,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
 	qp_init_attr.event_handler = qp_event_handler;
 	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
 	if (IS_ERR(qp_info->qp)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
+		dev_err(&qp_info->port_priv->device->dev,
+			"Couldn't create ib_mad QP%d\n",
 		       get_spl_qp_index(qp_type));
 		ret = PTR_ERR(qp_info->qp);
 		goto error;

@@ -2840,7 +2926,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
 		return -ENOMEM;
 	}

@@ -2860,21 +2946,21 @@
 					ib_mad_thread_completion_handler,
 					NULL, port_priv, cq_size, 0);
 	if (IS_ERR(port_priv->cq)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
+		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
 		ret = PTR_ERR(port_priv->cq);
 		goto error3;
 	}

 	port_priv->pd = ib_alloc_pd(device);
 	if (IS_ERR(port_priv->pd)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
+		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
 		ret = PTR_ERR(port_priv->pd);
 		goto error4;
 	}

 	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(port_priv->mr)) {
-		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n"); ret = PTR_ERR(port_priv->mr); ret = PTR_ERR(port_priv->mr); goto error5; goto error5; } } Loading Loading @@ -2902,7 +2988,7 @@ static int ib_mad_port_open(struct ib_device *device, ret = ib_mad_port_start(port_priv); ret = ib_mad_port_start(port_priv); if (ret) { if (ret) { printk(KERN_ERR PFX "Couldn't start port\n"); dev_err(&device->dev, "Couldn't start port\n"); goto error9; goto error9; } } Loading Loading @@ -2946,7 +3032,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) port_priv = __ib_get_mad_port(device, port_num); port_priv = __ib_get_mad_port(device, port_num); if (port_priv == NULL) { if (port_priv == NULL) { spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); printk(KERN_ERR PFX "Port %d not found\n", port_num); dev_err(&device->dev, "Port %d not found\n", port_num); return -ENODEV; return -ENODEV; } } list_del_init(&port_priv->port_list); list_del_init(&port_priv->port_list); Loading Loading @@ -2984,14 +3070,12 @@ static void ib_mad_init_device(struct ib_device *device) for (i = start; i <= end; i++) { for (i = start; i <= end; i++) { if (ib_mad_port_open(device, i)) { if (ib_mad_port_open(device, i)) { printk(KERN_ERR PFX "Couldn't open %s port %d\n", dev_err(&device->dev, "Couldn't open port %d\n", i); device->name, i); goto error; goto error; } } if (ib_agent_port_open(device, i)) { if (ib_agent_port_open(device, i)) { printk(KERN_ERR PFX "Couldn't open %s port %d " dev_err(&device->dev, "for agents\n", "Couldn't open port %d for agents\n", i); device->name, i); goto error_agent; goto error_agent; } } } } Loading @@ -2999,20 +3083,17 @@ static void ib_mad_init_device(struct ib_device *device) error_agent: error_agent: if (ib_mad_port_close(device, i)) if (ib_mad_port_close(device, i)) printk(KERN_ERR PFX "Couldn't close %s port %d\n", dev_err(&device->dev, "Couldn't close port %d\n", i); device->name, i); error: error: i--; i--; while (i >= start) { while (i >= start) { if (ib_agent_port_close(device, i)) if (ib_agent_port_close(device, i)) printk(KERN_ERR PFX "Couldn't close %s port %d " dev_err(&device->dev, "for agents\n", "Couldn't close port %d for agents\n", i); device->name, i); if (ib_mad_port_close(device, i)) if (ib_mad_port_close(device, i)) printk(KERN_ERR PFX "Couldn't close %s port %d\n", dev_err(&device->dev, "Couldn't close port %d\n", i); device->name, i); i--; i--; } } } } Loading @@ -3033,12 +3114,12 @@ static void ib_mad_remove_device(struct ib_device *device) } } for (i = 0; i < num_ports; i++, cur_port++) { for (i = 0; i < num_ports; i++, cur_port++) { if (ib_agent_port_close(device, cur_port)) if (ib_agent_port_close(device, cur_port)) printk(KERN_ERR PFX "Couldn't close %s port %d " dev_err(&device->dev, "for agents\n", "Couldn't close port %d for agents\n", device->name, cur_port); cur_port); if (ib_mad_port_close(device, cur_port)) if (ib_mad_port_close(device, cur_port)) printk(KERN_ERR PFX "Couldn't close %s port %d\n", dev_err(&device->dev, "Couldn't close port %d\n", device->name, cur_port); cur_port); } } } } Loading @@ -3064,7 +3145,7 @@ static int __init ib_mad_init_module(void) SLAB_HWCACHE_ALIGN, SLAB_HWCACHE_ALIGN, NULL); NULL); if (!ib_mad_cache) { if (!ib_mad_cache) { printk(KERN_ERR PFX "Couldn't create ib_mad cache\n"); pr_err("Couldn't create ib_mad cache\n"); ret = -ENOMEM; ret = -ENOMEM; goto error1; goto error1; } } Loading @@ -3072,7 +3153,7 @@ static int __init ib_mad_init_module(void) 
@@ -3072,7 +3153,7 @@
 	INIT_LIST_HEAD(&ib_mad_port_list);

 	if (ib_register_client(&mad_client)) {
-		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
+		pr_err("Couldn't register ib_mad client\n");
 		ret = -EINVAL;
 		goto error2;
 	}