ubuntu-linux-kernel/drivers/infiniband/hw/cxgb4/provider.c

/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "iw_cxgb4.h"
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
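
/*
 * iWARP RNICs have no UD address handles, multicast groups or MAD
 * processing, so the verbs below are stubbed out and return -ENOSYS.
 */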
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
struct ib_udata *udata)
{
return ERR_PTR(-ENOSYS);
}
static int c4iw_ah_destroy(struct ib_ah *ah)
{
return -ENOSYS;
}
static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
return -ENOSYS;
}
static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
return -ENOSYS;
}
static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
u8 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad_hdr *in_mad,
size_t in_mad_size,
struct ib_mad_hdr *out_mad,
size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
return -ENOSYS;
}
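
/*
 * Final kref release for a user context: free any mmap entries still
 * queued on the context, release the per-context rdev state, then free
 * the context itself.
 */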
void _c4iw_free_ucontext(struct kref *kref)
{
struct c4iw_ucontext *ucontext;
struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp;
ucontext = container_of(kref, struct c4iw_ucontext, kref);
rhp = to_c4iw_dev(ucontext->ibucontext.device);
pr_debug("ucontext %p\n", ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext);
}
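
/* Drop the caller's reference; the context is freed on the last put. */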
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
pr_debug("context %p\n", context);
c4iw_put_ucontext(ucontext);
return 0;
}
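
/*
 * Allocate a new user context.  If the user library supplies a large
 * enough response buffer, hand back an mmap key for the device status
 * page; otherwise (downlevel libcxgb4) disable the status page.
 */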
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct c4iw_ucontext *context;
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
struct c4iw_alloc_ucontext_resp uresp;
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
pr_debug("ibdev %p\n", ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
ret = -ENOMEM;
goto err;
}
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
} else {
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
goto err_free;
}
uresp.status_page_size = PAGE_SIZE;
spin_lock(&context->mmap_lock);
uresp.status_page_key = context->key;
context->key += PAGE_SIZE;
spin_unlock(&context->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp,
sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
goto err_mm;
mm->key = uresp.status_page_key;
mm->addr = virt_to_phys(rhp->rdev.status_page);
mm->len = PAGE_SIZE;
insert_mmap(context, mm);
}
return &context->ibucontext;
err_mm:
kfree(mm);
err_free:
kfree(context);
err:
return ERR_PTR(ret);
}
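
/*
 * mmap handler for user contexts.  vm_pgoff encodes a key handed out
 * earlier by the driver; the matching entry's address selects the
 * mapping: PCI resource 0 (MA_SYNC register), PCI resource 2 (user
 * doorbells or on-chip queue memory), or host memory holding WQ/CQ
 * rings, which is mapped with remap_pfn_range().
 */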
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
int len = vma->vm_end - vma->vm_start;
u32 key = vma->vm_pgoff << PAGE_SHIFT;
struct c4iw_rdev *rdev;
int ret = 0;
struct c4iw_mm_entry *mm;
struct c4iw_ucontext *ucontext;
u64 addr;
pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
key, len);
if (vma->vm_start & (PAGE_SIZE-1))
return -EINVAL;
rdev = &(to_c4iw_dev(context->device)->rdev);
ucontext = to_c4iw_ucontext(context);
mm = remove_mmap(ucontext, key, len);
if (!mm)
return -EINVAL;
addr = mm->addr;
kfree(mm);
if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
(addr < (pci_resource_start(rdev->lldi.pdev, 0) +
pci_resource_len(rdev->lldi.pdev, 0)))) {
/*
* MA_SYNC register...
*/
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
(addr < (pci_resource_start(rdev->lldi.pdev, 2) +
pci_resource_len(rdev->lldi.pdev, 2)))) {
/*
* Map user DB or OCQP memory...
*/
if (addr >= rdev->oc_mw_pa)
vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
else {
if (!is_t4(rdev->lldi.adapter_type))
vma->vm_page_prot =
t4_pgprot_wc(vma->vm_page_prot);
else
vma->vm_page_prot =
pgprot_noncached(vma->vm_page_prot);
}
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
} else {
/*
* Map WQ or CQ contig dma memory...
*/
ret = remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
}
return ret;
}
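
/* Return the PD id to the resource pool and update the PD statistics. */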
static int c4iw_deallocate_pd(struct ib_pd *pd)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
php = to_c4iw_pd(pd);
rhp = php->rhp;
pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
mutex_unlock(&rhp->rdev.stats.lock);
kfree(php);
return 0;
}
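
/*
 * Allocate a protection domain: take a PD id from the resource pool
 * and, for user PDs, copy the id back to the caller through udata.
 */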
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct c4iw_pd *php;
u32 pdid;
struct c4iw_dev *rhp;
pr_debug("ibdev %p\n", ibdev);
rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
return ERR_PTR(-EINVAL);
php = kzalloc(sizeof(*php), GFP_KERNEL);
if (!php) {
c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
return ERR_PTR(-ENOMEM);
}
php->pdid = pdid;
php->rhp = rhp;
if (context) {
if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
c4iw_deallocate_pd(&php->ibpd);
return ERR_PTR(-EFAULT);
}
}
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur++;
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock);
pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
return &php->ibpd;
}
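
/* iWARP has no P_Keys; report a single zero entry. */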
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
pr_debug("ibdev %p\n", ibdev);
*pkey = 0;
return 0;
}
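
/* The GID is the MAC address of the requested port, zero-padded. */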
static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
struct c4iw_dev *dev;
pr_debug("ibdev %p, port %d, index %d, gid %p\n",
ibdev, port, index, gid);
if (!port)
return -EINVAL;
dev = to_c4iw_dev(ibdev);
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
return 0;
}
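
/* Report device capabilities and limits taken from the LLD/adapter. */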
static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
struct c4iw_dev *dev;
pr_debug("ibdev %p\n", ibdev);
if (uhw->inlen || uhw->outlen)
return -EINVAL;
dev = to_c4iw_dev(ibdev);
memset(props, 0, sizeof *props);
memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
props->page_size_cap = T4_PAGESIZE_MASK;
props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
props->max_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
c4iw_max_read_depth);
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->max_cq = dev->rdev.lldi.vr->qp.size;
props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
props->max_mr = c4iw_num_stags(&dev->rdev);
props->max_pd = T4_MAX_NUM_PD;
props->local_ca_ack_delay = 0;
props->max_fast_reg_page_list_len =
t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);
return 0;
}
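
/*
 * Report port attributes.  The port state is derived from the netdev:
 * DOWN if the carrier is off, ACTIVE if an IPv4 address is configured,
 * INIT otherwise.
 */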
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct c4iw_dev *dev;
struct net_device *netdev;
struct in_device *inetdev;
pr_debug("ibdev %p\n", ibdev);
dev = to_c4iw_dev(ibdev);
netdev = dev->rdev.lldi.ports[port-1];
/* props being zeroed by the caller, avoid zeroing it here */
props->max_mtu = IB_MTU_4096;
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
else {
inetdev = in_dev_get(netdev);
if (inetdev) {
if (inetdev->ifa_list)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_INIT;
in_dev_put(inetdev);
} else
props->state = IB_PORT_INIT;
}
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP |
IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
props->active_width = 2;
props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
return 0;
}
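
/* sysfs show handlers for the hw_rev, hca_type and board_id attributes. */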
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
struct ethtool_drvinfo info;
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
pr_debug("dev 0x%p\n", dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
static ssize_t show_board(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
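
/* TCP MIB counters exported through the rdma_hw_stats interface. */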
enum counters {
IP4INSEGS,
IP4OUTSEGS,
IP4RETRANSSEGS,
IP4OUTRSTS,
IP6INSEGS,
IP6OUTSEGS,
IP6RETRANSSEGS,
IP6OUTRSTS,
NR_COUNTERS
};
static const char * const names[] = {
[IP4INSEGS] = "ip4InSegs",
[IP4OUTSEGS] = "ip4OutSegs",
[IP4RETRANSSEGS] = "ip4RetransSegs",
[IP4OUTRSTS] = "ip4OutRsts",
[IP6INSEGS] = "ip6InSegs",
[IP6OUTSEGS] = "ip6OutSegs",
[IP6RETRANSSEGS] = "ip6RetransSegs",
[IP6OUTRSTS] = "ip6OutRsts"
};
static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
u8 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
if (port_num != 0)
return NULL;
return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
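
/* Fill the hw_stats values from the adapter's IPv4 and IPv6 TCP MIBs. */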
static int c4iw_get_mib(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port, int index)
{
struct tp_tcp_stats v4, v6;
struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
stats->value[IP4INSEGS] = v4.tcp_in_segs;
stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
stats->value[IP6INSEGS] = v6.tcp_in_segs;
stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;
return stats->num_counters;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *c4iw_class_attributes[] = {
&dev_attr_hw_rev,
&dev_attr_hca_type,
&dev_attr_board_id,
};
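
/*
 * Immutable port data: the iWARP core capability flag plus the pkey and
 * gid table sizes reported by query_port.
 */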
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
int err;
immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
return 0;
}
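
/* Format the firmware version as major.minor.micro.build. */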
static void get_dev_fw_str(struct ib_device *dev, char *str)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev);
pr_debug("dev 0x%p\n", dev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}
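
/*
 * Deferred registration work: fill in the ib_device verb table, allocate
 * and populate the iw_cm_verbs ops, register the device with the IB core
 * and create the sysfs attribute files.  On failure the uld context is
 * torn down.
 */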
void c4iw_register_device(struct work_struct *work)
{
int ret;
int i;
struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
struct c4iw_dev *dev = ctx->dev;
pr_debug("c4iw_dev %p\n", dev);
strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
if (fastreg_support)
dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
dev->ibdev.local_dma_lkey = 0;
dev->ibdev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV);
dev->ibdev.node_type = RDMA_NODE_RNIC;
BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;
dev->ibdev.query_device = c4iw_query_device;
dev->ibdev.query_port = c4iw_query_port;
dev->ibdev.query_pkey = c4iw_query_pkey;
dev->ibdev.query_gid = c4iw_query_gid;
dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
dev->ibdev.mmap = c4iw_mmap;
dev->ibdev.alloc_pd = c4iw_allocate_pd;
dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
dev->ibdev.create_ah = c4iw_ah_create;
dev->ibdev.destroy_ah = c4iw_ah_destroy;
dev->ibdev.create_qp = c4iw_create_qp;
dev->ibdev.modify_qp = c4iw_ib_modify_qp;
dev->ibdev.query_qp = c4iw_ib_query_qp;
dev->ibdev.destroy_qp = c4iw_destroy_qp;
dev->ibdev.create_cq = c4iw_create_cq;
dev->ibdev.destroy_cq = c4iw_destroy_cq;
dev->ibdev.resize_cq = c4iw_resize_cq;
dev->ibdev.poll_cq = c4iw_poll_cq;
dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
dev->ibdev.dereg_mr = c4iw_dereg_mr;
dev->ibdev.alloc_mw = c4iw_alloc_mw;
dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
dev->ibdev.alloc_mr = c4iw_alloc_mr;
dev->ibdev.map_mr_sg = c4iw_map_mr_sg;
dev->ibdev.attach_mcast = c4iw_multicast_attach;
dev->ibdev.detach_mcast = c4iw_multicast_detach;
dev->ibdev.process_mad = c4iw_process_mad;
dev->ibdev.req_notify_cq = c4iw_arm_cq;
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
dev->ibdev.get_hw_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm) {
ret = -ENOMEM;
goto err_dealloc_ctx;
}
dev->ibdev.iwcm->connect = c4iw_connect;
dev->ibdev.iwcm->accept = c4iw_accept_cr;
dev->ibdev.iwcm->reject = c4iw_reject_cr;
dev->ibdev.iwcm->create_listen = c4iw_create_listen;
dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto err_kfree_iwcm;
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
ret = device_create_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
if (ret)
goto err_unregister_device;
}
return;
err_unregister_device:
ib_unregister_device(&dev->ibdev);
err_kfree_iwcm:
kfree(dev->ibdev.iwcm);
err_dealloc_ctx:
pr_err("%s - Failed registering iwarp device: %d\n",
pci_name(ctx->lldi.pdev), ret);
c4iw_dealloc(ctx);
return;
}
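
/*
 * Undo c4iw_register_device(): remove the sysfs files, unregister the
 * ib_device and free the iw_cm_verbs table.
 */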
void c4iw_unregister_device(struct c4iw_dev *dev)
{
int i;
pr_debug("c4iw_dev %p\n", dev);
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
device_remove_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
}