drivers/infiniband: Import OnePlus Changes

Signed-off-by: Cyber Knight <cyberknight755@gmail.com>
Cyber Knight committed on 2021-09-26 01:09:13 +08:00
parent 4d8f07aabb
commit dac9e090c8
16 changed files with 26 additions and 55 deletions

drivers/infiniband/core/ucma.c

@@ -1315,13 +1315,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
return -EINVAL;
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
cmd.optlen);
if (IS_ERR(optval)) {
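
Note: the hunk above concerns where a user-supplied optlen is bounded relative to memdup_user(). A minimal sketch of the bounds-check pattern, as a hypothetical helper rather than the ucma code itself:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical helper: cap a user-supplied length before allocating. */
static void *copy_opt_from_user(const void __user *uptr, size_t optlen)
{
	/*
	 * Reject oversized requests up front so kmalloc() is never asked
	 * for more than KMALLOC_MAX_SIZE bytes on behalf of user space.
	 */
	if (optlen > KMALLOC_MAX_SIZE)
		return ERR_PTR(-EINVAL);

	/* memdup_user() allocates optlen bytes and copies them from uptr. */
	return memdup_user(uptr, optlen);
}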

drivers/infiniband/core/verbs.c

@@ -1343,7 +1343,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
dev_put(netdev);
if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
if (!rc) {
netdev_speed = lksettings.base.speed;
} else {
netdev_speed = SPEED_1000;
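
Note: lksettings.base.speed is SPEED_UNKNOWN (-1) when ethtool cannot report a link speed, so the extra test decides whether the netdev reading is usable at all. An illustrative sketch of that decision, not the in-tree helper:

#include <linux/ethtool.h>
#include <linux/types.h>

/* Pick a usable speed from an ethtool query result (illustrative only). */
static u32 effective_speed(const struct ethtool_link_ksettings *ks, int rc)
{
	if (!rc && ks->base.speed != (u32)SPEED_UNKNOWN)
		return ks->base.speed;	/* valid reading from the netdev */

	return SPEED_1000;		/* fall back to a 1G default */
}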

drivers/infiniband/hw/cxgb4/cm.c

@@ -3265,7 +3265,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
goto fail3;
goto fail2;
}
/* find a route */
@@ -3287,7 +3287,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
goto fail3;
goto fail2;
}
/* find a route */

drivers/infiniband/hw/hns/hns_roce_hw_v1.c

@@ -243,6 +243,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
break;
case IB_WR_LOCAL_INV:
break;
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_LSO:

drivers/infiniband/hw/i40iw/i40iw_cm.c

@@ -2052,9 +2052,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
if (!dst || dst->error) {
if (dst) {
dst_release(dst);
i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
dst->error);
dst_release(dst);
}
return rc;
}
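
Note: the two orderings above differ on when the dst reference is dropped; dst->error must be read before dst_release(), because the entry may be freed as soon as the last reference goes away. A hedged sketch of the safe ordering (illustrative, not the driver function):

#include <linux/printk.h>
#include <net/dst.h>

/* Log a routing failure, then drop the reference (illustrative only). */
static int report_route_error(struct dst_entry *dst)
{
	int err = dst->error;	/* read while the reference is still held */

	pr_err("ip6_route_output returned dst->error = %d\n", err);
	dst_release(dst);
	return err;
}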

drivers/infiniband/hw/mlx4/cm.c

@@ -307,9 +307,6 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
if (!sriov->is_going_down) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
} else if (id->scheduled_delete) {
/* Adjust timeout if already scheduled */
mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
spin_unlock(&sriov->id_map_lock);

drivers/infiniband/hw/mlx4/mad.c

@@ -1305,18 +1305,6 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
{
unsigned long flags;
struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
queue_work(ctx->wi_wq, &ctx->work);
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
struct mlx4_ib_demux_pv_qp *tun_qp,
int index)
@@ -2024,8 +2012,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
cq_size *= 2;
cq_attr.cqe = cq_size;
ctx->cq = ib_create_cq(ctx->ib_dev,
create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
@@ -2062,7 +2049,6 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
if (ret) {
@@ -2206,7 +2192,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_mcg;
}
snprintf(name, sizeof(name), "mlx4_ibt%d", port);
snprintf(name, sizeof name, "mlx4_ibt%d", port);
ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
@@ -2214,15 +2200,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_wq;
}
snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wi_wq) {
pr_err("Failed to create wire WQ for port %d\n", port);
ret = -ENOMEM;
goto err_wiwq;
}
snprintf(name, sizeof(name), "mlx4_ibud%d", port);
snprintf(name, sizeof name, "mlx4_ibud%d", port);
ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
@@ -2233,10 +2211,6 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
return 0;
err_udwq:
destroy_workqueue(ctx->wi_wq);
ctx->wi_wq = NULL;
err_wiwq:
destroy_workqueue(ctx->wq);
ctx->wq = NULL;
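
Note: the labels above follow the usual kernel unwind pattern, where each err_* target releases only what was successfully allocated before the failure, in reverse order. A minimal sketch with a hypothetical context struct (not the mlx4 code):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical context, for illustration only. */
struct demo_ctx {
	struct workqueue_struct *wq;
	struct workqueue_struct *ud_wq;
};

static int demo_alloc_queues(struct demo_ctx *ctx, int port)
{
	char name[21];

	snprintf(name, sizeof(name), "demo_t%d", port);
	ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->wq)
		return -ENOMEM;

	snprintf(name, sizeof(name), "demo_ud%d", port);
	ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->ud_wq)
		goto err_wq;	/* undo the first allocation only */

	return 0;

err_wq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;
	return -ENOMEM;
}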
@@ -2284,14 +2258,12 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
}
flush_workqueue(ctx->wq);
flush_workqueue(ctx->wi_wq);
for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
free_pv_object(dev, i, ctx->port);
}
kfree(ctx->tun);
destroy_workqueue(ctx->ud_wq);
destroy_workqueue(ctx->wi_wq);
destroy_workqueue(ctx->wq);
}
}

drivers/infiniband/hw/mlx4/mlx4_ib.h

@@ -463,7 +463,6 @@ struct mlx4_ib_demux_pv_ctx {
struct ib_pd *pd;
struct work_struct work;
struct workqueue_struct *wq;
struct workqueue_struct *wi_wq;
struct mlx4_ib_demux_pv_qp qp[2];
};
@@ -471,7 +470,6 @@ struct mlx4_ib_demux_ctx {
struct ib_device *ib_dev;
int port;
struct workqueue_struct *wq;
struct workqueue_struct *wi_wq;
struct workqueue_struct *ud_wq;
spinlock_t ud_lock;
atomic64_t subnet_prefix;

drivers/infiniband/hw/qedr/main.c

@@ -548,7 +548,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
/* Part 2 - check capabilities */
page_size = ~qed_attr->page_size_caps + 1;
page_size = ~dev->attr.page_size_caps + 1;
if (page_size > PAGE_SIZE) {
DP_ERR(dev,
"Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",

drivers/infiniband/sw/rdmavt/vt.c

@@ -97,7 +97,9 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
if (!rdi)
return rdi;
rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
rdi->ports = kcalloc(nports,
sizeof(struct rvt_ibport **),
GFP_KERNEL);
if (!rdi->ports)
ib_dealloc_device(&rdi->ibdev);

drivers/infiniband/sw/rxe/rxe.c

@@ -126,8 +126,6 @@ static int rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
rxe->ndev->dev_addr);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
@@ -172,6 +170,9 @@ static int rxe_init_ports(struct rxe_dev *rxe)
rxe_init_port_param(port);
if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
return -EINVAL;
port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
sizeof(*port->pkey_tbl), GFP_KERNEL);

drivers/infiniband/sw/rxe/rxe_mr.c

@@ -203,7 +203,6 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
vaddr = page_address(sg_page(sg));
if (!vaddr) {
pr_warn("null vaddr\n");
ib_umem_release(umem);
err = -ENOMEM;
goto err1;
}

drivers/infiniband/sw/rxe/rxe_qp.c

@@ -593,16 +593,15 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
struct ib_gid_attr sgid_attr;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
int max_rd_atomic = attr->max_rd_atomic ?
roundup_pow_of_two(attr->max_rd_atomic) : 0;
int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
qp->attr.max_rd_atomic = max_rd_atomic;
atomic_set(&qp->req.rd_atomic, max_rd_atomic);
}
if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
int max_dest_rd_atomic =
__roundup_pow_of_two(attr->max_dest_rd_atomic);
qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
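
Note: the two forms above differ on zero input: roundup_pow_of_two() is only defined for values greater than zero, so the ternary guard maps a requested 0 straight to 0 instead of rounding it. A minimal sketch of the guarded pattern (hypothetical helper, not the rxe code):

#include <linux/log2.h>

static unsigned int cap_rd_atomic(unsigned int requested)
{
	/* roundup_pow_of_two(0) is undefined, so pass 0 through unchanged. */
	return requested ? roundup_pow_of_two(requested) : 0;
}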

drivers/infiniband/sw/rxe/rxe_verbs.c

@@ -1178,7 +1178,7 @@ static ssize_t parent_show(struct device *device,
struct rxe_dev *rxe = container_of(device, struct rxe_dev,
ib_dev.dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR_RO(parent);
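
Note: a sysfs show() callback's return value is taken as the number of bytes placed in the buffer; scnprintf() returns the bytes actually written, while snprintf() returns the length the output would have had, which can exceed the buffer. A runnable userspace illustration of that return-value difference (scnprintf itself is kernel-only):

#include <stdio.h>

int main(void)
{
	char buf[16];
	int n = snprintf(buf, sizeof(buf), "%s\n",
			 "a-rather-long-parent-name");

	/* n counts the untruncated length, not what fits in buf. */
	printf("buf holds at most %zu chars, snprintf returned %d\n",
	       sizeof(buf) - 1, n);
	return 0;
}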

drivers/infiniband/ulp/ipoib/ipoib.h

@@ -509,7 +509,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open_default(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
void ipoib_ib_dev_stop(struct net_device *dev);
int ipoib_ib_dev_stop(struct net_device *dev);
void ipoib_ib_dev_up(struct net_device *dev);
void ipoib_ib_dev_down(struct net_device *dev);
int ipoib_ib_dev_stop_default(struct net_device *dev);

drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -809,7 +809,7 @@ timeout:
return 0;
}
void ipoib_ib_dev_stop(struct net_device *dev)
int ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -817,6 +817,8 @@ void ipoib_ib_dev_stop(struct net_device *dev)
clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
ipoib_flush_ah(dev);
return 0;
}
void ipoib_ib_tx_timer_func(unsigned long ctx)