Skip to content

Commit 78b26a3

Browse files
Bob Pearson (author) and rleon — Leon Romanovsky (committer)
Bob Pearson
authored and committed
RDMA/rxe: Remove tasklet call from rxe_cq.c
Remove the tasklet call in rxe_cq.c and also the is_dying in the cq struct. There is no reason for the rxe driver to defer the call to the cq completion handler by scheduling a tasklet. rxe_cq_post() is not called in a hard irq context. The rxe driver currently is incorrect because the tasklet call is made without protecting the cq pointer with a reference from having the underlying memory freed before the deferred routine is called. Executing the comp_handler inline fixes this problem. Fixes: 8700e3e ("Soft RoCE driver") Signed-off-by: Bob Pearson <[email protected]> Link: https://lore.kernel.org/r/[email protected] Acked-by: Zhu Yanjun <[email protected]> Signed-off-by: Leon Romanovsky <[email protected]>
1 parent cba968e commit 78b26a3

File tree

3 files changed

+3
-33
lines changed

3 files changed

+3
-33
lines changed

drivers/infiniband/sw/rxe/rxe_cq.c

Lines changed: 3 additions & 29 deletions
Original file line number | Diff line number | Diff line change
@@ -39,21 +39,6 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
3939
return -EINVAL;
4040
}
4141

42-
static void rxe_send_complete(struct tasklet_struct *t)
43-
{
44-
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
45-
unsigned long flags;
46-
47-
spin_lock_irqsave(&cq->cq_lock, flags);
48-
if (cq->is_dying) {
49-
spin_unlock_irqrestore(&cq->cq_lock, flags);
50-
return;
51-
}
52-
spin_unlock_irqrestore(&cq->cq_lock, flags);
53-
54-
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
55-
}
56-
5742
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
5843
int comp_vector, struct ib_udata *udata,
5944
struct rxe_create_cq_resp __user *uresp)
@@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
7964

8065
cq->is_user = uresp;
8166

82-
cq->is_dying = false;
83-
84-
tasklet_setup(&cq->comp_task, rxe_send_complete);
85-
8667
spin_lock_init(&cq->cq_lock);
8768
cq->ibcq.cqe = cqe;
8869
return 0;
@@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
10384
return err;
10485
}
10586

87+
/* caller holds reference to cq */
10688
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
10789
{
10890
struct ib_event ev;
@@ -136,21 +118,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
136118
if ((cq->notify == IB_CQ_NEXT_COMP) ||
137119
(cq->notify == IB_CQ_SOLICITED && solicited)) {
138120
cq->notify = 0;
139-
tasklet_schedule(&cq->comp_task);
121+
122+
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
140123
}
141124

142125
return 0;
143126
}
144127

145-
void rxe_cq_disable(struct rxe_cq *cq)
146-
{
147-
unsigned long flags;
148-
149-
spin_lock_irqsave(&cq->cq_lock, flags);
150-
cq->is_dying = true;
151-
spin_unlock_irqrestore(&cq->cq_lock, flags);
152-
}
153-
154128
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
155129
{
156130
struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);

drivers/infiniband/sw/rxe/rxe_verbs.c

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1178,8 +1178,6 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
11781178
goto err_out;
11791179
}
11801180

1181-
rxe_cq_disable(cq);
1182-
11831181
err = rxe_cleanup(cq);
11841182
if (err)
11851183
rxe_err_cq(cq, "cleanup failed, err = %d", err);

drivers/infiniband/sw/rxe/rxe_verbs.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -63,9 +63,7 @@ struct rxe_cq {
6363
struct rxe_queue *queue;
6464
spinlock_t cq_lock;
6565
u8 notify;
66-
bool is_dying;
6766
bool is_user;
68-
struct tasklet_struct comp_task;
6967
atomic_t num_wq;
7068
};
7169

0 commit comments

Comments (0)