// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "rvu_reg.h"
#include "mbox.h"
#include "rvu_trace.h"

static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

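/* Each mailbox region starts with a struct mbox_hdr; the messages
 * themselves begin at this fixed, aligned offset. Rough layout of one
 * direction's window (illustrative sketch, not an authoritative map):
 *
 *	tx_start                tx_start + msgs_offset
 *	v                       v
 *	+-----------------------+----------+----------+-----
 *	| struct mbox_hdr + pad |  msg 0   |  msg 1   | ...
 *	+-----------------------+----------+----------+-----
 */
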
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->num_msgs = 0;
	tx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);

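/* Locked wrapper around __otx2_mbox_reset(). The double-underscore
 * variant above does no locking itself and is meant for callers that
 * already serialize access to the mailbox (e.g. hold mdev->mbox_lock).
 */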
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];

	spin_lock(&mdev->mbox_lock);
	__otx2_mbox_reset(mbox, devid);
	spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);

void otx2_mbox_destroy(struct otx2_mbox *mbox)
{
	mbox->reg_base = NULL;
	mbox->hwbase = NULL;

	kfree(mbox->dev);
	mbox->dev = NULL;
}
EXPORT_SYMBOL(otx2_mbox_destroy);

static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
			   void *reg_base, int direction, int ndevs)
{
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size = MBOX_DOWN_TX_SIZE;
		mbox->rx_size = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size = MBOX_DOWN_RX_SIZE;
		mbox->rx_size = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size = MBOX_UP_TX_SIZE;
		mbox->rx_size = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size = MBOX_UP_RX_SIZE;
		mbox->rx_size = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}
	mbox->ndevs = ndevs;

	return 0;
}

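/* Note how otx2_mbox_setup() mirrors the windows between the two ends of
 * a channel: what the AF side treats as its TX area is exactly the PF
 * side's RX area, and vice versa. otx2_mbox_init() below then carves
 * per-device regions out of one contiguous hwbase mapping, spaced
 * MBOX_SIZE apart.
 */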
int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
		   void *reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid, err;

	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
	if (err)
		return err;

	mbox->hwbase = hwbase;

	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
		mdev->hwbase = mdev->mbase;
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_init);

/* Initialize mailbox with the set of mailbox region addresses
 * in the array hwbase.
 */
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
			   struct pci_dev *pdev, void *reg_base,
			   int direction, int ndevs, unsigned long *pf_bmap)
{
	struct otx2_mbox_dev *mdev;
	int devid, err;

	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
	if (err)
		return err;

	mbox->hwbase = hwbase[0];

	for (devid = 0; devid < ndevs; devid++) {
		if (!test_bit(devid, pf_bmap))
			continue;

		mdev = &mbox->dev[devid];
		mdev->mbase = hwbase[devid];
		mdev->hwbase = hwbase[devid];
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_regions_init);

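/* Illustrative call (a sketch; only the mbox API names are real, the
 * other identifiers are hypothetical): the caller gathers one iomapped
 * base per PF and marks the enabled PFs in pf_bmap:
 *
 *	err = otx2_mbox_regions_init(mbox, pf_bases, pdev, af_reg_base,
 *				     MBOX_DIR_AFPF, num_pfs, pf_bmap);
 *	if (err)
 *		goto unmap_regions;
 */
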
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct device *sender = &mbox->pdev->dev;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		usleep_range(800, 1000);
	}
	dev_dbg(sender, "timed out while waiting for rsp\n");
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);

int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	unsigned long timeout = jiffies + 1 * HZ;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		cpu_relax();
	}
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);

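/* Typical request/response flow built from the primitives in this file
 * (a sketch of how the pieces fit together, not a prescribed sequence):
 *
 *	1. otx2_mbox_alloc_msg_rsp()	- reserve request and response room
 *	2. fill in the request fields
 *	3. otx2_mbox_msg_send()		- publish num_msgs, ring the doorbell
 *	4. otx2_mbox_wait_for_rsp()	- or otx2_mbox_busy_poll_for_rsp()
 *	5. otx2_mbox_get_rsp()		- fetch the matching response
 */
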
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If a bounce buffer is in use, copy the mbox messages from the
	 * bounce buffer into the hardware mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* A non-zero num_msgs signals to the peer that messages are present
	 * in the buffer, so it must be written only after all the messages
	 * have been written to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);

	spin_unlock(&mdev->mbox_lock);

	/* The interrupt should be fired only after num_msgs has been
	 * written to the shared memory.
	 */
	writeq(1, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
EXPORT_SYMBOL(otx2_mbox_msg_send);

struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);

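/* Illustrative caller (a sketch, assuming the generic msg_req/msg_rsp
 * pair, MBOX_MSG_READY and OTX2_MBOX_REQ_SIG from mbox.h):
 *
 *	struct msg_req *req;
 *
 *	req = (struct msg_req *)otx2_mbox_alloc_msg_rsp(mbox, devid,
 *							sizeof(*req),
 *							sizeof(struct msg_rsp));
 *	if (!req)
 *		return -ENOMEM;
 *	req->hdr.id = MBOX_MSG_READY;
 *	req->hdr.sig = OTX2_MBOX_REQ_SIG;
 *	otx2_mbox_msg_send(mbox, devid);
 */
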
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);

int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (preq->id != prsp->id) {
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}
		if (prsp->rc) {
			rc = prsp->rc;
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}

		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);

int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)
	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;
	return 0;
}
EXPORT_SYMBOL(otx2_reply_invalid_msg);

bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	bool ret;

	spin_lock(&mdev->mbox_lock);
	ret = mdev->num_msgs != 0;
	spin_unlock(&mdev->mbox_lock);

	return ret;
}
EXPORT_SYMBOL(otx2_mbox_nonempty);

const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);

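/* For illustration, assuming mbox.h carries an MBOX_MESSAGES entry such
 * as M(READY, 0x001, ready, msg_req, ready_msg_rsp), the macro above
 * expands it to: case 0x001: return "READY";
 */
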
MODULE_AUTHOR("Marvell.");
MODULE_LICENSE("GPL v2");