/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe GEN3 NTB Linux driver
 *
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"

static int gen3_poll_link(struct intel_ntb_dev *ndev);

static const struct intel_ntb_reg gen3_reg = {
	.poll_link		= gen3_poll_link,
	.link_is_up		= xeon_link_is_up,
	.db_ioread		= gen3_db_ioread,
	.db_iowrite		= gen3_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= GEN3_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

static const struct intel_ntb_alt_reg gen3_pri_reg = {
	.db_bell		= GEN3_EM_DOORBELL_OFFSET,
	.db_clear		= GEN3_IM_INT_STATUS_OFFSET,
	.db_mask		= GEN3_IM_INT_DISABLE_OFFSET,
	.spad			= GEN3_IM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg gen3_b2b_reg = {
	.db_bell		= GEN3_IM_DOORBELL_OFFSET,
	.db_clear		= GEN3_EM_INT_STATUS_OFFSET,
	.db_mask		= GEN3_EM_INT_DISABLE_OFFSET,
	.spad			= GEN3_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg gen3_sec_xlat = {
	/* .bar0_base		= GEN3_EMBAR0_OFFSET, */
	.bar2_limit		= GEN3_IMBAR1XLMT_OFFSET,
	.bar2_xlat		= GEN3_IMBAR1XBASE_OFFSET,
};

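/*
 * Clear the pending link doorbell bit, then re-read the PCIe link status
 * word from config space.  Returns 1 if the cached link status changed,
 * 0 if it is unchanged or the config space read failed.
 */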
static int gen3_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_clear);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  GEN3_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int gen3_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSIX vectors and the interrupt status bits are not lined up
	 * on Skylake.  By default the link status bit is bit 32, but it is
	 * routed to MSIX vector 0 by default.  Remap the vectors so they
	 * line up: at reset the vectors are 1-32,0; reprogram them to 0-32.
	 */

	for (i = 0; i < GEN3_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + GEN3_INTVEC_OFFSET + i);

	/* move link status down one as workaround */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
		iowrite8(GEN3_DB_MSIX_VECTOR_COUNT - 2,
			 ndev->self_mmio + GEN3_INTVEC_OFFSET +
			 (GEN3_DB_MSIX_VECTOR_COUNT - 1));
	}

	return ndev_init_isr(ndev, GEN3_DB_MSIX_VECTOR_COUNT,
			     GEN3_DB_MSIX_VECTOR_COUNT,
			     GEN3_DB_MSIX_VECTOR_SHIFT,
			     GEN3_DB_TOTAL_SHIFT);
}

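/*
 * Prepare the B2B memory windows: program the incoming BAR limits equal
 * to the base addresses (zero-length windows) and clear the incoming
 * translation addresses; real translations are set up later through
 * intel_ntb3_mw_set_trans().  Peer registers are reached through the
 * primary mapping, so peer_mmio aliases self_mmio.
 */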
static int gen3_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + GEN3_IMBAR1XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + GEN3_IMBAR2XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + GEN3_IMBAR1XBASE_OFFSET);
	iowrite64(0, mmio + GEN3_IMBAR2XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}

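/*
 * One-time NTB setup: set the gen3 window, scratchpad and doorbell counts,
 * select the register sets for the detected B2B topology, program the B2B
 * memory windows, and mask all doorbell interrupts.
 */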
static int gen3_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = GEN3_SPAD_COUNT;
	ndev->db_count = GEN3_DB_COUNT;
	ndev->db_link_mask = GEN3_DB_LINK_BIT;

	/* DB fixup for using 31 right now */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
		ndev->db_link_mask |= BIT_ULL(31);

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &gen3_pri_reg;
		ndev->peer_reg = &gen3_b2b_reg;
		ndev->xlat_reg = &gen3_sec_xlat;

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = gen3_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = gen3_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}

		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + GEN3_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

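/*
 * Gen3 device initialization: read the PPD register to determine the
 * topology, flag the MSI-X vector 32 erratum workaround, then set up the
 * NTB registers and interrupt handling.
 */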
int gen3_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc;

	pdev = ndev->ntb.pdev;

	ndev->reg = &gen3_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;

	rc = gen3_init_ntb(ndev);
	if (rc)
		return rc;

	return gen3_init_isr(ndev);
}

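/*
 * debugfs read handler: format the device topology, link state, doorbell
 * state, translation registers and error status into a text buffer and
 * copy it to userspace.
 */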
ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + GEN3_IMBAR1XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR2XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Outgoing B2B XLAT:\n");

		u.v64 = ioread64(mmio + GEN3_EMBAR1XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR1XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Secondary BAR:\n");

		u.v64 = ioread64(mmio + GEN3_EMBAR0_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR0 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR1_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2 -\t\t%#018llx\n", u.v64);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	u.v16 = ioread16(mmio + GEN3_USMEMMISS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Upstream Memory Miss -\t%u\n", u.v16);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN3_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN3_LINK_STATUS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN3_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN3_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

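/*
 * Enable the link by clearing the disable and config-lock bits and
 * enabling snooping on the BAR2/BAR4 paths.  Requests for a specific
 * speed or width are only logged and otherwise ignored.
 */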
int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed,
			   enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}
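
/*
 * Program a memory window translation: write and verify the incoming
 * XBASE/XLMT registers for the BAR backing this window, then mirror the
 * limit into the corresponding EMBAR limit register.
 */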
static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported, if size is not mw_size */
	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);

	/* setup the EP */
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
	base = ioread64(mmio + GEN3_EMBAR1_OFFSET + (8 * idx));
	base &= ~0xf;

	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);

	return 0;
}

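/*
 * Report the physical address of the register that rings the given peer
 * doorbell bit, and the value (1) that must be written to it.
 */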
int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
			    resource_size_t *db_size,
			    u64 *db_data, int db_bit)
{
	phys_addr_t db_addr_base;
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
		return -EINVAL;

	if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask))
		return -EINVAL;

	ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr,
		     ndev->peer_reg->db_bell);

	if (db_addr) {
		*db_addr = db_addr_base + (db_bit * 4);
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n",
			*db_addr, db_bit);
	}

	if (db_data) {
		*db_data = 1;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n",
			*db_data, db_bit);
	}

	return 0;
}

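/*
 * Ring peer doorbells: each doorbell bit has its own 32-bit register, so
 * write a 1 for every bit set in db_bits.
 */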
int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bit;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	while (db_bits) {
		bit = __ffs(db_bits);
		iowrite32(1, ndev->peer_mmio +
			  ndev->peer_reg->db_bell + (bit * 4));
		db_bits &= db_bits - 1;
	}

	return 0;
}

u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_clear);
}

int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_clear);
}

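/*
 * Gen3 ntb_dev_ops: doorbell, link-enable and window-translation callbacks
 * use the gen3-specific implementations above; the remaining callbacks are
 * the common intel_ntb_* helpers.
 */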
const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_align		= intel_ntb_mw_get_align,
	.mw_set_trans		= intel_ntb3_mw_set_trans,
	.peer_mw_count		= intel_ntb_peer_mw_count,
	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb3_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb3_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};