1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
6
7 #include <linux/crc32.h>
8 #include <linux/etherdevice.h>
9 #include "qed.h"
10 #include "qed_sriov.h"
11 #include "qed_vf.h"
12
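/* VF -> PF channel usage, in brief: qed_vf_pf_prep() takes the channel
 * mutex and writes the first TLV, the caller appends any request-specific
 * TLVs and a CHANNEL_TLV_LIST_END terminator via qed_add_tlv(),
 * qed_send_msg2pf() rings the PF and polls for its reply, and
 * qed_vf_pf_req_end() releases the mutex. A minimal caller, sketched from
 * the flows below:
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
 *	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	qed_vf_pf_req_end(p_hwfn, rc);
 */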
13 static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
14 {
15 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
16 void *p_tlv;
17
18 /* This lock is released in qed_vf_pf_req_end(), once the PF's
19 * response has been received and processed.
20 * So, qed_vf_pf_prep() and qed_vf_pf_req_end()
21 * must come in pairs.
22 */
23 mutex_lock(&(p_iov->mutex));
24
25 DP_VERBOSE(p_hwfn,
26 QED_MSG_IOV,
27 "preparing to send 0x%04x tlv over vf pf channel\n",
28 type);
29
30 /* Reset Request offset */
31 p_iov->offset = (u8 *)p_iov->vf2pf_request;
32
33 /* Clear mailbox - both request and reply */
34 memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
35 memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
36
37 /* Init type and length */
38 p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
39
40 /* Init first tlv header */
41 ((struct vfpf_first_tlv *)p_tlv)->reply_address =
42 (u64)p_iov->pf2vf_reply_phys;
43
44 return p_tlv;
45 }
46
47 static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
48 {
49 union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
50
51 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
52 "VF request status = 0x%x, PF reply status = 0x%x\n",
53 req_status, resp->default_resp.hdr.status);
54
55 mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
56 }
57
58 #define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
59 #define QED_VF_CHANNEL_USLEEP_DELAY 100
60 #define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
61 #define QED_VF_CHANNEL_MSLEEP_DELAY 25
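/* With the values above, qed_send_msg2pf() busy-waits for up to
 * 90 * 100us = 9ms, then sleeps for up to another 10 * 25ms = 250ms,
 * before declaring a channel timeout.
 */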
62
63 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
64 {
65 union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
66 struct ustorm_trigger_vf_zone trigger;
67 struct ustorm_vf_zone *zone_data;
68 int iter, rc = 0;
69
70 zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
71
72 /* output tlvs list */
73 qed_dp_tlv_list(p_hwfn, p_req);
74
75 /* need to add the END TLV to the message size */
76 resp_size += sizeof(struct channel_list_end_tlv);
77
78 /* Send TLVs over HW channel */
79 memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
80 trigger.vf_pf_msg_valid = 1;
81
82 DP_VERBOSE(p_hwfn,
83 QED_MSG_IOV,
84 "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
85 GET_FIELD(p_hwfn->hw_info.concrete_fid,
86 PXP_CONCRETE_FID_PFID),
87 upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
88 lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
89 &zone_data->non_trigger.vf_pf_msg_addr,
90 *((u32 *)&trigger), &zone_data->trigger);
91
92 REG_WR(p_hwfn,
93 (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
94 lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
95
96 REG_WR(p_hwfn,
97 (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
98 upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
99
100 /* The message data must be written before the trigger, so the PF
101 * never sees the trigger ahead of the data.
102 */
103 wmb();
104
105 REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
106
107 /* When the PF is done with the response, it writes back to the
108 * `done' address from a coherent DMA zone. Poll until then.
109 */
110
111 iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
112 while (!*done && iter--) {
113 udelay(QED_VF_CHANNEL_USLEEP_DELAY);
114 dma_rmb();
115 }
116
117 iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
118 while (!*done && iter--) {
119 msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
120 dma_rmb();
121 }
122
123 if (!*done) {
124 DP_NOTICE(p_hwfn,
125 "VF <-- PF Timeout [Type %d]\n",
126 p_req->first_tlv.tl.type);
127 rc = -EBUSY;
128 } else {
129 if ((*done != PFVF_STATUS_SUCCESS) &&
130 (*done != PFVF_STATUS_NO_RESOURCE))
131 DP_NOTICE(p_hwfn,
132 "PF response: %d [Type %d]\n",
133 *done, p_req->first_tlv.tl.type);
134 else
135 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
136 "PF response: %d [Type %d]\n",
137 *done, p_req->first_tlv.tl.type);
138 }
139
140 return rc;
141 }
142
143 static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
144 struct qed_queue_cid *p_cid)
145 {
146 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
147 struct vfpf_qid_tlv *p_qid_tlv;
148
149 /* Only add QIDs for the queue if it was negotiated with PF */
150 if (!(p_iov->acquire_resp.pfdev_info.capabilities &
151 PFVF_ACQUIRE_CAP_QUEUE_QIDS))
152 return;
153
154 p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
155 CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
156 p_qid_tlv->qid = p_cid->qid_usage_idx;
157 }
158
159 static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
160 {
161 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
162 struct pfvf_def_resp_tlv *resp;
163 struct vfpf_first_tlv *req;
164 u32 size;
165 int rc;
166
167 /* clear mailbox and prep first tlv */
168 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
169
170 /* add list termination tlv */
171 qed_add_tlv(p_hwfn, &p_iov->offset,
172 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
173
174 resp = &p_iov->pf2vf_reply->default_resp;
175 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
176
177 if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
178 rc = -EAGAIN;
179
180 qed_vf_pf_req_end(p_hwfn, rc);
181 if (!b_final)
182 return rc;
183
184 p_hwfn->b_int_enabled = 0;
185
186 if (p_iov->vf2pf_request)
187 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
188 sizeof(union vfpf_tlvs),
189 p_iov->vf2pf_request,
190 p_iov->vf2pf_request_phys);
191 if (p_iov->pf2vf_reply)
192 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
193 sizeof(union pfvf_tlvs),
194 p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
195
196 if (p_iov->bulletin.p_virt) {
197 size = sizeof(struct qed_bulletin_content);
198 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
199 size,
200 p_iov->bulletin.p_virt, p_iov->bulletin.phys);
201 }
202
203 kfree(p_hwfn->vf_iov_info);
204 p_hwfn->vf_iov_info = NULL;
205
206 return rc;
207 }
208
209 int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
210 {
211 return _qed_vf_pf_release(p_hwfn, true);
212 }
213
214 #define VF_ACQUIRE_THRESH 3
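/* VF_ACQUIRE_THRESH bounds both the number of channel-timeout retries and
 * the number of "reduce and retry" attempts after a PFVF_STATUS_NO_RESOURCE
 * reply in qed_vf_pf_acquire().
 */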
215 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
216 struct vf_pf_resc_request *p_req,
217 struct pf_vf_resc *p_resp)
218 {
219 DP_VERBOSE(p_hwfn,
220 QED_MSG_IOV,
221 "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
222 p_req->num_rxqs,
223 p_resp->num_rxqs,
224 p_req->num_txqs,
225 p_resp->num_txqs,
226 p_req->num_sbs,
227 p_resp->num_sbs,
228 p_req->num_mac_filters,
229 p_resp->num_mac_filters,
230 p_req->num_vlan_filters,
231 p_resp->num_vlan_filters,
232 p_req->num_mc_filters,
233 p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);
234
235 /* humble our request */
236 p_req->num_txqs = p_resp->num_txqs;
237 p_req->num_rxqs = p_resp->num_rxqs;
238 p_req->num_sbs = p_resp->num_sbs;
239 p_req->num_mac_filters = p_resp->num_mac_filters;
240 p_req->num_vlan_filters = p_resp->num_vlan_filters;
241 p_req->num_mc_filters = p_resp->num_mc_filters;
242 p_req->num_cids = p_resp->num_cids;
243 }
244
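/* ACQUIRE negotiation in brief: the VF first asks for the maximum resources
 * it can use; on PFVF_STATUS_NO_RESOURCE it retries with the PF-recommended
 * amounts, and on PFVF_STATUS_NOT_SUPPORTED it either fails on a fastpath
 * HSI major mismatch or retries once in pre-FP-HSI (legacy) mode.
 */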
245 static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
246 {
247 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
248 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
249 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
250 struct vf_pf_resc_request *p_resc;
251 u8 retry_cnt = VF_ACQUIRE_THRESH;
252 bool resources_acquired = false;
253 struct vfpf_acquire_tlv *req;
254 int rc = 0, attempts = 0;
255
256 /* clear mailbox and prep first tlv */
257 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
258 p_resc = &req->resc_request;
259
260 /* start filling the request */
261 req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
262
263 p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
264 p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
265 p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
266 p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
267 p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
268 p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;
269
270 req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
271 req->vfdev_info.fw_major = FW_MAJOR_VERSION;
272 req->vfdev_info.fw_minor = FW_MINOR_VERSION;
273 req->vfdev_info.fw_revision = FW_REVISION_VERSION;
274 req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
275 req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
276 req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
277
278 /* Fill capability field with any non-deprecated config we support */
279 req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
280
281 /* If we've mapped the doorbell bar, try using queue qids */
282 if (p_iov->b_doorbell_bar) {
283 req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
284 VFPF_ACQUIRE_CAP_QUEUE_QIDS;
285 p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
286 }
287
288 /* pf 2 vf bulletin board address */
289 req->bulletin_addr = p_iov->bulletin.phys;
290 req->bulletin_size = p_iov->bulletin.size;
291
292 /* add list termination tlv */
293 qed_add_tlv(p_hwfn, &p_iov->offset,
294 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
295
296 while (!resources_acquired) {
297 DP_VERBOSE(p_hwfn,
298 QED_MSG_IOV, "attempting to acquire resources\n");
299
300 /* Clear response buffer, as this might be a re-send */
301 memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
302
303 /* send acquire request */
304 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
305
306 /* Re-try acquire in case of vf-pf hw channel timeout */
307 if (retry_cnt && rc == -EBUSY) {
308 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
309 "VF retrying to acquire due to VPC timeout\n");
310 retry_cnt--;
311 continue;
312 }
313
314 if (rc)
315 goto exit;
316
317 /* copy acquire response from buffer to p_hwfn */
318 memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
319
320 attempts++;
321
322 if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
323 /* PF agrees to allocate our resources */
324 if (!(resp->pfdev_info.capabilities &
325 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
326 /* It's possible a legacy PF mistakenly accepted;
327 * but we don't care - simply mark it as
328 * legacy and continue.
329 */
330 req->vfdev_info.capabilities |=
331 VFPF_ACQUIRE_CAP_PRE_FP_HSI;
332 }
333 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
334 resources_acquired = true;
335 } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
336 attempts < VF_ACQUIRE_THRESH) {
337 qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
338 &resp->resc);
339 } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
340 if (pfdev_info->major_fp_hsi &&
341 (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
342 DP_NOTICE(p_hwfn,
343 "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
344 pfdev_info->major_fp_hsi,
345 pfdev_info->minor_fp_hsi,
346 ETH_HSI_VER_MAJOR,
347 ETH_HSI_VER_MINOR,
348 pfdev_info->major_fp_hsi);
349 rc = -EINVAL;
350 goto exit;
351 }
352
353 if (!pfdev_info->major_fp_hsi) {
354 if (req->vfdev_info.capabilities &
355 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
356 DP_NOTICE(p_hwfn,
357 "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
358 rc = -EINVAL;
359 goto exit;
360 } else {
361 DP_INFO(p_hwfn,
362 "PF is old - try re-acquire to see if it supports FW-version override\n");
363 req->vfdev_info.capabilities |=
364 VFPF_ACQUIRE_CAP_PRE_FP_HSI;
365 continue;
366 }
367 }
368
369 /* If PF/VF are using the same major, the PF must have had
370 * its reasons. Simply fail.
371 */
372 DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
373 rc = -EINVAL;
374 goto exit;
375 } else {
376 DP_ERR(p_hwfn,
377 "PF returned error %d to VF acquisition request\n",
378 resp->hdr.status);
379 rc = -EAGAIN;
380 goto exit;
381 }
382 }
383
384 /* Mark the PF as legacy, if needed */
385 if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
386 p_iov->b_pre_fp_hsi = true;
387
388 /* In case PF doesn't support multi-queue Tx, update the number of
389 * CIDs to reflect the number of queues [older PFs didn't fill that
390 * field].
391 */
392 if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
393 resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;
394
395 /* Update bulletin board size with response from PF */
396 p_iov->bulletin.size = resp->bulletin_size;
397
398 /* get HW info */
399 p_hwfn->cdev->type = resp->pfdev_info.dev_type;
400 p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
401
402 p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
403
404 /* Learn of the possibility of CMT */
405 if (IS_LEAD_HWFN(p_hwfn)) {
406 if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
407 DP_NOTICE(p_hwfn, "100g VF\n");
408 p_hwfn->cdev->num_hwfns = 2;
409 }
410 }
411
412 if (!p_iov->b_pre_fp_hsi &&
413 (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
414 DP_INFO(p_hwfn,
415 "PF is using older fastpath HSI; %02x.%02x is configured\n",
416 ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
417 }
418
419 exit:
420 qed_vf_pf_req_end(p_hwfn, rc);
421
422 return rc;
423 }
424
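/* BAR0 (regview) is always 128KB for a VF; the doorbell BAR size is
 * log2-encoded in the ACQUIRE response, and 0 means the PF did not
 * report it.
 */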
425 u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
426 {
427 u32 bar_size;
428
429 /* Regview size is fixed */
430 if (bar_id == BAR_ID_0)
431 return 1 << 17;
432
433 /* Doorbell is received from PF */
434 bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
435 if (bar_size)
436 return 1 << bar_size;
437 return 0;
438 }
439
440 int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
441 {
442 struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
443 struct qed_vf_iov *p_iov;
444 u32 reg;
445 int rc;
446
447 /* Set number of hwfns - might be overridden once leading hwfn learns
448 * actual configuration from PF.
449 */
450 if (IS_LEAD_HWFN(p_hwfn))
451 p_hwfn->cdev->num_hwfns = 1;
452
453 reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
454 p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
455
456 reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
457 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
458
459 /* Allocate vf sriov info */
460 p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
461 if (!p_iov)
462 return -ENOMEM;
463
464 /* Doorbells are tricky; the upper layer has already set the hwfn doorbell
465 * value, but there are several incompatibility scenarios where that
466 * would be incorrect and we'd need to override it.
467 */
468 if (!p_hwfn->doorbells) {
469 p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
470 PXP_VF_BAR0_START_DQ;
471 } else if (p_hwfn == p_lead) {
472 /* For leading hw-function, value is always correct, but need
473 * to handle scenario where legacy PF would not support 100g
474 * mapped bars later.
475 */
476 p_iov->b_doorbell_bar = true;
477 } else {
478 /* here, value would be correct ONLY if the leading hwfn
479 * received indication that mapped-bars are supported.
480 */
481 if (p_lead->vf_iov_info->b_doorbell_bar)
482 p_iov->b_doorbell_bar = true;
483 else
484 p_hwfn->doorbells = (u8 __iomem *)
485 p_hwfn->regview + PXP_VF_BAR0_START_DQ;
486 }
487
488 /* Allocate vf2pf msg */
489 p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
490 sizeof(union vfpf_tlvs),
491 &p_iov->vf2pf_request_phys,
492 GFP_KERNEL);
493 if (!p_iov->vf2pf_request)
494 goto free_p_iov;
495
496 p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
497 sizeof(union pfvf_tlvs),
498 &p_iov->pf2vf_reply_phys,
499 GFP_KERNEL);
500 if (!p_iov->pf2vf_reply)
501 goto free_vf2pf_request;
502
503 DP_VERBOSE(p_hwfn,
504 QED_MSG_IOV,
505 "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
506 p_iov->vf2pf_request,
507 (u64)p_iov->vf2pf_request_phys,
508 p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
509
510 /* Allocate Bulletin board */
511 p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
512 p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
513 p_iov->bulletin.size,
514 &p_iov->bulletin.phys,
515 GFP_KERNEL);
516 if (!p_iov->bulletin.p_virt)
517 goto free_pf2vf_reply;
518
519 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
520 "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
521 p_iov->bulletin.p_virt,
522 (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
523
524 mutex_init(&p_iov->mutex);
525
526 p_hwfn->vf_iov_info = p_iov;
527
528 p_hwfn->hw_info.personality = QED_PCI_ETH;
529
530 rc = qed_vf_pf_acquire(p_hwfn);
531
532 /* If VF is 100g using a mapped bar and PF is too old to support that,
533 * acquisition would succeed - but the VF would have no way of knowing
534 * the size of the doorbell bar configured in HW and thus would not
535 * know how to split it for the 2nd hw-function.
536 * In this case we re-try without the indication of the mapped
537 * doorbell.
538 */
539 if (!rc && p_iov->b_doorbell_bar &&
540 !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
541 (p_hwfn->cdev->num_hwfns > 1)) {
542 rc = _qed_vf_pf_release(p_hwfn, false);
543 if (rc)
544 return rc;
545
546 p_iov->b_doorbell_bar = false;
547 p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
548 PXP_VF_BAR0_START_DQ;
549 rc = qed_vf_pf_acquire(p_hwfn);
550 }
551
552 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
553 "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
554 p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);
555
556 return rc;
557
558 free_pf2vf_reply:
559 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
560 sizeof(union pfvf_tlvs),
561 p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
562 free_vf2pf_request:
563 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
564 sizeof(union vfpf_tlvs),
565 p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
566 free_p_iov:
567 kfree(p_iov);
568
569 return -ENOMEM;
570 }
571
572 #define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
573 #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
574 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
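/* With a legacy (pre-FP-HSI) PF the VF derives the Rx producer address
 * itself, roughly:
 *
 *	prod = regview + MSTORM_QZONE_START(cdev) + hw_qid * MSTORM_QZONE_SIZE;
 *
 * Modern PFs return the producer offset in the START_RXQ response instead.
 */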
575
576 static void
577 __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
578 struct qed_tunn_update_type *p_src,
579 enum qed_tunn_mode mask, u8 *p_cls)
580 {
581 if (p_src->b_update_mode) {
582 p_req->tun_mode_update_mask |= BIT(mask);
583
584 if (p_src->b_mode_enabled)
585 p_req->tunn_mode |= BIT(mask);
586 }
587
588 *p_cls = p_src->tun_cls;
589 }
590
591 static void
592 qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
593 struct qed_tunn_update_type *p_src,
594 enum qed_tunn_mode mask,
595 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
596 u8 *p_update_port, u16 *p_udp_port)
597 {
598 if (p_port->b_update_port) {
599 *p_update_port = 1;
600 *p_udp_port = p_port->port;
601 }
602
603 __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
604 }
605
606 void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
607 {
608 if (p_tun->vxlan.b_mode_enabled)
609 p_tun->vxlan.b_update_mode = true;
610 if (p_tun->l2_geneve.b_mode_enabled)
611 p_tun->l2_geneve.b_update_mode = true;
612 if (p_tun->ip_geneve.b_mode_enabled)
613 p_tun->ip_geneve.b_update_mode = true;
614 if (p_tun->l2_gre.b_mode_enabled)
615 p_tun->l2_gre.b_update_mode = true;
616 if (p_tun->ip_gre.b_mode_enabled)
617 p_tun->ip_gre.b_update_mode = true;
618
619 p_tun->b_update_rx_cls = true;
620 p_tun->b_update_tx_cls = true;
621 }
622
623 static void
624 __qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
625 u16 feature_mask, u8 tunn_mode,
626 u8 tunn_cls, enum qed_tunn_mode val)
627 {
628 if (feature_mask & BIT(val)) {
629 p_tun->b_mode_enabled = tunn_mode;
630 p_tun->tun_cls = tunn_cls;
631 } else {
632 p_tun->b_mode_enabled = false;
633 }
634 }
635
636 static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
637 struct qed_tunnel_info *p_tun,
638 struct pfvf_update_tunn_param_tlv *p_resp)
639 {
640 /* Update mode and classes provided by PF */
641 u16 feat_mask = p_resp->tunn_feature_mask;
642
643 __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
644 p_resp->vxlan_mode, p_resp->vxlan_clss,
645 QED_MODE_VXLAN_TUNN);
646 __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
647 p_resp->l2geneve_mode,
648 p_resp->l2geneve_clss,
649 QED_MODE_L2GENEVE_TUNN);
650 __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
651 p_resp->ipgeneve_mode,
652 p_resp->ipgeneve_clss,
653 QED_MODE_IPGENEVE_TUNN);
654 __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
655 p_resp->l2gre_mode, p_resp->l2gre_clss,
656 QED_MODE_L2GRE_TUNN);
657 __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
658 p_resp->ipgre_mode, p_resp->ipgre_clss,
659 QED_MODE_IPGRE_TUNN);
660 p_tun->geneve_port.port = p_resp->geneve_udp_port;
661 p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
662
663 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
664 "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
665 p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
666 p_tun->ip_geneve.b_mode_enabled,
667 p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
668 }
669
670 int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
671 struct qed_tunnel_info *p_src)
672 {
673 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
674 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
675 struct pfvf_update_tunn_param_tlv *p_resp;
676 struct vfpf_update_tunn_param_tlv *p_req;
677 int rc;
678
679 p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
680 sizeof(*p_req));
681
682 if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
683 p_req->update_tun_cls = 1;
684
685 qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
686 &p_req->vxlan_clss, &p_src->vxlan_port,
687 &p_req->update_vxlan_port,
688 &p_req->vxlan_port);
689 qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
690 QED_MODE_L2GENEVE_TUNN,
691 &p_req->l2geneve_clss, &p_src->geneve_port,
692 &p_req->update_geneve_port,
693 &p_req->geneve_port);
694 __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
695 QED_MODE_IPGENEVE_TUNN,
696 &p_req->ipgeneve_clss);
697 __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
698 QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
699 __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
700 QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
701
702 /* add list termination tlv */
703 qed_add_tlv(p_hwfn, &p_iov->offset,
704 CHANNEL_TLV_LIST_END,
705 sizeof(struct channel_list_end_tlv));
706
707 p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
708 rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
709
710 if (rc)
711 goto exit;
712
713 if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
714 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
715 "Failed to update tunnel parameters\n");
716 rc = -EINVAL;
717 }
718
719 qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
720 exit:
721 qed_vf_pf_req_end(p_hwfn, rc);
722 return rc;
723 }
724
725 int
726 qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
727 struct qed_queue_cid *p_cid,
728 u16 bd_max_bytes,
729 dma_addr_t bd_chain_phys_addr,
730 dma_addr_t cqe_pbl_addr,
731 u16 cqe_pbl_size, void __iomem **pp_prod)
732 {
733 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
734 struct pfvf_start_queue_resp_tlv *resp;
735 struct vfpf_start_rxq_tlv *req;
736 u8 rx_qid = p_cid->rel.queue_id;
737 int rc;
738
739 /* clear mailbox and prep first tlv */
740 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
741
742 req->rx_qid = rx_qid;
743 req->cqe_pbl_addr = cqe_pbl_addr;
744 req->cqe_pbl_size = cqe_pbl_size;
745 req->rxq_addr = bd_chain_phys_addr;
746 req->hw_sb = p_cid->sb_igu_id;
747 req->sb_index = p_cid->sb_idx;
748 req->bd_max_bytes = bd_max_bytes;
749 req->stat_id = -1;
750
751 /* If PF is legacy, we'll need to calculate producers ourselves
752 * as well as clean them.
753 */
754 if (p_iov->b_pre_fp_hsi) {
755 u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
756 u32 init_prod_val = 0;
757
758 *pp_prod = (u8 __iomem *)
759 p_hwfn->regview +
760 MSTORM_QZONE_START(p_hwfn->cdev) +
761 hw_qid * MSTORM_QZONE_SIZE;
762
763 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
764 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
765 (u32 *)(&init_prod_val));
766 }
767
768 qed_vf_pf_add_qid(p_hwfn, p_cid);
769
770 /* add list termination tlv */
771 qed_add_tlv(p_hwfn, &p_iov->offset,
772 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
773
774 resp = &p_iov->pf2vf_reply->queue_start;
775 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
776 if (rc)
777 goto exit;
778
779 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
780 rc = -EINVAL;
781 goto exit;
782 }
783
784 /* Learn the address of the producer from the response */
785 if (!p_iov->b_pre_fp_hsi) {
786 u32 init_prod_val = 0;
787
788 *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
789 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
790 "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
791 rx_qid, *pp_prod, resp->offset);
792
793 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
794 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
795 (u32 *)&init_prod_val);
796 }
797 exit:
798 qed_vf_pf_req_end(p_hwfn, rc);
799
800 return rc;
801 }
802
803 int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
804 struct qed_queue_cid *p_cid, bool cqe_completion)
805 {
806 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
807 struct vfpf_stop_rxqs_tlv *req;
808 struct pfvf_def_resp_tlv *resp;
809 int rc;
810
811 /* clear mailbox and prep first tlv */
812 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
813
814 req->rx_qid = p_cid->rel.queue_id;
815 req->num_rxqs = 1;
816 req->cqe_completion = cqe_completion;
817
818 qed_vf_pf_add_qid(p_hwfn, p_cid);
819
820 /* add list termination tlv */
821 qed_add_tlv(p_hwfn, &p_iov->offset,
822 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
823
824 resp = &p_iov->pf2vf_reply->default_resp;
825 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
826 if (rc)
827 goto exit;
828
829 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
830 rc = -EINVAL;
831 goto exit;
832 }
833
834 exit:
835 qed_vf_pf_req_end(p_hwfn, rc);
836
837 return rc;
838 }
839
840 int
841 qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
842 struct qed_queue_cid *p_cid,
843 dma_addr_t pbl_addr,
844 u16 pbl_size, void __iomem **pp_doorbell)
845 {
846 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
847 struct pfvf_start_queue_resp_tlv *resp;
848 struct vfpf_start_txq_tlv *req;
849 u16 qid = p_cid->rel.queue_id;
850 int rc;
851
852 /* clear mailbox and prep first tlv */
853 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
854
855 req->tx_qid = qid;
856
857 /* Tx */
858 req->pbl_addr = pbl_addr;
859 req->pbl_size = pbl_size;
860 req->hw_sb = p_cid->sb_igu_id;
861 req->sb_index = p_cid->sb_idx;
862
863 qed_vf_pf_add_qid(p_hwfn, p_cid);
864
865 /* add list termination tlv */
866 qed_add_tlv(p_hwfn, &p_iov->offset,
867 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
868
869 resp = &p_iov->pf2vf_reply->queue_start;
870 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
871 if (rc)
872 goto exit;
873
874 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
875 rc = -EINVAL;
876 goto exit;
877 }
878
879 /* Modern PFs provide the actual offsets, while legacy PFs
880 * provided only the queue id.
881 */
882 if (!p_iov->b_pre_fp_hsi) {
883 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
884 } else {
885 u8 cid = p_iov->acquire_resp.resc.cid[qid];
886
887 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
888 qed_db_addr_vf(cid,
889 DQ_DEMS_LEGACY);
890 }
891
892 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
893 "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
894 qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
895 exit:
896 qed_vf_pf_req_end(p_hwfn, rc);
897
898 return rc;
899 }
900
901 int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
902 {
903 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
904 struct vfpf_stop_txqs_tlv *req;
905 struct pfvf_def_resp_tlv *resp;
906 int rc;
907
908 /* clear mailbox and prep first tlv */
909 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
910
911 req->tx_qid = p_cid->rel.queue_id;
912 req->num_txqs = 1;
913
914 qed_vf_pf_add_qid(p_hwfn, p_cid);
915
916 /* add list termination tlv */
917 qed_add_tlv(p_hwfn, &p_iov->offset,
918 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
919
920 resp = &p_iov->pf2vf_reply->default_resp;
921 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
922 if (rc)
923 goto exit;
924
925 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
926 rc = -EINVAL;
927 goto exit;
928 }
929
930 exit:
931 qed_vf_pf_req_end(p_hwfn, rc);
932
933 return rc;
934 }
935
936 int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
937 u8 vport_id,
938 u16 mtu,
939 u8 inner_vlan_removal,
940 enum qed_tpa_mode tpa_mode,
941 u8 max_buffers_per_cqe, u8 only_untagged)
942 {
943 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
944 struct vfpf_vport_start_tlv *req;
945 struct pfvf_def_resp_tlv *resp;
946 int rc, i;
947
948 /* clear mailbox and prep first tlv */
949 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
950
951 req->mtu = mtu;
952 req->vport_id = vport_id;
953 req->inner_vlan_removal = inner_vlan_removal;
954 req->tpa_mode = tpa_mode;
955 req->max_buffers_per_cqe = max_buffers_per_cqe;
956 req->only_untagged = only_untagged;
957
958 /* status blocks */
959 for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
960 struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
961
962 if (p_sb)
963 req->sb_addr[i] = p_sb->sb_phys;
964 }
965
966 /* add list termination tlv */
967 qed_add_tlv(p_hwfn, &p_iov->offset,
968 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
969
970 resp = &p_iov->pf2vf_reply->default_resp;
971 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
972 if (rc)
973 goto exit;
974
975 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
976 rc = -EINVAL;
977 goto exit;
978 }
979
980 exit:
981 qed_vf_pf_req_end(p_hwfn, rc);
982
983 return rc;
984 }
985
986 int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
987 {
988 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
989 struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
990 int rc;
991
992 /* clear mailbox and prep first tlv */
993 qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
994 sizeof(struct vfpf_first_tlv));
995
996 /* add list termination tlv */
997 qed_add_tlv(p_hwfn, &p_iov->offset,
998 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
999
1000 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1001 if (rc)
1002 goto exit;
1003
1004 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1005 rc = -EINVAL;
1006 goto exit;
1007 }
1008
1009 exit:
1010 qed_vf_pf_req_end(p_hwfn, rc);
1011
1012 return rc;
1013 }
1014
1015 static bool
1016 qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
1017 struct qed_sp_vport_update_params *p_data,
1018 u16 tlv)
1019 {
1020 switch (tlv) {
1021 case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
1022 return !!(p_data->update_vport_active_rx_flg ||
1023 p_data->update_vport_active_tx_flg);
1024 case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
1025 return !!p_data->update_tx_switching_flg;
1026 case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
1027 return !!p_data->update_inner_vlan_removal_flg;
1028 case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
1029 return !!p_data->update_accept_any_vlan_flg;
1030 case CHANNEL_TLV_VPORT_UPDATE_MCAST:
1031 return !!p_data->update_approx_mcast_flg;
1032 case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
1033 return !!(p_data->accept_flags.update_rx_mode_config ||
1034 p_data->accept_flags.update_tx_mode_config);
1035 case CHANNEL_TLV_VPORT_UPDATE_RSS:
1036 return !!p_data->rss_params;
1037 case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
1038 return !!p_data->sge_tpa_params;
1039 default:
1040 DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
1041 tlv);
1042 return false;
1043 }
1044 }
1045
1046 static void
1047 qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
1048 struct qed_sp_vport_update_params *p_data)
1049 {
1050 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1051 struct pfvf_def_resp_tlv *p_resp;
1052 u16 tlv;
1053
1054 for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1055 tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
1056 if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
1057 continue;
1058
1059 p_resp = (struct pfvf_def_resp_tlv *)
1060 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
1061 tlv);
1062 if (p_resp && p_resp->hdr.status)
1063 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1064 "TLV[%d] Configuration %s\n",
1065 tlv,
1066 (p_resp && p_resp->hdr.status) ? "succeeded"
1067 : "failed");
1068 }
1069 }
1070
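/* qed_vf_pf_vport_update() builds a single request out of several optional
 * extended TLVs; every extended TLV added also grows the expected response
 * size by sizeof(struct pfvf_def_resp_tlv), since the PF answers each one
 * with its own default response (parsed in the helper above).
 */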
1071 int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
1072 struct qed_sp_vport_update_params *p_params)
1073 {
1074 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1075 struct vfpf_vport_update_tlv *req;
1076 struct pfvf_def_resp_tlv *resp;
1077 u8 update_rx, update_tx;
1078 u32 resp_size = 0;
1079 u16 size, tlv;
1080 int rc;
1081
1082 resp = &p_iov->pf2vf_reply->default_resp;
1083 resp_size = sizeof(*resp);
1084
1085 update_rx = p_params->update_vport_active_rx_flg;
1086 update_tx = p_params->update_vport_active_tx_flg;
1087
1088 /* clear mailbox and prep header tlv */
1089 qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
1090
1091 /* Prepare extended tlvs */
1092 if (update_rx || update_tx) {
1093 struct vfpf_vport_update_activate_tlv *p_act_tlv;
1094
1095 size = sizeof(struct vfpf_vport_update_activate_tlv);
1096 p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
1097 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
1098 size);
1099 resp_size += sizeof(struct pfvf_def_resp_tlv);
1100
1101 if (update_rx) {
1102 p_act_tlv->update_rx = update_rx;
1103 p_act_tlv->active_rx = p_params->vport_active_rx_flg;
1104 }
1105
1106 if (update_tx) {
1107 p_act_tlv->update_tx = update_tx;
1108 p_act_tlv->active_tx = p_params->vport_active_tx_flg;
1109 }
1110 }
1111
1112 if (p_params->update_tx_switching_flg) {
1113 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
1114
1115 size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
1116 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1117 p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
1118 tlv, size);
1119 resp_size += sizeof(struct pfvf_def_resp_tlv);
1120
1121 p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
1122 }
1123
1124 if (p_params->update_approx_mcast_flg) {
1125 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
1126
1127 size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
1128 p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
1129 CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
1130 resp_size += sizeof(struct pfvf_def_resp_tlv);
1131
1132 memcpy(p_mcast_tlv->bins, p_params->bins,
1133 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1134 }
1135
1136 update_rx = p_params->accept_flags.update_rx_mode_config;
1137 update_tx = p_params->accept_flags.update_tx_mode_config;
1138
1139 if (update_rx || update_tx) {
1140 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
1141
1142 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1143 size = sizeof(struct vfpf_vport_update_accept_param_tlv);
1144 p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
1145 resp_size += sizeof(struct pfvf_def_resp_tlv);
1146
1147 if (update_rx) {
1148 p_accept_tlv->update_rx_mode = update_rx;
1149 p_accept_tlv->rx_accept_filter =
1150 p_params->accept_flags.rx_accept_filter;
1151 }
1152
1153 if (update_tx) {
1154 p_accept_tlv->update_tx_mode = update_tx;
1155 p_accept_tlv->tx_accept_filter =
1156 p_params->accept_flags.tx_accept_filter;
1157 }
1158 }
1159
1160 if (p_params->rss_params) {
1161 struct qed_rss_params *rss_params = p_params->rss_params;
1162 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
1163 int i, table_size;
1164
1165 size = sizeof(struct vfpf_vport_update_rss_tlv);
1166 p_rss_tlv = qed_add_tlv(p_hwfn,
1167 &p_iov->offset,
1168 CHANNEL_TLV_VPORT_UPDATE_RSS, size);
1169 resp_size += sizeof(struct pfvf_def_resp_tlv);
1170
1171 if (rss_params->update_rss_config)
1172 p_rss_tlv->update_rss_flags |=
1173 VFPF_UPDATE_RSS_CONFIG_FLAG;
1174 if (rss_params->update_rss_capabilities)
1175 p_rss_tlv->update_rss_flags |=
1176 VFPF_UPDATE_RSS_CAPS_FLAG;
1177 if (rss_params->update_rss_ind_table)
1178 p_rss_tlv->update_rss_flags |=
1179 VFPF_UPDATE_RSS_IND_TABLE_FLAG;
1180 if (rss_params->update_rss_key)
1181 p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
1182
1183 p_rss_tlv->rss_enable = rss_params->rss_enable;
1184 p_rss_tlv->rss_caps = rss_params->rss_caps;
1185 p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
1186
1187 table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
1188 1 << p_rss_tlv->rss_table_size_log);
1189 for (i = 0; i < table_size; i++) {
1190 struct qed_queue_cid *p_queue;
1191
1192 p_queue = rss_params->rss_ind_table[i];
1193 p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
1194 }
1195 memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
1196 sizeof(rss_params->rss_key));
1197 }
1198
1199 if (p_params->update_accept_any_vlan_flg) {
1200 struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
1201
1202 size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
1203 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1204 p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
1205
1206 resp_size += sizeof(struct pfvf_def_resp_tlv);
1207 p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
1208 p_any_vlan_tlv->update_accept_any_vlan_flg =
1209 p_params->update_accept_any_vlan_flg;
1210 }
1211
1212 /* add list termination tlv */
1213 qed_add_tlv(p_hwfn, &p_iov->offset,
1214 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
1215
1216 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
1217 if (rc)
1218 goto exit;
1219
1220 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1221 rc = -EINVAL;
1222 goto exit;
1223 }
1224
1225 qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
1226
1227 exit:
1228 qed_vf_pf_req_end(p_hwfn, rc);
1229
1230 return rc;
1231 }
1232
1233 int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
1234 {
1235 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1236 struct pfvf_def_resp_tlv *resp;
1237 struct vfpf_first_tlv *req;
1238 int rc;
1239
1240 /* clear mailbox and prep first tlv */
1241 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
1242
1243 /* add list termination tlv */
1244 qed_add_tlv(p_hwfn, &p_iov->offset,
1245 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
1246
1247 resp = &p_iov->pf2vf_reply->default_resp;
1248 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1249 if (rc)
1250 goto exit;
1251
1252 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1253 rc = -EAGAIN;
1254 goto exit;
1255 }
1256
1257 p_hwfn->b_int_enabled = 0;
1258
1259 exit:
1260 qed_vf_pf_req_end(p_hwfn, rc);
1261
1262 return rc;
1263 }
1264
1265 void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
1266 struct qed_filter_mcast *p_filter_cmd)
1267 {
1268 struct qed_sp_vport_update_params sp_params;
1269 int i;
1270
1271 memset(&sp_params, 0, sizeof(sp_params));
1272 sp_params.update_approx_mcast_flg = 1;
1273
1274 if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1275 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1276 u32 bit;
1277
1278 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1279 sp_params.bins[bit / 32] |= 1 << (bit % 32);
1280 }
1281 }
1282
1283 qed_vf_pf_vport_update(p_hwfn, &sp_params);
1284 }
1285
1286 int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
1287 struct qed_filter_ucast *p_ucast)
1288 {
1289 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1290 struct vfpf_ucast_filter_tlv *req;
1291 struct pfvf_def_resp_tlv *resp;
1292 int rc;
1293
1294 /* clear mailbox and prep first tlv */
1295 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
1296 req->opcode = (u8)p_ucast->opcode;
1297 req->type = (u8)p_ucast->type;
1298 memcpy(req->mac, p_ucast->mac, ETH_ALEN);
1299 req->vlan = p_ucast->vlan;
1300
1301 /* add list termination tlv */
1302 qed_add_tlv(p_hwfn, &p_iov->offset,
1303 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
1304
1305 resp = &p_iov->pf2vf_reply->default_resp;
1306 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1307 if (rc)
1308 goto exit;
1309
1310 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1311 rc = -EAGAIN;
1312 goto exit;
1313 }
1314
1315 exit:
1316 qed_vf_pf_req_end(p_hwfn, rc);
1317
1318 return rc;
1319 }
1320
1321 int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
1322 {
1323 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1324 struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
1325 int rc;
1326
1327 /* clear mailbox and prep first tlv */
1328 qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
1329 sizeof(struct vfpf_first_tlv));
1330
1331 /* add list termination tlv */
1332 qed_add_tlv(p_hwfn, &p_iov->offset,
1333 CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
1334
1335 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1336 if (rc)
1337 goto exit;
1338
1339 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1340 rc = -EINVAL;
1341 goto exit;
1342 }
1343
1344 exit:
1345 qed_vf_pf_req_end(p_hwfn, rc);
1346
1347 return rc;
1348 }
1349
1350 int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
1351 u16 *p_coal, struct qed_queue_cid *p_cid)
1352 {
1353 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1354 struct pfvf_read_coal_resp_tlv *resp;
1355 struct vfpf_read_coal_req_tlv *req;
1356 int rc;
1357
1358 /* clear mailbox and prep header tlv */
1359 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
1360 req->qid = p_cid->rel.queue_id;
1361 req->is_rx = p_cid->b_is_rx ? 1 : 0;
1362
1363 qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
1364 sizeof(struct channel_list_end_tlv));
1365 resp = &p_iov->pf2vf_reply->read_coal_resp;
1366
1367 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1368 if (rc)
1369 goto exit;
1370
1371 if (resp->hdr.status != PFVF_STATUS_SUCCESS)
1372 goto exit;
1373
1374 *p_coal = resp->coal;
1375 exit:
1376 qed_vf_pf_req_end(p_hwfn, rc);
1377
1378 return rc;
1379 }
1380
1381 int
1382 qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
1383 const u8 *p_mac)
1384 {
1385 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1386 struct vfpf_bulletin_update_mac_tlv *p_req;
1387 struct pfvf_def_resp_tlv *p_resp;
1388 int rc;
1389
1390 if (!p_mac)
1391 return -EINVAL;
1392
1393 /* clear mailbox and prep header tlv */
1394 p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
1395 sizeof(*p_req));
1396 ether_addr_copy(p_req->mac, p_mac);
1397 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1398 "Requesting bulletin update for MAC[%pM]\n", p_mac);
1399
1400 /* add list termination tlv */
1401 qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
1402 sizeof(struct channel_list_end_tlv));
1403
1404 p_resp = &p_iov->pf2vf_reply->default_resp;
1405 rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
1406 qed_vf_pf_req_end(p_hwfn, rc);
1407 return rc;
1408 }
1409
1410 int
1411 qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
1412 u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
1413 {
1414 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1415 struct vfpf_update_coalesce *req;
1416 struct pfvf_def_resp_tlv *resp;
1417 int rc;
1418
1419 /* clear mailbox and prep header tlv */
1420 req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));
1421
1422 req->rx_coal = rx_coal;
1423 req->tx_coal = tx_coal;
1424 req->qid = p_cid->rel.queue_id;
1425
1426 DP_VERBOSE(p_hwfn,
1427 QED_MSG_IOV,
1428 "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
1429 rx_coal, tx_coal, req->qid);
1430
1431 /* add list termination tlv */
1432 qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
1433 sizeof(struct channel_list_end_tlv));
1434
1435 resp = &p_iov->pf2vf_reply->default_resp;
1436 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1437 if (rc)
1438 goto exit;
1439
1440 if (resp->hdr.status != PFVF_STATUS_SUCCESS)
1441 goto exit;
1442
1443 if (rx_coal)
1444 p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
1445
1446 if (tx_coal)
1447 p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
1448
1449 exit:
1450 qed_vf_pf_req_end(p_hwfn, rc);
1451 return rc;
1452 }
1453
1454 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
1455 {
1456 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1457
1458 if (!p_iov) {
1459 DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
1460 return 0;
1461 }
1462
1463 return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
1464 }
1465
1466 void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
1467 u16 sb_id, struct qed_sb_info *p_sb)
1468 {
1469 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1470
1471 if (!p_iov) {
1472 DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
1473 return;
1474 }
1475
1476 if (sb_id >= PFVF_MAX_SBS_PER_VF) {
1477 DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
1478 return;
1479 }
1480
1481 p_iov->sbs_info[sb_id] = p_sb;
1482 }
1483
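/* The bulletin is copied to a local shadow before validation so the PF
 * cannot change it mid-check; its CRC covers everything past the crc field,
 * i.e. crc32 over (bulletin size - sizeof(crc)) bytes.
 */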
1484 int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
1485 {
1486 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1487 struct qed_bulletin_content shadow;
1488 u32 crc, crc_size;
1489
1490 crc_size = sizeof(p_iov->bulletin.p_virt->crc);
1491 *p_change = 0;
1492
1493 /* Need to guarantee PF is not in the middle of writing it */
1494 memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
1495
1496 /* If version did not update, no need to do anything */
1497 if (shadow.version == p_iov->bulletin_shadow.version)
1498 return 0;
1499
1500 /* Verify the bulletin we see is valid */
1501 crc = crc32(0, (u8 *)&shadow + crc_size,
1502 p_iov->bulletin.size - crc_size);
1503 if (crc != shadow.crc)
1504 return -EAGAIN;
1505
1506 /* Set the shadow bulletin and process it */
1507 memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
1508
1509 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1510 "Read a bulletin update %08x\n", shadow.version);
1511
1512 *p_change = 1;
1513
1514 return 0;
1515 }
1516
1517 void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
1518 struct qed_mcp_link_params *p_params,
1519 struct qed_bulletin_content *p_bulletin)
1520 {
1521 memset(p_params, 0, sizeof(*p_params));
1522
1523 p_params->speed.autoneg = p_bulletin->req_autoneg;
1524 p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
1525 p_params->speed.forced_speed = p_bulletin->req_forced_speed;
1526 p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
1527 p_params->pause.forced_rx = p_bulletin->req_forced_rx;
1528 p_params->pause.forced_tx = p_bulletin->req_forced_tx;
1529 p_params->loopback_mode = p_bulletin->req_loopback;
1530 }
1531
1532 void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
1533 struct qed_mcp_link_params *params)
1534 {
1535 __qed_vf_get_link_params(p_hwfn, params,
1536 &(p_hwfn->vf_iov_info->bulletin_shadow));
1537 }
1538
1539 void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
1540 struct qed_mcp_link_state *p_link,
1541 struct qed_bulletin_content *p_bulletin)
1542 {
1543 memset(p_link, 0, sizeof(*p_link));
1544
1545 p_link->link_up = p_bulletin->link_up;
1546 p_link->speed = p_bulletin->speed;
1547 p_link->full_duplex = p_bulletin->full_duplex;
1548 p_link->an = p_bulletin->autoneg;
1549 p_link->an_complete = p_bulletin->autoneg_complete;
1550 p_link->parallel_detection = p_bulletin->parallel_detection;
1551 p_link->pfc_enabled = p_bulletin->pfc_enabled;
1552 p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
1553 p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
1554 p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
1555 p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
1556 p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
1557 }
1558
1559 void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
1560 struct qed_mcp_link_state *link)
1561 {
1562 __qed_vf_get_link_state(p_hwfn, link,
1563 &(p_hwfn->vf_iov_info->bulletin_shadow));
1564 }
1565
1566 void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
1567 struct qed_mcp_link_capabilities *p_link_caps,
1568 struct qed_bulletin_content *p_bulletin)
1569 {
1570 memset(p_link_caps, 0, sizeof(*p_link_caps));
1571 p_link_caps->speed_capabilities = p_bulletin->capability_speed;
1572 }
1573
1574 void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
1575 struct qed_mcp_link_capabilities *p_link_caps)
1576 {
1577 __qed_vf_get_link_caps(p_hwfn, p_link_caps,
1578 &(p_hwfn->vf_iov_info->bulletin_shadow));
1579 }
1580
1581 void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
1582 {
1583 *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
1584 }
1585
1586 void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
1587 {
1588 *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
1589 }
1590
1591 void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
1592 {
1593 *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
1594 }
1595
1596 void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
1597 {
1598 memcpy(port_mac,
1599 p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
1600 }
1601
1602 void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
1603 {
1604 struct qed_vf_iov *p_vf;
1605
1606 p_vf = p_hwfn->vf_iov_info;
1607 *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
1608 }
1609
1610 void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
1611 {
1612 struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;
1613
1614 *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
1615 }
1616
1617 bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
1618 {
1619 struct qed_bulletin_content *bulletin;
1620
1621 bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
1622 if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
1623 return true;
1624
1625 /* Forbid VF from changing a MAC enforced by PF */
1626 if (ether_addr_equal(bulletin->mac, mac))
1627 return false;
1628
1629 return false;
1630 }
1631
1632 static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
1633 u8 *dst_mac, u8 *p_is_forced)
1634 {
1635 struct qed_bulletin_content *bulletin;
1636
1637 bulletin = &hwfn->vf_iov_info->bulletin_shadow;
1638
1639 if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
1640 if (p_is_forced)
1641 *p_is_forced = 1;
1642 } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
1643 if (p_is_forced)
1644 *p_is_forced = 0;
1645 } else {
1646 return false;
1647 }
1648
1649 ether_addr_copy(dst_mac, bulletin->mac);
1650
1651 return true;
1652 }
1653
1654 static void
1655 qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
1656 u16 *p_vxlan_port, u16 *p_geneve_port)
1657 {
1658 struct qed_bulletin_content *p_bulletin;
1659
1660 p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
1661
1662 *p_vxlan_port = p_bulletin->vxlan_udp_port;
1663 *p_geneve_port = p_bulletin->geneve_udp_port;
1664 }
1665
1666 void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
1667 u16 *fw_major, u16 *fw_minor,
1668 u16 *fw_rev, u16 *fw_eng)
1669 {
1670 struct pf_vf_pfdev_info *info;
1671
1672 info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
1673
1674 *fw_major = info->fw_major;
1675 *fw_minor = info->fw_minor;
1676 *fw_rev = info->fw_rev;
1677 *fw_eng = info->fw_eng;
1678 }
1679
1680 static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
1681 {
1682 struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
1683 u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
1684 void *cookie = hwfn->cdev->ops_cookie;
1685 u16 vxlan_port, geneve_port;
1686
1687 qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
1688 is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
1689 &is_mac_forced);
1690 if (is_mac_exist && cookie)
1691 ops->force_mac(cookie, mac, !!is_mac_forced);
1692
1693 ops->ports_update(cookie, vxlan_port, geneve_port);
1694
1695 /* Always update link configuration according to bulletin */
1696 qed_link_update(hwfn, NULL);
1697 }
1698
1699 void qed_iov_vf_task(struct work_struct *work)
1700 {
1701 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
1702 iov_task.work);
1703 u8 change = 0;
1704
1705 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
1706 return;
1707
1708 /* Handle bulletin board changes */
1709 qed_vf_read_bulletin(hwfn, &change);
1710 if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
1711 &hwfn->iov_task_flags))
1712 change = 1;
1713 if (change)
1714 qed_handle_bulletin_change(hwfn);
1715
1716 /* As VF is polling bulletin board, need to constantly re-schedule */
1717 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
1718 }
1719