Lines matching references to identifier f (cxgb4 filter code)

56 static int set_tcb_field(struct adapter *adap, struct filter_entry *f,  in set_tcb_field()  argument
75 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); in set_tcb_field()
82 static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f, in set_tcb_tflag() argument
86 return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos, in set_tcb_tflag()
120 static void mk_set_tcb_ulp(struct filter_entry *f, in mk_set_tcb_ulp() argument
132 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid)); in mk_set_tcb_ulp()
143 static int configure_filter_smac(struct adapter *adap, struct filter_entry *f) in configure_filter_smac() argument
148 err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W, in configure_filter_smac()
150 TCB_SMAC_SEL_V(f->smt->idx), 1); in configure_filter_smac()
154 err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1); in configure_filter_smac()
160 f->tid, err); in configure_filter_smac()
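
The configure_filter_smac() matches above cover both of its TCB writes: the index of the filter's switching SMT entry is written into the TCB's SMAC_SEL field, and the TF_CCTRL_CWR flag is then set so the source-MAC rewrite takes effect. A minimal sketch of that sequence, reassembled from the fragments in this listing; the mask argument did not match this search and is restored here from the TCB_SMAC_SEL_V()/TCB_SMAC_SEL_M macros as an assumption, and error reporting is elided:

static int configure_filter_smac_sketch(struct adapter *adap,
					struct filter_entry *f)
{
	int err;

	/* Point the TCB at the switching SMT entry held by this filter. */
	err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
			    TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
			    TCB_SMAC_SEL_V(f->smt->idx), 1);
	if (err)
		return err;

	/* Enable the source-MAC rewrite for this filter's TCB. */
	return set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
}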
164 static void set_nat_params(struct adapter *adap, struct filter_entry *f, in set_nat_params() argument
168 u8 *nat_lp = (u8 *)&f->fs.nat_lport; in set_nat_params()
169 u8 *nat_fp = (u8 *)&f->fs.nat_fport; in set_nat_params()
172 if (f->fs.type) { in set_nat_params()
173 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, in set_nat_params()
174 WORD_MASK, f->fs.nat_lip[15] | in set_nat_params()
175 f->fs.nat_lip[14] << 8 | in set_nat_params()
176 f->fs.nat_lip[13] << 16 | in set_nat_params()
177 (u64)f->fs.nat_lip[12] << 24, 1); in set_nat_params()
179 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1, in set_nat_params()
180 WORD_MASK, f->fs.nat_lip[11] | in set_nat_params()
181 f->fs.nat_lip[10] << 8 | in set_nat_params()
182 f->fs.nat_lip[9] << 16 | in set_nat_params()
183 (u64)f->fs.nat_lip[8] << 24, 1); in set_nat_params()
185 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2, in set_nat_params()
186 WORD_MASK, f->fs.nat_lip[7] | in set_nat_params()
187 f->fs.nat_lip[6] << 8 | in set_nat_params()
188 f->fs.nat_lip[5] << 16 | in set_nat_params()
189 (u64)f->fs.nat_lip[4] << 24, 1); in set_nat_params()
191 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3, in set_nat_params()
192 WORD_MASK, f->fs.nat_lip[3] | in set_nat_params()
193 f->fs.nat_lip[2] << 8 | in set_nat_params()
194 f->fs.nat_lip[1] << 16 | in set_nat_params()
195 (u64)f->fs.nat_lip[0] << 24, 1); in set_nat_params()
197 set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W, in set_nat_params()
198 WORD_MASK, f->fs.nat_lip[3] | in set_nat_params()
199 f->fs.nat_lip[2] << 8 | in set_nat_params()
200 f->fs.nat_lip[1] << 16 | in set_nat_params()
201 (u64)f->fs.nat_lip[0] << 24, 1); in set_nat_params()
206 if (f->fs.type) { in set_nat_params()
207 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W, in set_nat_params()
208 WORD_MASK, f->fs.nat_fip[15] | in set_nat_params()
209 f->fs.nat_fip[14] << 8 | in set_nat_params()
210 f->fs.nat_fip[13] << 16 | in set_nat_params()
211 (u64)f->fs.nat_fip[12] << 24, 1); in set_nat_params()
213 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1, in set_nat_params()
214 WORD_MASK, f->fs.nat_fip[11] | in set_nat_params()
215 f->fs.nat_fip[10] << 8 | in set_nat_params()
216 f->fs.nat_fip[9] << 16 | in set_nat_params()
217 (u64)f->fs.nat_fip[8] << 24, 1); in set_nat_params()
219 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2, in set_nat_params()
220 WORD_MASK, f->fs.nat_fip[7] | in set_nat_params()
221 f->fs.nat_fip[6] << 8 | in set_nat_params()
222 f->fs.nat_fip[5] << 16 | in set_nat_params()
223 (u64)f->fs.nat_fip[4] << 24, 1); in set_nat_params()
225 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3, in set_nat_params()
226 WORD_MASK, f->fs.nat_fip[3] | in set_nat_params()
227 f->fs.nat_fip[2] << 8 | in set_nat_params()
228 f->fs.nat_fip[1] << 16 | in set_nat_params()
229 (u64)f->fs.nat_fip[0] << 24, 1); in set_nat_params()
232 set_tcb_field(adap, f, tid, in set_nat_params()
234 WORD_MASK, f->fs.nat_fip[3] | in set_nat_params()
235 f->fs.nat_fip[2] << 8 | in set_nat_params()
236 f->fs.nat_fip[1] << 16 | in set_nat_params()
237 (u64)f->fs.nat_fip[0] << 24, 1); in set_nat_params()
241 set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, in set_nat_params()
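
The set_nat_params() matches show the byte packing used for the NAT rewrite addresses: four consecutive TCB words (starting at TCB_SND_UNA_RAW_W for the local IP and TCB_RX_FRAG2_PTR_RAW_W for the foreign IP) each receive four address bytes, with the lowest-numbered byte of the group landing in the most significant position and the top byte widened to u64 before shifting. A hypothetical helper, not part of the driver, that captures just this packing:

/* Hypothetical helper (illustration only): build the 32-bit value for
 * one TCB word from four consecutive NAT address bytes, in the same
 * byte order as the matches above.
 */
static inline u64 nat_addr_word(const u8 *addr, unsigned int off)
{
	return addr[off + 3] | addr[off + 2] << 8 | addr[off + 1] << 16 |
	       (u64)addr[off] << 24;
}

/* With it, word w (0..3) of the IPv6 local-IP rewrite would read:
 *
 *	set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + w, WORD_MASK,
 *		      nat_addr_word(f->fs.nat_lip, 12 - 4 * w), 1);
 */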
362 struct filter_entry *f; in get_filter_count() local
370 f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base]; in get_filter_count()
371 if (!f) in get_filter_count()
380 f = &adapter->tids.hpftid_tab[fidx]; in get_filter_count()
382 f = &adapter->tids.ftid_tab[fidx - in get_filter_count()
384 if (!f->valid) in get_filter_count()
387 tcbaddr = tcb_base + f->tid * TCB_SIZE; in get_filter_count()
552 struct filter_entry *f; in cxgb4_get_free_ftid() local
627 f = &t->hpftid_tab[ftid]; in cxgb4_get_free_ftid()
628 if (f->valid && tc_prio < f->fs.tc_prio) in cxgb4_get_free_ftid()
637 f = &t->ftid_tab[ftid]; in cxgb4_get_free_ftid()
638 if (f->valid && tc_prio > f->fs.tc_prio) in cxgb4_get_free_ftid()
761 struct filter_entry *f; in del_filter_wr() local
766 f = &adapter->tids.hpftid_tab[fidx]; in del_filter_wr()
768 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids]; in del_filter_wr()
777 t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id); in del_filter_wr()
782 f->pending = 1; in del_filter_wr()
796 struct filter_entry *f; in set_filter_wr() local
800 f = &adapter->tids.hpftid_tab[fidx]; in set_filter_wr()
802 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids]; in set_filter_wr()
812 if (f->fs.newdmac || f->fs.newvlan) { in set_filter_wr()
814 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan, in set_filter_wr()
815 f->fs.eport, f->fs.dmac); in set_filter_wr()
816 if (!f->l2t) { in set_filter_wr()
825 if (f->fs.newsmac) { in set_filter_wr()
826 f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac); in set_filter_wr()
827 if (!f->smt) { in set_filter_wr()
828 if (f->l2t) { in set_filter_wr()
829 cxgb4_l2t_release(f->l2t); in set_filter_wr()
830 f->l2t = NULL; in set_filter_wr()
853 htonl(FW_FILTER_WR_TID_V(f->tid) | in set_filter_wr()
854 FW_FILTER_WR_RQTYPE_V(f->fs.type) | in set_filter_wr()
856 FW_FILTER_WR_IQ_V(f->fs.iq)); in set_filter_wr()
858 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) | in set_filter_wr()
859 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) | in set_filter_wr()
860 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) | in set_filter_wr()
861 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) | in set_filter_wr()
862 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | in set_filter_wr()
863 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | in set_filter_wr()
864 FW_FILTER_WR_DMAC_V(f->fs.newdmac) | in set_filter_wr()
865 FW_FILTER_WR_SMAC_V(f->fs.newsmac) | in set_filter_wr()
866 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || in set_filter_wr()
867 f->fs.newvlan == VLAN_REWRITE) | in set_filter_wr()
868 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || in set_filter_wr()
869 f->fs.newvlan == VLAN_REWRITE) | in set_filter_wr()
870 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) | in set_filter_wr()
871 FW_FILTER_WR_TXCHAN_V(f->fs.eport) | in set_filter_wr()
872 FW_FILTER_WR_PRIO_V(f->fs.prio) | in set_filter_wr()
873 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0)); in set_filter_wr()
874 fwr->ethtype = htons(f->fs.val.ethtype); in set_filter_wr()
875 fwr->ethtypem = htons(f->fs.mask.ethtype); in set_filter_wr()
877 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) | in set_filter_wr()
878 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) | in set_filter_wr()
879 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) | in set_filter_wr()
880 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) | in set_filter_wr()
881 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) | in set_filter_wr()
882 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld)); in set_filter_wr()
883 if (f->fs.newsmac) in set_filter_wr()
884 fwr->smac_sel = f->smt->idx; in set_filter_wr()
889 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) | in set_filter_wr()
890 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) | in set_filter_wr()
891 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) | in set_filter_wr()
892 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) | in set_filter_wr()
893 FW_FILTER_WR_PORT_V(f->fs.val.iport) | in set_filter_wr()
894 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) | in set_filter_wr()
895 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) | in set_filter_wr()
896 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype)); in set_filter_wr()
897 fwr->ptcl = f->fs.val.proto; in set_filter_wr()
898 fwr->ptclm = f->fs.mask.proto; in set_filter_wr()
899 fwr->ttyp = f->fs.val.tos; in set_filter_wr()
900 fwr->ttypm = f->fs.mask.tos; in set_filter_wr()
901 fwr->ivlan = htons(f->fs.val.ivlan); in set_filter_wr()
902 fwr->ivlanm = htons(f->fs.mask.ivlan); in set_filter_wr()
903 fwr->ovlan = htons(f->fs.val.ovlan); in set_filter_wr()
904 fwr->ovlanm = htons(f->fs.mask.ovlan); in set_filter_wr()
905 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); in set_filter_wr()
906 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); in set_filter_wr()
907 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); in set_filter_wr()
908 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); in set_filter_wr()
909 fwr->lp = htons(f->fs.val.lport); in set_filter_wr()
910 fwr->lpm = htons(f->fs.mask.lport); in set_filter_wr()
911 fwr->fp = htons(f->fs.val.fport); in set_filter_wr()
912 fwr->fpm = htons(f->fs.mask.fport); in set_filter_wr()
915 u8 *nat_lp = (u8 *)&f->fs.nat_lport; in set_filter_wr()
916 u8 *nat_fp = (u8 *)&f->fs.nat_fport; in set_filter_wr()
919 FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? in set_filter_wr()
922 FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); in set_filter_wr()
923 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); in set_filter_wr()
924 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); in set_filter_wr()
932 f->pending = 1; in set_filter_wr()
933 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); in set_filter_wr()
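
Once the work request is assembled, set_filter_wr() marks the filter as pending and steers the skb to the control queue of the filter's ingress port before handing it to the firmware. A condensed sketch of that tail, assuming the driver's usual alloc_skb()/t4_ofld_send() path; the field assembly is omitted since it appears in the matches above:

static int send_filter_wr_sketch(struct adapter *adapter,
				 struct filter_entry *f)
{
	struct fw_filter2_wr *fwr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = __skb_put_zero(skb, sizeof(*fwr));
	/* populate *fwr from f->fs as in the set_filter_wr() lines above */

	/* The firmware reply (filter_rpl) later clears f->pending and
	 * sets f->valid.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}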
939 int writable_filter(struct filter_entry *f) in writable_filter() argument
941 if (f->locked) in writable_filter()
943 if (f->pending) in writable_filter()
955 struct filter_entry *f; in delete_filter() local
963 f = &adapter->tids.hpftid_tab[fidx]; in delete_filter()
965 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids]; in delete_filter()
966 ret = writable_filter(f); in delete_filter()
969 if (f->valid) in delete_filter()
978 void clear_filter(struct adapter *adap, struct filter_entry *f) in clear_filter() argument
980 struct port_info *pi = netdev_priv(f->dev); in clear_filter()
986 if (f->l2t) in clear_filter()
987 cxgb4_l2t_release(f->l2t); in clear_filter()
989 if (f->smt) in clear_filter()
990 cxgb4_smt_release(f->smt); in clear_filter()
992 if (f->fs.val.encap_vld && f->fs.val.ovlan_vld) in clear_filter()
994 f->fs.val.ovlan & 0x1ff, 0); in clear_filter()
996 if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type) in clear_filter()
997 cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); in clear_filter()
1003 memset(f, 0, sizeof(*f)); in clear_filter()
1012 struct filter_entry *f = &adapter->tids.hpftid_tab[0]; in clear_all_filters() local
1014 for (i = 0; i < adapter->tids.nhpftids; i++, f++) in clear_all_filters()
1015 if (f->valid || f->pending) in clear_all_filters()
1016 cxgb4_del_filter(dev, i, &f->fs); in clear_all_filters()
1020 struct filter_entry *f = &adapter->tids.ftid_tab[0]; in clear_all_filters() local
1026 for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++) in clear_all_filters()
1027 if (f->valid || f->pending) in clear_all_filters()
1028 cxgb4_del_filter(dev, i, &f->fs); in clear_all_filters()
1033 struct filter_entry *f; in clear_all_filters() local
1038 f = (struct filter_entry *) in clear_all_filters()
1041 if (f && (f->valid || f->pending)) in clear_all_filters()
1042 cxgb4_del_filter(dev, f->tid, &f->fs); in clear_all_filters()
1047 f = (struct filter_entry *)adapter->tids.tid_tab[i]; in clear_all_filters()
1049 if (f && (f->valid || f->pending)) in clear_all_filters()
1050 cxgb4_del_filter(dev, f->tid, &f->fs); in clear_all_filters()
1294 static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb, in mk_act_open_req6() argument
1304 req->local_port = cpu_to_be16(f->fs.val.lport); in mk_act_open_req6()
1305 req->peer_port = cpu_to_be16(f->fs.val.fport); in mk_act_open_req6()
1306 req->local_ip_hi = *(__be64 *)(&f->fs.val.lip); in mk_act_open_req6()
1307 req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1); in mk_act_open_req6()
1308 req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip); in mk_act_open_req6()
1309 req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1); in mk_act_open_req6()
1310 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE || in mk_act_open_req6()
1311 f->fs.newvlan == VLAN_REWRITE) | in mk_act_open_req6()
1312 DELACK_V(f->fs.hitcnts) | in mk_act_open_req6()
1313 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) | in mk_act_open_req6()
1314 SMAC_SEL_V((cxgb4_port_viid(f->dev) & in mk_act_open_req6()
1316 TX_CHAN_V(f->fs.eport) | in mk_act_open_req6()
1317 NO_CONG_V(f->fs.rpttid) | in mk_act_open_req6()
1318 ULP_MODE_V(f->fs.nat_mode ? in mk_act_open_req6()
1321 t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs, in mk_act_open_req6()
1322 f->dev))); in mk_act_open_req6()
1324 RSS_QUEUE_V(f->fs.iq) | in mk_act_open_req6()
1325 TX_QUEUE_V(f->fs.nat_mode) | in mk_act_open_req6()
1327 RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) | in mk_act_open_req6()
1328 PACE_V((f->fs.maskhash) | in mk_act_open_req6()
1329 ((f->fs.dirsteerhash) << 1))); in mk_act_open_req6()
1332 static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb, in mk_act_open_req() argument
1342 req->local_port = cpu_to_be16(f->fs.val.lport); in mk_act_open_req()
1343 req->peer_port = cpu_to_be16(f->fs.val.fport); in mk_act_open_req()
1344 memcpy(&req->local_ip, f->fs.val.lip, 4); in mk_act_open_req()
1345 memcpy(&req->peer_ip, f->fs.val.fip, 4); in mk_act_open_req()
1346 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE || in mk_act_open_req()
1347 f->fs.newvlan == VLAN_REWRITE) | in mk_act_open_req()
1348 DELACK_V(f->fs.hitcnts) | in mk_act_open_req()
1349 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) | in mk_act_open_req()
1350 SMAC_SEL_V((cxgb4_port_viid(f->dev) & in mk_act_open_req()
1352 TX_CHAN_V(f->fs.eport) | in mk_act_open_req()
1353 NO_CONG_V(f->fs.rpttid) | in mk_act_open_req()
1354 ULP_MODE_V(f->fs.nat_mode ? in mk_act_open_req()
1358 t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs, in mk_act_open_req()
1359 f->dev))); in mk_act_open_req()
1361 RSS_QUEUE_V(f->fs.iq) | in mk_act_open_req()
1362 TX_QUEUE_V(f->fs.nat_mode) | in mk_act_open_req()
1364 RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) | in mk_act_open_req()
1365 PACE_V((f->fs.maskhash) | in mk_act_open_req()
1366 ((f->fs.dirsteerhash) << 1))); in mk_act_open_req()
1376 struct filter_entry *f; in cxgb4_set_hash_filter() local
1391 f = kzalloc(sizeof(*f), GFP_KERNEL); in cxgb4_set_hash_filter()
1392 if (!f) in cxgb4_set_hash_filter()
1395 f->fs = *fs; in cxgb4_set_hash_filter()
1396 f->ctx = ctx; in cxgb4_set_hash_filter()
1397 f->dev = dev; in cxgb4_set_hash_filter()
1398 f->fs.iq = iq; in cxgb4_set_hash_filter()
1404 if (f->fs.newdmac || f->fs.newvlan) { in cxgb4_set_hash_filter()
1406 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan, in cxgb4_set_hash_filter()
1407 f->fs.eport, f->fs.dmac); in cxgb4_set_hash_filter()
1408 if (!f->l2t) { in cxgb4_set_hash_filter()
1417 if (f->fs.newsmac) { in cxgb4_set_hash_filter()
1418 f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac); in cxgb4_set_hash_filter()
1419 if (!f->smt) { in cxgb4_set_hash_filter()
1420 if (f->l2t) { in cxgb4_set_hash_filter()
1421 cxgb4_l2t_release(f->l2t); in cxgb4_set_hash_filter()
1422 f->l2t = NULL; in cxgb4_set_hash_filter()
1429 atid = cxgb4_alloc_atid(t, f); in cxgb4_set_hash_filter()
1437 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf; in cxgb4_set_hash_filter()
1438 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; in cxgb4_set_hash_filter()
1439 f->fs.val.ovlan_vld = fs->val.pfvf_vld; in cxgb4_set_hash_filter()
1440 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; in cxgb4_set_hash_filter()
1442 if (f->fs.val.encap_vld) { in cxgb4_set_hash_filter()
1443 struct port_info *pi = netdev_priv(f->dev); in cxgb4_set_hash_filter()
1450 f->fs.val.vni, in cxgb4_set_hash_filter()
1451 f->fs.mask.vni, in cxgb4_set_hash_filter()
1456 f->fs.val.ovlan = ret; in cxgb4_set_hash_filter()
1457 f->fs.mask.ovlan = 0xffff; in cxgb4_set_hash_filter()
1458 f->fs.val.ovlan_vld = 1; in cxgb4_set_hash_filter()
1459 f->fs.mask.ovlan_vld = 1; in cxgb4_set_hash_filter()
1464 if (f->fs.type) { in cxgb4_set_hash_filter()
1465 ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1); in cxgb4_set_hash_filter()
1475 mk_act_open_req6(f, skb, in cxgb4_set_hash_filter()
1485 mk_act_open_req(f, skb, in cxgb4_set_hash_filter()
1490 f->pending = 1; in cxgb4_set_hash_filter()
1491 set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3); in cxgb4_set_hash_filter()
1496 cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); in cxgb4_set_hash_filter()
1499 if (f->fs.val.encap_vld && f->fs.val.ovlan_vld) in cxgb4_set_hash_filter()
1500 t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1); in cxgb4_set_hash_filter()
1506 if (f->smt) { in cxgb4_set_hash_filter()
1507 cxgb4_smt_release(f->smt); in cxgb4_set_hash_filter()
1508 f->smt = NULL; in cxgb4_set_hash_filter()
1512 if (f->l2t) { in cxgb4_set_hash_filter()
1513 cxgb4_l2t_release(f->l2t); in cxgb4_set_hash_filter()
1514 f->l2t = NULL; in cxgb4_set_hash_filter()
1518 kfree(f); in cxgb4_set_hash_filter()
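
For hash (TCAM-less) filters the tuple is installed through a CPL active-open request rather than a FW_FILTER_WR: cxgb4_set_hash_filter() picks the IPv6 or IPv4 builder shown above based on f->fs.type, marks the filter pending and sends the skb at setup priority. A minimal sketch of that step; the qid_filterid name is used here for the queue-id/atid value the caller builds, and the wrapper itself is illustrative, not a driver function:

static void send_hash_filter_req_sketch(struct adapter *adapter,
					struct filter_entry *f,
					struct sk_buff *skb,
					unsigned int qid_filterid)
{
	if (f->fs.type)		/* IPv6 tuple (128-bit addresses) */
		mk_act_open_req6(f, skb, qid_filterid, adapter);
	else			/* IPv4 tuple */
		mk_act_open_req(f, skb, qid_filterid, adapter);

	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
}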
1535 struct filter_entry *f, *tab; in __cxgb4_set_filter() local
1590 f = &tab[fidx]; in __cxgb4_set_filter()
1591 if (f->valid) { in __cxgb4_set_filter()
1614 f = &tab[fidx]; in __cxgb4_set_filter()
1615 if (f->valid) { in __cxgb4_set_filter()
1630 f = &tab[fidx]; in __cxgb4_set_filter()
1631 if (f->valid) { in __cxgb4_set_filter()
1642 f = &tab[filter_id]; in __cxgb4_set_filter()
1643 if (f->valid) in __cxgb4_set_filter()
1661 ret = writable_filter(f); in __cxgb4_set_filter()
1678 f->fs = *fs; in __cxgb4_set_filter()
1679 f->fs.iq = iq; in __cxgb4_set_filter()
1680 f->dev = dev; in __cxgb4_set_filter()
1684 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf; in __cxgb4_set_filter()
1685 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; in __cxgb4_set_filter()
1686 f->fs.val.ovlan_vld = fs->val.pfvf_vld; in __cxgb4_set_filter()
1687 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; in __cxgb4_set_filter()
1689 if (f->fs.val.encap_vld) { in __cxgb4_set_filter()
1690 struct port_info *pi = netdev_priv(f->dev); in __cxgb4_set_filter()
1697 f->fs.val.vni, in __cxgb4_set_filter()
1698 f->fs.mask.vni, in __cxgb4_set_filter()
1703 f->fs.val.ovlan = ret; in __cxgb4_set_filter()
1704 f->fs.mask.ovlan = 0x1ff; in __cxgb4_set_filter()
1705 f->fs.val.ovlan_vld = 1; in __cxgb4_set_filter()
1706 f->fs.mask.ovlan_vld = 1; in __cxgb4_set_filter()
1713 f->ctx = ctx; in __cxgb4_set_filter()
1714 f->tid = fidx; /* Save the actual tid */ in __cxgb4_set_filter()
1722 if (f->fs.prio) in __cxgb4_set_filter()
1730 clear_filter(adapter, f); in __cxgb4_set_filter()
1744 struct filter_entry *f; in cxgb4_del_hash_filter() local
1755 f = lookup_tid(t, filter_id); in cxgb4_del_hash_filter()
1756 if (!f) { in cxgb4_del_hash_filter()
1762 ret = writable_filter(f); in cxgb4_del_hash_filter()
1766 if (!f->valid) in cxgb4_del_hash_filter()
1769 f->ctx = ctx; in cxgb4_del_hash_filter()
1770 f->pending = 1; in cxgb4_del_hash_filter()
1778 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); in cxgb4_del_hash_filter()
1784 mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M), in cxgb4_del_hash_filter()
1788 mk_abort_req_ulp(abort_req, f->tid); in cxgb4_del_hash_filter()
1790 mk_abort_rpl_ulp(abort_rpl, f->tid); in cxgb4_del_hash_filter()
1806 struct filter_entry *f; in __cxgb4_del_filter() local
1824 f = &adapter->tids.hpftid_tab[filter_id]; in __cxgb4_del_filter()
1826 f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids]; in __cxgb4_del_filter()
1828 ret = writable_filter(f); in __cxgb4_del_filter()
1832 if (f->valid) { in __cxgb4_del_filter()
1833 f->ctx = ctx; in __cxgb4_del_filter()
1834 if (f->fs.prio) in __cxgb4_del_filter()
1836 f->tid - adapter->tids.hpftid_base, in __cxgb4_del_filter()
1837 f->fs.type ? PF_INET6 : PF_INET); in __cxgb4_del_filter()
1840 f->tid - adapter->tids.ftid_base, in __cxgb4_del_filter()
1841 f->fs.type ? PF_INET6 : PF_INET, in __cxgb4_del_filter()
1905 struct filter_entry *f) in configure_filter_tcb() argument
1907 if (f->fs.hitcnts) { in configure_filter_tcb()
1908 set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W, in configure_filter_tcb()
1912 set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W, in configure_filter_tcb()
1918 if (f->fs.newdmac) in configure_filter_tcb()
1919 set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1, in configure_filter_tcb()
1922 if (f->fs.newvlan == VLAN_INSERT || in configure_filter_tcb()
1923 f->fs.newvlan == VLAN_REWRITE) in configure_filter_tcb()
1924 set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1, in configure_filter_tcb()
1926 if (f->fs.newsmac) in configure_filter_tcb()
1927 configure_filter_smac(adap, f); in configure_filter_tcb()
1929 if (f->fs.nat_mode) { in configure_filter_tcb()
1930 switch (f->fs.nat_mode) { in configure_filter_tcb()
1932 set_nat_params(adap, f, tid, true, false, false, false); in configure_filter_tcb()
1936 set_nat_params(adap, f, tid, true, false, true, false); in configure_filter_tcb()
1940 set_nat_params(adap, f, tid, true, true, true, false); in configure_filter_tcb()
1943 set_nat_params(adap, f, tid, true, false, true, true); in configure_filter_tcb()
1947 set_nat_params(adap, f, tid, false, true, false, true); in configure_filter_tcb()
1951 set_nat_params(adap, f, tid, true, true, false, true); in configure_filter_tcb()
1955 set_nat_params(adap, f, tid, true, true, true, true); in configure_filter_tcb()
1960 __func__, f->fs.nat_mode); in configure_filter_tcb()
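
The nat_mode handling above dispatches to set_nat_params() with four booleans selecting which pieces of the tuple are rewritten. The case labels do not reference f and so are absent from this listing; the sketch below restores them using the driver's NAT_MODE_* enum names, which should be treated as an assumption, while the boolean patterns are taken verbatim from the matches:

static int apply_nat_mode_sketch(struct adapter *adap,
				 struct filter_entry *f, unsigned int tid)
{
	/* Booleans are, in order, dip/sip/dp/sp per the set_nat_params()
	 * signature (also an assumption; only its first line matched).
	 */
	switch (f->fs.nat_mode) {
	case NAT_MODE_DIP:
		set_nat_params(adap, f, tid, true, false, false, false);
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(adap, f, tid, true, false, true, false);
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(adap, f, tid, true, true, true, false);
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(adap, f, tid, true, false, true, true);
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(adap, f, tid, false, true, false, true);
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(adap, f, tid, true, true, false, true);
		break;
	case NAT_MODE_ALL:
		set_nat_params(adap, f, tid, true, true, true, true);
		break;
	default:
		return -EINVAL;	/* unknown NAT mode */
	}
	return 0;
}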
1974 struct filter_entry *f; in hash_del_filter_rpl() local
1979 f = lookup_tid(t, tid); in hash_del_filter_rpl()
1980 if (!f) { in hash_del_filter_rpl()
1985 ctx = f->ctx; in hash_del_filter_rpl()
1986 f->ctx = NULL; in hash_del_filter_rpl()
1987 clear_filter(adap, f); in hash_del_filter_rpl()
1989 kfree(f); in hash_del_filter_rpl()
2003 struct filter_entry *f; in hash_filter_rpl() local
2008 f = lookup_atid(t, ftid); in hash_filter_rpl()
2009 if (!f) { in hash_filter_rpl()
2014 ctx = f->ctx; in hash_filter_rpl()
2015 f->ctx = NULL; in hash_filter_rpl()
2019 f->tid = tid; in hash_filter_rpl()
2020 f->pending = 0; in hash_filter_rpl()
2021 f->valid = 1; in hash_filter_rpl()
2022 cxgb4_insert_tid(t, f, f->tid, 0); in hash_filter_rpl()
2025 ctx->tid = f->tid; in hash_filter_rpl()
2028 if (configure_filter_tcb(adap, tid, f)) { in hash_filter_rpl()
2029 clear_filter(adap, f); in hash_filter_rpl()
2031 kfree(f); in hash_filter_rpl()
2038 switch (f->fs.action) { in hash_filter_rpl()
2040 if (f->fs.dirsteer) in hash_filter_rpl()
2041 set_tcb_tflag(adap, f, tid, in hash_filter_rpl()
2045 set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1); in hash_filter_rpl()
2048 set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1); in hash_filter_rpl()
2065 clear_filter(adap, f); in hash_filter_rpl()
2067 kfree(f); in hash_filter_rpl()
2077 struct filter_entry *f = NULL; in filter_rpl() local
2086 f = &adap->tids.hpftid_tab[idx]; in filter_rpl()
2092 f = &adap->tids.ftid_tab[idx]; in filter_rpl()
2096 if (f->tid != tid) in filter_rpl()
2101 if (f) { in filter_rpl()
2108 ctx = f->ctx; in filter_rpl()
2109 f->ctx = NULL; in filter_rpl()
2115 clear_filter(adap, f); in filter_rpl()
2119 f->pending = 0; /* async setup completed */ in filter_rpl()
2120 f->valid = 1; in filter_rpl()
2131 clear_filter(adap, f); in filter_rpl()