// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021 Hisilicon Limited.

#include <linux/skbuff.h>
#include "hclge_main.h"
#include "hnae3.h"

static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp = hdev->ptp;

	ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
			 HCLGE_PTP_CYCLE_QUO_MASK;
	ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
	ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);

	if (ptp->cycle.den == 0) {
		dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
		return -EINVAL;
	}

	return 0;
}

static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
	u64 adj_val, adj_base, diff;
	unsigned long flags;
	bool is_neg = false;
	u32 quo, numerator;

	if (ppb < 0) {
		ppb = -ppb;
		is_neg = true;
	}

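	/* adj_base is the nominal cycle length in units of 1/cycle->den ns;
	 * scale it by ppb parts per billion to get the adjusted cycle.
	 */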
	adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
	adj_val = adj_base * ppb;
	diff = div_u64(adj_val, 1000000000ULL);

	if (is_neg)
		adj_val = adj_base - diff;
	else
		adj_val = adj_base + diff;

	/* The clock cycle is defined by three parts: quotient, numerator
	 * and denominator. For example, for a 2.5ns cycle the quotient is 2,
	 * the denominator is fixed to ptp->cycle.den, and the numerator
	 * is 0.5 * ptp->cycle.den.
	 */
	quo = div_u64_rem(adj_val, cycle->den, &numerator);

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
	       hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
	writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
	writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
	writel(HCLGE_PTP_CYCLE_ADJ_EN,
	       hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_ptp *ptp = hdev->ptp;

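	/* Skip the request if TX timestamping is disabled or a previous TX
	 * timestamp is still being handled; otherwise hold a reference on
	 * the skb until the hardware timestamp is retrieved.
	 */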
	if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
	    test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
		ptp->tx_skipped++;
		return false;
	}

	ptp->tx_start = jiffies;
	ptp->tx_skb = skb_get(skb);
	ptp->tx_cnt++;

	return true;
}

void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev)
{
	struct sk_buff *skb = hdev->ptp->tx_skb;
	struct skb_shared_hwtstamps hwts;
	u32 hi, lo;
	u64 ns;

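	/* Read back the TX hardware timestamp (nanoseconds plus the low and
	 * high parts of the second counter) and the sequence id of the
	 * timestamped packet.
	 */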
	ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) &
	     HCLGE_PTP_TX_TS_NSEC_MASK;
	lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG);
	hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) &
	     HCLGE_PTP_TX_TS_SEC_H_MASK;
	hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base +
		HCLGE_PTP_TX_TS_SEQID_REG);

	if (skb) {
		hdev->ptp->tx_skb = NULL;
		hdev->ptp->tx_cleaned++;

		ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC;
		hwts.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &hwts);
		dev_kfree_skb_any(skb);
	}

	clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state);
}

void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
			   u32 nsec, u32 sec)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned long flags;
	u64 ns = nsec;
	u32 sec_h;

	if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
		return;

	/* The BD does not have enough space for the upper 16 bits of the
	 * second counter, and this part changes infrequently, so read it
	 * from the register instead.
	 */
	spin_lock_irqsave(&hdev->ptp->lock, flags);
	sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC;
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	hdev->ptp->last_rx = jiffies;
	hdev->ptp->rx_cnt++;
}

static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;
	u32 hi, lo;
	u64 ns;

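	/* Read the nanosecond register and both parts of the second counter
	 * under the PTP lock so the snapshot cannot be torn by a concurrent
	 * time update.
	 */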
	spin_lock_irqsave(&hdev->ptp->lock, flags);
	ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG);
	hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
	lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
	*ts = ns_to_timespec64(ns);

	return 0;
}

static int hclge_ptp_settime(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
	writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG);
	writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG);
	/* synchronize the written time to the PHC */
	writel(HCLGE_PTP_TIME_SYNC_EN,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;
	bool is_neg = false;
	u32 adj_val = 0;

	if (delta < 0) {
		adj_val |= HCLGE_PTP_TIME_NSEC_NEG;
		delta = -delta;
		is_neg = true;
	}

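	/* Deltas that do not fit in the nanosecond-adjust register are
	 * applied by reading the current time and writing back the adjusted
	 * value via settime.
	 */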
	if (delta > HCLGE_PTP_TIME_NSEC_MASK) {
		struct timespec64 ts;
		s64 ns;

		hclge_ptp_gettimex(ptp, &ts, NULL);
		ns = timespec64_to_ns(&ts);
		ns = is_neg ? ns - delta : ns + delta;
		ts = ns_to_timespec64(ns);
		return hclge_ptp_settime(ptp, &ts);
	}

	adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
	writel(HCLGE_PTP_TIME_ADJ_EN,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
{
	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
		sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
{
	struct hclge_ptp_int_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_int_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false);
	req->int_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to %s ptp interrupt, ret = %d\n",
			en ? "enable" : "disable", ret);

	return ret;
}

int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg)
{
	struct hclge_ptp_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_cfg_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query ptp config, ret = %d\n", ret);
		return ret;
	}

	*cfg = le32_to_cpu(req->cfg);

	return 0;
}

static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
{
	struct hclge_ptp_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_cfg_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false);
	req->cfg = cpu_to_le32(cfg);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to config ptp, ret = %d\n", ret);

	return ret;
}

static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
				 unsigned long *flags, u32 *ptp_cfg)
{
	switch (cfg->tx_type) {
	case HWTSTAMP_TX_OFF:
		clear_bit(HCLGE_PTP_FLAG_TX_EN, flags);
		break;
	case HWTSTAMP_TX_ON:
		set_bit(HCLGE_PTP_FLAG_TX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_TX_EN_B;
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
				 unsigned long *flags, u32 *ptp_cfg)
{
	int rx_filter = cfg->rx_filter;

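	/* Unsupported filters (including HWTSTAMP_FILTER_ALL) are rejected.
	 * Accepted PTPv1 L4 filters are widened to
	 * HWTSTAMP_FILTER_PTP_V1_L4_EVENT and PTPv2 filters to
	 * HWTSTAMP_FILTER_PTP_V2_EVENT; the widened value is reported back
	 * via cfg->rx_filter.
	 */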
	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		clear_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_RX_EN_B;
		*ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
		rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_RX_EN_B;
		*ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT;
		rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	default:
		return -ERANGE;
	}

	cfg->rx_filter = rx_filter;

	return 0;
}

static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
				 struct hwtstamp_config *cfg)
{
	unsigned long flags = hdev->ptp->flags;
	u32 ptp_cfg = 0;
	int ret;

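	/* Build the new mode on local copies of the flags and hardware
	 * config; commit them to hdev->ptp only after hclge_ptp_cfg()
	 * succeeds.
	 */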
	if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags))
		ptp_cfg |= HCLGE_PTP_EN_B;

	ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg);
	if (ret)
		return ret;

	ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg);
	if (ret)
		return ret;

	ret = hclge_ptp_cfg(hdev, ptp_cfg);
	if (ret)
		return ret;

	hdev->ptp->flags = flags;
	hdev->ptp->ptp_cfg = ptp_cfg;

	return 0;
}

int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	int ret;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
		dev_err(&hdev->pdev->dev, "phc is unsupported\n");
		return -EOPNOTSUPP;
	}

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	ret = hclge_ptp_set_ts_mode(hdev, &cfg);
	if (ret)
		return ret;

	hdev->ptp->ts_cfg = cfg;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
			  struct ethtool_ts_info *info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
		dev_err(&hdev->pdev->dev, "phc is unsupported\n");
		return -EOPNOTSUPP;
	}

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (hdev->ptp->clock)
		info->phc_index = ptp_clock_index(hdev->ptp->clock);
	else
		info->phc_index = -1;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);

	info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);

	return 0;
}

static int hclge_ptp_create_clock(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp;

	ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
	if (!ptp)
		return -ENOMEM;

	ptp->hdev = hdev;
	snprintf(ptp->info.name, sizeof(ptp->info.name), "%s",
		 HCLGE_DRIVER_NAME);
	ptp->info.owner = THIS_MODULE;
	ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
	ptp->info.n_ext_ts = 0;
	ptp->info.pps = 0;
	ptp->info.adjfreq = hclge_ptp_adjfreq;
	ptp->info.adjtime = hclge_ptp_adjtime;
	ptp->info.gettimex64 = hclge_ptp_gettimex;
	ptp->info.settime64 = hclge_ptp_settime;

	ptp->info.n_alarm = 0;
	ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		dev_err(&hdev->pdev->dev,
			"%d failed to register ptp clock, ret = %ld\n",
			ptp->info.n_alarm, PTR_ERR(ptp->clock));
		return -ENODEV;
	} else if (!ptp->clock) {
		dev_err(&hdev->pdev->dev, "failed to register ptp clock\n");
		return -ENODEV;
	}

	spin_lock_init(&ptp->lock);
	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
	hdev->ptp = ptp;

	return 0;
}

static void hclge_ptp_destroy_clock(struct hclge_dev *hdev)
{
	ptp_clock_unregister(hdev->ptp->clock);
	hdev->ptp->clock = NULL;
	devm_kfree(&hdev->pdev->dev, hdev->ptp);
	hdev->ptp = NULL;
}

int hclge_ptp_init(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct timespec64 ts;
	int ret;

	if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))
		return 0;

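	/* Create the PHC and read the nominal clock cycle only on the first
	 * initialization; subsequent calls reuse the existing clock.
	 */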
	if (!hdev->ptp) {
		ret = hclge_ptp_create_clock(hdev);
		if (ret)
			return ret;

		ret = hclge_ptp_get_cycle(hdev);
		if (ret)
			return ret;
	}

	ret = hclge_ptp_int_en(hdev, true);
	if (ret)
		goto out;

	set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
	ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init freq, ret = %d\n", ret);
		goto out;
	}

	ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init ts mode, ret = %d\n", ret);
		goto out;
	}

	ktime_get_real_ts64(&ts);
	ret = hclge_ptp_settime(&hdev->ptp->info, &ts);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init ts time, ret = %d\n", ret);
		goto out;
	}

	set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
	dev_info(&hdev->pdev->dev, "phc initializes ok!\n");

	return 0;

out:
	hclge_ptp_destroy_clock(hdev);

	return ret;
}

void hclge_ptp_uninit(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp = hdev->ptp;

	if (!ptp)
		return;

	hclge_ptp_int_en(hdev, false);
	clear_bit(HCLGE_STATE_PTP_EN, &hdev->state);
	clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags);
	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;

	if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg))
		dev_err(&hdev->pdev->dev, "failed to disable phc\n");

	if (ptp->tx_skb) {
		struct sk_buff *skb = ptp->tx_skb;

		ptp->tx_skb = NULL;
		dev_kfree_skb_any(skb);
	}

	hclge_ptp_destroy_clock(hdev);
}