/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/clocksource.h>
37 MLX5E_CYCLES_SHIFT = 23
40 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
41 struct skb_shared_hwtstamps *hwts)
45 read_lock(&tstamp->lock);
46 nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
47 read_unlock(&tstamp->lock);
49 hwts->hwtstamp = ns_to_ktime(nsec);
52 static cycle_t mlx5e_read_internal_timer(const struct cyclecounter *cc)
54 struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
57 return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
60 static void mlx5e_timestamp_overflow(struct work_struct *work)
62 struct delayed_work *dwork = to_delayed_work(work);
63 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
65 struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
68 write_lock_irqsave(&tstamp->lock, flags);
69 timecounter_read(&tstamp->clock);
70 write_unlock_irqrestore(&tstamp->lock, flags);
71 queue_delayed_work(priv->wq, &tstamp->overflow_work,
72 msecs_to_jiffies(tstamp->overflow_period * 1000));
75 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
77 struct mlx5e_priv *priv = netdev_priv(dev);
78 struct hwtstamp_config config;
80 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
83 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
87 switch (config.tx_type) {
96 switch (config.rx_filter) {
97 case HWTSTAMP_FILTER_NONE:
98 /* Reset CQE compression to Admin default */
99 mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin);
101 case HWTSTAMP_FILTER_ALL:
102 case HWTSTAMP_FILTER_SOME:
103 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
104 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
105 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
106 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
107 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
108 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
109 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
110 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
111 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
112 case HWTSTAMP_FILTER_PTP_V2_EVENT:
113 case HWTSTAMP_FILTER_PTP_V2_SYNC:
114 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
115 /* Disable CQE compression */
116 mlx5e_modify_rx_cqe_compression(priv, false);
117 config.rx_filter = HWTSTAMP_FILTER_ALL;
123 memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
125 return copy_to_user(ifr->ifr_data, &config,
126 sizeof(config)) ? -EFAULT : 0;
129 int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr)
131 struct mlx5e_priv *priv = netdev_priv(dev);
132 struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
134 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
137 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
140 static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
141 const struct timespec64 *ts)
143 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
145 u64 ns = timespec64_to_ns(ts);
148 write_lock_irqsave(&tstamp->lock, flags);
149 timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
150 write_unlock_irqrestore(&tstamp->lock, flags);
155 static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
156 struct timespec64 *ts)
158 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
163 write_lock_irqsave(&tstamp->lock, flags);
164 ns = timecounter_read(&tstamp->clock);
165 write_unlock_irqrestore(&tstamp->lock, flags);
167 *ts = ns_to_timespec64(ns);
172 static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
174 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
178 write_lock_irqsave(&tstamp->lock, flags);
179 timecounter_adjtime(&tstamp->clock, delta);
180 write_unlock_irqrestore(&tstamp->lock, flags);
185 static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
191 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
199 adj = tstamp->nominal_c_mult;
201 diff = div_u64(adj, 1000000000ULL);
203 write_lock_irqsave(&tstamp->lock, flags);
204 timecounter_read(&tstamp->clock);
205 tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
206 tstamp->nominal_c_mult + diff;
207 write_unlock_irqrestore(&tstamp->lock, flags);
212 static const struct ptp_clock_info mlx5e_ptp_clock_info = {
213 .owner = THIS_MODULE,
214 .max_adj = 100000000,
220 .adjfreq = mlx5e_ptp_adjfreq,
221 .adjtime = mlx5e_ptp_adjtime,
222 .gettime64 = mlx5e_ptp_gettime,
223 .settime64 = mlx5e_ptp_settime,
227 static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
229 tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
230 tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
233 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
235 struct mlx5e_tstamp *tstamp = &priv->tstamp;
241 mlx5e_timestamp_init_config(tstamp);
242 dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
244 mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
247 rwlock_init(&tstamp->lock);
248 tstamp->cycles.read = mlx5e_read_internal_timer;
249 tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
250 tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
251 tstamp->cycles.shift);
252 tstamp->nominal_c_mult = tstamp->cycles.mult;
253 tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
254 tstamp->mdev = priv->mdev;
256 timecounter_init(&tstamp->clock, &tstamp->cycles,
257 ktime_to_ns(ktime_get_real()));
259 /* Calculate period in seconds to call the overflow watchdog - to make
260 * sure counter is checked at least once every wrap around.
261 * The period is calculated as the minimum between max HW cycles count
262 * (The clock source mask) and max amount of cycles that can be
263 * multiplied by clock multiplier where the result doesn't exceed
266 overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult);
267 overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1);
269 ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles,
271 do_div(ns, NSEC_PER_SEC / HZ);
272 tstamp->overflow_period = ns;
274 INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
275 if (tstamp->overflow_period)
276 queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
278 mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
280 /* Configure the PHC */
281 tstamp->ptp_info = mlx5e_ptp_clock_info;
282 snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
284 tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
285 &priv->mdev->pdev->dev);
286 if (IS_ERR(tstamp->ptp)) {
287 mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
288 PTR_ERR(tstamp->ptp));
293 void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
295 struct mlx5e_tstamp *tstamp = &priv->tstamp;
297 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
300 if (priv->tstamp.ptp) {
301 ptp_clock_unregister(priv->tstamp.ptp);
302 priv->tstamp.ptp = NULL;
305 cancel_delayed_work_sync(&tstamp->overflow_work);