linux/drivers/net/ethernet/ti/am65-cpts.c

// SPDX-License-Identifier: GPL-2.0
/* TI K3 AM65x Common Platform Time Sync
*
* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
*
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include "am65-cpts.h"
struct am65_genf_regs {
u32 comp_lo; /* Comparison Low Value 0:31 */
u32 comp_hi; /* Comparison High Value 32:63 */
u32 control; /* control */
u32 length; /* Length */
u32 ppm_low; /* PPM Load Low Value 0:31 */
u32 ppm_hi; /* PPM Load High Value 32:63 */
u32 ts_nudge; /* Nudge value */
} __aligned(32) __packed;
#define AM65_CPTS_GENF_MAX_NUM 9
#define AM65_CPTS_ESTF_MAX_NUM 8
struct am65_cpts_regs {
u32 idver; /* Identification and version */
u32 control; /* Time sync control */
u32 rftclk_sel; /* Reference Clock Select Register */
u32 ts_push; /* Time stamp event push */
u32 ts_load_val_lo; /* Time Stamp Load Low Value 0:31 */
u32 ts_load_en; /* Time stamp load enable */
u32 ts_comp_lo; /* Time Stamp Comparison Low Value 0:31 */
u32 ts_comp_length; /* Time Stamp Comparison Length */
u32 intstat_raw; /* Time sync interrupt status raw */
u32 intstat_masked; /* Time sync interrupt status masked */
u32 int_enable; /* Time sync interrupt enable */
u32 ts_comp_nudge; /* Time Stamp Comparison Nudge Value */
u32 event_pop; /* Event interrupt pop */
u32 event_0; /* Event Time Stamp lo 0:31 */
u32 event_1; /* Event Type Fields */
u32 event_2; /* Event Type Fields domain */
u32 event_3; /* Event Time Stamp hi 32:63 */
u32 ts_load_val_hi; /* Time Stamp Load High Value 32:63 */
u32 ts_comp_hi; /* Time Stamp Comparison High Value 32:63 */
u32 ts_add_val; /* Time Stamp Add value */
u32 ts_ppm_low; /* Time Stamp PPM Load Low Value 0:31 */
u32 ts_ppm_hi; /* Time Stamp PPM Load High Value 32:63 */
u32 ts_nudge; /* Time Stamp Nudge value */
u32 reserv[33];
struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
};
/* CONTROL_REG */
#define AM65_CPTS_CONTROL_EN BIT(0)
#define AM65_CPTS_CONTROL_INT_TEST BIT(1)
#define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
#define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
#define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
#define AM65_CPTS_CONTROL_64MODE BIT(5)
#define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
#define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
/* RFTCLK_SEL_REG */
#define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)
/* TS_PUSH_REG */
#define AM65_CPTS_TS_PUSH BIT(0)
/* TS_LOAD_EN_REG */
#define AM65_CPTS_TS_LOAD_EN BIT(0)
/* INTSTAT_RAW_REG */
#define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)
/* INTSTAT_MASKED_REG */
#define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)
/* INT_ENABLE_REG */
#define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)
/* TS_COMP_NUDGE_REG */
#define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)
/* EVENT_POP_REG */
#define AM65_CPTS_EVENT_POP BIT(0)
/* EVENT_1_REG */
#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)
#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)
#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)
#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)
/* EVENT_2_REG */
#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)
enum {
AM65_CPTS_EV_PUSH, /* Time Stamp Push Event */
AM65_CPTS_EV_ROLL, /* Time Stamp Rollover Event */
AM65_CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
AM65_CPTS_EV_HW, /* Hardware Time Stamp Push Event */
AM65_CPTS_EV_RX, /* Ethernet Receive Event */
AM65_CPTS_EV_TX, /* Ethernet Transmit Event */
AM65_CPTS_EV_TS_COMP, /* Time Stamp Compare Event */
AM65_CPTS_EV_HOST, /* Host Transmit Event */
};
struct am65_cpts_event {
struct list_head list;
unsigned long tmo;
u32 event1;
u32 event2;
u64 timestamp;
};
#define AM65_CPTS_FIFO_DEPTH (16)
#define AM65_CPTS_MAX_EVENTS (32)
#define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
#define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define AM65_CPTS_MIN_PPM 0x400
struct am65_cpts {
struct device *dev;
struct am65_cpts_regs __iomem *reg;
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
int phc_index;
struct clk_hw *clk_mux_hw;
struct device_node *clk_mux_np;
struct clk *refclk;
u32 refclk_freq;
struct list_head events;
struct list_head pool;
struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
spinlock_t lock; /* protects events lists */
u32 ext_ts_inputs;
u32 genf_num;
u32 ts_add_val;
int irq;
struct mutex ptp_clk_lock; /* PHC access sync */
u64 timestamp;
u32 genf_enable;
u32 hw_ts_enable;
u32 estf_enable;
struct sk_buff_head txq;
bool pps_enabled;
bool pps_present;
u32 pps_hw_ts_idx;
u32 pps_genf_idx;
/* context save/restore */
u64 sr_cpts_ns;
u64 sr_ktime_ns;
u32 sr_control;
u32 sr_int_enable;
u32 sr_rftclk_sel;
u32 sr_ts_ppm_hi;
u32 sr_ts_ppm_low;
struct am65_genf_regs sr_genf[AM65_CPTS_GENF_MAX_NUM];
struct am65_genf_regs sr_estf[AM65_CPTS_ESTF_MAX_NUM];
};
struct am65_cpts_skb_cb_data {
unsigned long tmo;
u32 skb_mtype_seqid;
};
#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
#define am65_cpts_read32(c, r) readl(&(c)->reg->r)
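/* Load a new absolute time value into the CPTS counter: the 64-bit
 * timestamp is written as two halves and latched by the TS_LOAD_EN write.
 */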
static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
{
u32 val;
val = upper_32_bits(start_tstamp);
am65_cpts_write32(cpts, val, ts_load_val_hi);
val = lower_32_bits(start_tstamp);
am65_cpts_write32(cpts, val, ts_load_val_lo);
am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
}
static void am65_cpts_set_add_val(struct am65_cpts *cpts)
{
/* select coefficient according to the rate */
cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
}
static void am65_cpts_disable(struct am65_cpts *cpts)
{
am65_cpts_write32(cpts, 0, control);
am65_cpts_write32(cpts, 0, int_enable);
}
static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
}
static int am65_cpts_event_get_type(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}
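/* Return expired events (tmo in the past) from the events list back to the
 * free pool; returns 0 if at least one event was reclaimed, -1 otherwise.
 */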
static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
{
struct list_head *this, *next;
struct am65_cpts_event *event;
int removed = 0;
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct am65_cpts_event, list);
if (time_after(jiffies, event->tmo)) {
list_del_init(&event->list);
list_add(&event->list, &cpts->pool);
++removed;
}
}
if (removed)
dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
return removed ? 0 : -1;
}
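/* Pop one event from the CPTS event FIFO into @event. Note the inverted
 * return convention: false means an event was read, true means the FIFO
 * was empty.
 */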
static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
struct am65_cpts_event *event)
{
u32 r = am65_cpts_read32(cpts, intstat_raw);
if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
event->timestamp = am65_cpts_read32(cpts, event_0);
event->event1 = am65_cpts_read32(cpts, event_1);
event->event2 = am65_cpts_read32(cpts, event_2);
event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
return false;
}
return true;
}
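/* Drain up to AM65_CPTS_FIFO_DEPTH events from the event FIFO and dispatch
 * them by type. Caller must hold cpts->lock (see am65_cpts_fifo_read()).
 */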
static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
{
struct ptp_clock_event pevent;
struct am65_cpts_event *event;
bool schedule = false;
int i, type, ret = 0;
for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
event = list_first_entry_or_null(&cpts->pool,
struct am65_cpts_event, list);
if (!event) {
if (am65_cpts_cpts_purge_events(cpts)) {
dev_err(cpts->dev, "cpts: event pool empty\n");
ret = -1;
goto out;
}
continue;
}
if (am65_cpts_fifo_pop_event(cpts, event))
break;
type = am65_cpts_event_get_type(event);
switch (type) {
case AM65_CPTS_EV_PUSH:
cpts->timestamp = event->timestamp;
dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
cpts->timestamp);
break;
case AM65_CPTS_EV_RX:
case AM65_CPTS_EV_TX:
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
list_move_tail(&event->list, &cpts->events);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
event->event1, event->event2,
event->timestamp);
schedule = true;
break;
case AM65_CPTS_EV_HW:
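/* HW_TS_PUSH port numbers are 1-based; convert to a 0-based index */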
pevent.index = am65_cpts_event_get_port(event) - 1;
pevent.timestamp = event->timestamp;
if (cpts->pps_enabled && pevent.index == cpts->pps_hw_ts_idx) {
pevent.type = PTP_CLOCK_PPSUSR;
pevent.pps_times.ts_real = ns_to_timespec64(pevent.timestamp);
} else {
pevent.type = PTP_CLOCK_EXTTS;
}
dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n",
pevent.type == PTP_CLOCK_EXTTS ?
"extts" : "pps",
pevent.index, event->timestamp);
ptp_clock_event(cpts->ptp_clock, &pevent);
break;
case AM65_CPTS_EV_HOST:
break;
case AM65_CPTS_EV_ROLL:
case AM65_CPTS_EV_HALF:
case AM65_CPTS_EV_TS_COMP:
dev_dbg(cpts->dev,
"AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
type,
event->event1, event->event2,
event->timestamp);
break;
default:
dev_err(cpts->dev, "cpts: unknown event type\n");
ret = -1;
goto out;
}
}
out:
if (schedule)
ptp_schedule_worker(cpts->ptp_clock, 0);
return ret;
}
static int am65_cpts_fifo_read(struct am65_cpts *cpts)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&cpts->lock, flags);
ret = __am65_cpts_fifo_read(cpts);
spin_unlock_irqrestore(&cpts->lock, flags);
return ret;
}
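/* Read the current CPTS time by triggering a TS_PUSH event and picking the
 * resulting timestamp up from the event FIFO. Caller must hold
 * cpts->ptp_clk_lock.
 */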
static u64 am65_cpts_gettime(struct am65_cpts *cpts,
struct ptp_system_timestamp *sts)
{
unsigned long flags;
u64 val = 0;
/* Temporarily disable the cpts interrupt so the pushed timestamp is not
 * read twice (once here and once in the ISR). An interrupt that is already
 * in-flight is OK.
 */
am65_cpts_write32(cpts, 0, int_enable);
/* use spin_lock_irqsave() here as it has to run very fast */
spin_lock_irqsave(&cpts->lock, flags);
ptp_read_system_prets(sts);
am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
am65_cpts_read32(cpts, ts_push);
ptp_read_system_postts(sts);
spin_unlock_irqrestore(&cpts->lock, flags);
am65_cpts_fifo_read(cpts);
am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
val = cpts->timestamp;
return val;
}
static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
{
struct am65_cpts *cpts = dev_id;
if (am65_cpts_fifo_read(cpts))
dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
return IRQ_HANDLED;
}
/* PTP clock operations */
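/* Frequency adjustment: the requested ppb is converted into an adjustment
 * period in refclk cycles (ts_ppm_hi/ts_ppm_low), with the sign carried by
 * the TS_PPM_DIR control bit. The same correction is mirrored to the PPS
 * GenF and any enabled EstF generators so they stay in sync with the PHC.
 */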
static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
u32 estf_ctrl_val = 0, estf_ppm_hi = 0, estf_ppm_low = 0;
s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
int pps_index = cpts->pps_genf_idx;
u64 adj_period, pps_adj_period;
u32 ctrl_val, ppm_hi, ppm_low;
unsigned long flags;
int neg_adj = 0, i;
if (ppb < 0) {
neg_adj = 1;
ppb = -ppb;
}
/* base freq = 1GHz = 1 000 000 000
* ppb_norm = ppb * base_freq / clock_freq;
* ppm_norm = ppb_norm / 1000
* adj_period = 1 000 000 / ppm_norm
* adj_period = 1 000 000 000 / ppb_norm
* adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
* adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
* adj_period = clock_freq / ppb
*/
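/* e.g. a 500 MHz refclk and ppb = 100 would give adj_period = 5000000 */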
adj_period = div_u64(cpts->refclk_freq, ppb);
mutex_lock(&cpts->ptp_clk_lock);
ctrl_val = am65_cpts_read32(cpts, control);
if (neg_adj)
ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
else
ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
ppm_hi = upper_32_bits(adj_period) & 0x3FF;
ppm_low = lower_32_bits(adj_period);
if (cpts->pps_enabled) {
estf_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
if (neg_adj)
estf_ctrl_val &= ~BIT(1);
else
estf_ctrl_val |= BIT(1);
/* GenF PPM does its correction in cpts refclk ticks, each tick being
 * (cpts->ts_add_val + 1) ns, so the GenF length PPM adjustment period
 * needs to be scaled accordingly.
 */
pps_adj_period = adj_period * (cpts->ts_add_val + 1);
estf_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
estf_ppm_low = lower_32_bits(pps_adj_period);
}
spin_lock_irqsave(&cpts->lock, flags);
/* All of the writes below must be done as quickly as possible:
 * - a delay between the PPM dir and PPM value updates can briefly apply
 *   the old PPM correction in the wrong direction
 * - a delay between the CPTS-clock PPM cfg and the GenF PPM cfg can leave
 *   the CPTS clock running with the new cfg while GenF still uses the old
 *   one for a short period of time
 */
am65_cpts_write32(cpts, ctrl_val, control);
am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi);
am65_cpts_write32(cpts, ppm_low, ts_ppm_low);
if (cpts->pps_enabled) {
am65_cpts_write32(cpts, estf_ctrl_val, genf[pps_index].control);
am65_cpts_write32(cpts, estf_ppm_hi, genf[pps_index].ppm_hi);
am65_cpts_write32(cpts, estf_ppm_low, genf[pps_index].ppm_low);
}
for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
if (cpts->estf_enable & BIT(i)) {
am65_cpts_write32(cpts, estf_ctrl_val, estf[i].control);
am65_cpts_write32(cpts, estf_ppm_hi, estf[i].ppm_hi);
am65_cpts_write32(cpts, estf_ppm_low, estf[i].ppm_low);
}
}
/* All GenF/EstF can be updated here the same way */
spin_unlock_irqrestore(&cpts->lock, flags);
mutex_unlock(&cpts->ptp_clk_lock);
return 0;
}
static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
s64 ns;
mutex_lock(&cpts->ptp_clk_lock);
ns = am65_cpts_gettime(cpts, NULL);
ns += delta;
am65_cpts_settime(cpts, ns);
mutex_unlock(&cpts->ptp_clk_lock);
return 0;
}
static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
u64 ns;
mutex_lock(&cpts->ptp_clk_lock);
ns = am65_cpts_gettime(cpts, sts);
mutex_unlock(&cpts->ptp_clk_lock);
*ts = ns_to_timespec64(ns);
return 0;
}
u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
{
u64 ns;
/* reuse ptp_clk_lock as it serializes ts push */
mutex_lock(&cpts->ptp_clk_lock);
ns = am65_cpts_gettime(cpts, NULL);
mutex_unlock(&cpts->ptp_clk_lock);
return ns;
}
EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
u64 ns;
ns = timespec64_to_ns(ts);
mutex_lock(&cpts->ptp_clk_lock);
am65_cpts_settime(cpts, ns);
mutex_unlock(&cpts->ptp_clk_lock);
return 0;
}
static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
{
u32 v;
v = am65_cpts_read32(cpts, control);
if (on) {
v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
cpts->hw_ts_enable |= BIT(index);
} else {
v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
cpts->hw_ts_enable &= ~BIT(index);
}
am65_cpts_write32(cpts, v, control);
}
static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
if (index >= cpts->ptp_info.n_ext_ts)
return -ENXIO;
if (cpts->pps_present && index == cpts->pps_hw_ts_idx)
return -EINVAL;
if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
return 0;
mutex_lock(&cpts->ptp_clk_lock);
am65_cpts_extts_enable_hw(cpts, index, on);
mutex_unlock(&cpts->ptp_clk_lock);
dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
__func__, index, on ? "enabled" : "disabled");
return 0;
}
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg)
{
u64 cycles;
u32 val;
cycles = cfg->ns_period * cpts->refclk_freq;
cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
if (cycles > U32_MAX)
return -EINVAL;
/* according to the TRM, the length register should be zeroed first */
am65_cpts_write32(cpts, 0, estf[idx].length);
val = upper_32_bits(cfg->ns_start);
am65_cpts_write32(cpts, val, estf[idx].comp_hi);
val = lower_32_bits(cfg->ns_start);
am65_cpts_write32(cpts, val, estf[idx].comp_lo);
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, estf[idx].length);
am65_cpts_write32(cpts, 0, estf[idx].control);
am65_cpts_write32(cpts, 0, estf[idx].ppm_hi);
am65_cpts_write32(cpts, 0, estf[idx].ppm_low);
cpts->estf_enable |= BIT(idx);
dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
return 0;
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
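/* Illustrative usage sketch (not taken from an in-tree caller; the cfg
 * values and the port_idx/base_time_ns names are hypothetical): a MAC
 * driver implementing TSN EST would typically program one EstF per port
 * with the admin base time and cycle time, both in nanoseconds of PHC time:
 *
 *	struct am65_cpts_estf_cfg cfg = {
 *		.ns_period = 500 * NSEC_PER_USEC,	// 500 us EST cycle
 *		.ns_start = base_time_ns,		// absolute start time
 *	};
 *
 *	ret = am65_cpts_estf_enable(cpts, port_idx, &cfg);
 *	...
 *	am65_cpts_estf_disable(cpts, port_idx);
 */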
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
am65_cpts_write32(cpts, 0, estf[idx].length);
cpts->estf_enable &= ~BIT(idx);
dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
struct ptp_perout_request *req, int on)
{
u64 ns_period, ns_start, cycles;
struct timespec64 ts;
u32 val;
if (on) {
ts.tv_sec = req->period.sec;
ts.tv_nsec = req->period.nsec;
ns_period = timespec64_to_ns(&ts);
cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
ts.tv_sec = req->start.sec;
ts.tv_nsec = req->start.nsec;
ns_start = timespec64_to_ns(&ts);
val = upper_32_bits(ns_start);
am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
val = lower_32_bits(ns_start);
am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, genf[req->index].length);
am65_cpts_write32(cpts, 0, genf[req->index].control);
am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);
cpts->genf_enable |= BIT(req->index);
} else {
am65_cpts_write32(cpts, 0, genf[req->index].length);
cpts->genf_enable &= ~BIT(req->index);
}
}
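/* Worked example (assuming a 200 MHz reference clock, i.e. refclk_freq =
 * 200000000): a perout request with period.sec = 1 and period.nsec = 0
 * gives ns_period = 1e9, so length = (1e9 * 200000000) / 1e9 = 200000000.
 * The GenF length register therefore holds the period in refclk cycles,
 * while comp_hi/comp_lo hold the absolute start time in nanoseconds.
 */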
static int am65_cpts_perout_enable(struct am65_cpts *cpts,
struct ptp_perout_request *req, int on)
{
if (req->index >= cpts->ptp_info.n_per_out)
return -ENXIO;
if (cpts->pps_present && req->index == cpts->pps_genf_idx)
return -EINVAL;
if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
return 0;
mutex_lock(&cpts->ptp_clk_lock);
am65_cpts_perout_enable_hw(cpts, req, on);
mutex_unlock(&cpts->ptp_clk_lock);
dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
__func__, req->index, on ? "enabled" : "disabled");
return 0;
}
static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on)
{
int ret = 0;
struct timespec64 ts;
struct ptp_clock_request rq;
u64 ns;
if (!cpts->pps_present)
return -EINVAL;
if (cpts->pps_enabled == !!on)
return 0;
mutex_lock(&cpts->ptp_clk_lock);
if (on) {
am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
ns = am65_cpts_gettime(cpts, NULL);
ts = ns_to_timespec64(ns);
rq.perout.period.sec = 1;
rq.perout.period.nsec = 0;
rq.perout.start.sec = ts.tv_sec + 2;
rq.perout.start.nsec = 0;
rq.perout.index = cpts->pps_genf_idx;
am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
cpts->pps_enabled = true;
} else {
rq.perout.index = cpts->pps_genf_idx;
am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
cpts->pps_enabled = false;
}
mutex_unlock(&cpts->ptp_clk_lock);
dev_dbg(cpts->dev, "%s: pps: %s\n",
__func__, on ? "enabled" : "disabled");
return ret;
}
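/* Worked example (values hypothetical): if the PHC currently reads
 * 123.400000000 s, the PPS GenF is armed with start = 125.000000000 s
 * (current second + 2, nanoseconds zeroed) and a 1 s period, so the first
 * pulse lands on a whole-second boundary. The paired HWx_TS_PUSH input is
 * enabled at the same time so that pulses routed back to it (a
 * board-dependent connection) can be reported as timestamp events.
 */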
static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
return am65_cpts_extts_enable(cpts, rq->extts.index, on);
case PTP_CLK_REQ_PEROUT:
return am65_cpts_perout_enable(cpts, &rq->perout, on);
case PTP_CLK_REQ_PPS:
return am65_cpts_pps_enable(cpts, on);
default:
break;
}
return -EOPNOTSUPP;
}
static long am65_cpts_ts_work(struct ptp_clock_info *ptp);
static struct ptp_clock_info am65_ptp_info = {
.owner = THIS_MODULE,
.name = "CPTS timer",
.adjfine = am65_cpts_ptp_adjfine,
.adjtime = am65_cpts_ptp_adjtime,
.gettimex64 = am65_cpts_ptp_gettimex,
.settime64 = am65_cpts_ptp_settime,
.enable = am65_cpts_ptp_enable,
.do_aux_work = am65_cpts_ts_work,
};
static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_event *event)
{
struct sk_buff_head txq_list;
struct sk_buff *skb, *tmp;
unsigned long flags;
bool found = false;
u32 mtype_seqid;
mtype_seqid = event->event1 &
(AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
__skb_queue_head_init(&txq_list);
spin_lock_irqsave(&cpts->txq.lock, flags);
skb_queue_splice_init(&cpts->txq, &txq_list);
spin_unlock_irqrestore(&cpts->txq.lock, flags);
/* no need to grab txq.lock as access is always done under cpts->lock */
skb_queue_walk_safe(&txq_list, skb, tmp) {
struct skb_shared_hwtstamps ssh;
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
/* The CPTS latches the messageType from the second nibble of the PTP
 * header, which is only valid for PTPv2. For PTPv1 (1588-2002) that
 * nibble is part of versionType and is always 0, so the hardware reports
 * every PTPv1 event as a Sync. If the sequence IDs match for a PTPv1
 * packet, trust the messageType saved in skb->cb instead.
 */
if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
(skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
mtype_seqid = skb_cb->skb_mtype_seqid;
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
memset(&ssh, 0, sizeof(ssh));
ssh.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &ssh);
found = true;
__skb_unlink(skb, &txq_list);
dev_consume_skb_any(skb);
dev_dbg(cpts->dev,
"match tx timestamp mtype_seqid %08x\n",
mtype_seqid);
break;
}
if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 100 ms */
dev_dbg(cpts->dev,
"expiring tx timestamp mtype_seqid %08x\n",
mtype_seqid);
__skb_unlink(skb, &txq_list);
dev_consume_skb_any(skb);
}
}
spin_lock_irqsave(&cpts->txq.lock, flags);
skb_queue_splice(&txq_list, &cpts->txq);
spin_unlock_irqrestore(&cpts->txq.lock, flags);
return found;
}
static void am65_cpts_find_ts(struct am65_cpts *cpts)
{
struct am65_cpts_event *event;
struct list_head *this, *next;
LIST_HEAD(events_free);
unsigned long flags;
LIST_HEAD(events);
spin_lock_irqsave(&cpts->lock, flags);
list_splice_init(&cpts->events, &events);
spin_unlock_irqrestore(&cpts->lock, flags);
list_for_each_safe(this, next, &events) {
event = list_entry(this, struct am65_cpts_event, list);
if (am65_cpts_match_tx_ts(cpts, event) ||
time_after(jiffies, event->tmo)) {
list_del_init(&event->list);
list_add(&event->list, &events_free);
}
}
spin_lock_irqsave(&cpts->lock, flags);
list_splice_tail(&events, &cpts->events);
list_splice_tail(&events_free, &cpts->pool);
spin_unlock_irqrestore(&cpts->lock, flags);
}
static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
unsigned long flags;
long delay = -1;
am65_cpts_find_ts(cpts);
spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq))
delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
spin_unlock_irqrestore(&cpts->txq.lock, flags);
return delay;
}
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
struct ptp_header *hdr;
u8 msgtype;
u16 seqid;
if (ptp_class == PTP_CLASS_NONE)
return 0;
hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
return 0;
msgtype = ptp_get_msgtype(hdr, ptp_class);
seqid = ntohs(hdr->sequence_id);
*mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
*mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
return 1;
}
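/* Worked example (assuming the event1 field layout defined earlier in this
 * file: sequence id in bits 15:0, message type in bits 19:16, event type in
 * bits 23:20): a Delay_Req (msgtype 0x1) with sequence id 0x002a packs to
 * mtype_seqid = 0x0001002a here; the TX path then ORs in
 * AM65_CPTS_EV_TX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT before matching it
 * against CPTS FIFO events.
 */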
static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
{
struct list_head *this, *next;
struct am65_cpts_event *event;
unsigned long flags;
u32 mtype_seqid;
u64 ns = 0;
spin_lock_irqsave(&cpts->lock, flags);
__am65_cpts_fifo_read(cpts);
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct am65_cpts_event, list);
if (time_after(jiffies, event->tmo)) {
list_move(&event->list, &cpts->pool);
continue;
}
mtype_seqid = event->event1 &
(AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
if (mtype_seqid == skb_mtype_seqid) {
ns = event->timestamp;
list_move(&event->list, &cpts->pool);
break;
}
}
spin_unlock_irqrestore(&cpts->lock, flags);
return ns;
}
void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
struct skb_shared_hwtstamps *ssh;
int ret;
u64 ns;
/* am65_cpts_rx_timestamp() is called before eth_type_trans(), so the
 * skb MAC header is not set up yet and must be reset here.
 */
skb_reset_mac_header(skb);
ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
if (!ret)
return; /* if not PTP class packet */
skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);
ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid);
if (!ns)
return;
ssh = skb_hwtstamps(skb);
memset(ssh, 0, sizeof(*ssh));
ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp);
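/* Illustrative RX usage sketch (names such as common->cpts, ndev and napi
 * are hypothetical placeholders for the calling MAC driver): the function
 * is expected to run on the raw skb before eth_type_trans(), e.g.:
 *
 *	am65_cpts_rx_timestamp(common->cpts, skb);
 *	skb->protocol = eth_type_trans(skb, ndev);
 *	napi_gro_receive(napi, skb);
 */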
/**
* am65_cpts_tx_timestamp - save tx packet for timestamping
* @cpts: cpts handle
* @skb: packet
*
* This function saves the tx packet for timestamping if the packet can be
* timestamped. Further processing is done from the PTP auxiliary worker.
*/
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
return;
/* add frame to queue for processing later.
* The periodic FIFO check will handle this.
*/
skb_get(skb);
/* record the expiry time used to time out unmatched skbs */
skb_cb->tmo = jiffies + msecs_to_jiffies(100);
skb_queue_tail(&cpts->txq, skb);
ptp_schedule_worker(cpts->ptp_clock, 0);
}
EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
/**
* am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
* @cpts: cpts handle
* @skb: packet
*
* This function should be called from .xmit(). It checks whether the packet
* can be timestamped, fills the internal cpts data in skb->cb and marks the
* packet as SKBTX_IN_PROGRESS.
*/
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
int ret;
if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
if (!ret)
return;
skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
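/* Illustrative TX usage sketch (the exact call sites depend on the MAC
 * driver; this only shows the intended ordering): the prep helper runs in
 * .ndo_start_xmit() as noted above, and am65_cpts_tx_timestamp() then
 * queues the skb to cpts->txq and kicks the PTP worker once the frame is
 * on its way to the hardware:
 *
 *	am65_cpts_prep_tx_timestamp(cpts, skb);
 *	// ...queue the frame to the TX DMA channel...
 *	am65_cpts_tx_timestamp(cpts, skb);
 */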
int am65_cpts_phc_index(struct am65_cpts *cpts)
{
return cpts->phc_index;
}
EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
static void cpts_free_clk_mux(void *data)
{
struct am65_cpts *cpts = data;
of_clk_del_provider(cpts->clk_mux_np);
clk_hw_unregister_mux(cpts->clk_mux_hw);
of_node_put(cpts->clk_mux_np);
}
static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
struct device_node *node)
{
unsigned int num_parents;
const char **parent_names;
char *clk_mux_name;
void __iomem *reg;
int ret = -EINVAL;
cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
if (!cpts->clk_mux_np)
return 0;
num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
if (num_parents < 1) {
dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
cpts->clk_mux_np);
goto mux_fail;
}
parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
GFP_KERNEL);
if (!parent_names) {
ret = -ENOMEM;
goto mux_fail;
}
of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
dev_name(cpts->dev), cpts->clk_mux_np);
if (!clk_mux_name) {
ret = -ENOMEM;
goto mux_fail;
}
reg = &cpts->reg->rftclk_sel;
/* dev must be NULL to avoid recursive incrementing
* of module refcnt
*/
cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
parent_names, num_parents,
0, reg, 0, 5, 0, NULL);
if (IS_ERR(cpts->clk_mux_hw)) {
ret = PTR_ERR(cpts->clk_mux_hw);
goto mux_fail;
}
ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
cpts->clk_mux_hw);
if (ret)
goto clk_hw_register;
ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
if (ret)
dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
return ret;
clk_hw_register:
clk_hw_unregister_mux(cpts->clk_mux_hw);
mux_fail:
of_node_put(cpts->clk_mux_np);
return ret;
}
static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
{
u32 prop[2];
if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
cpts->ext_ts_inputs = prop[0];
if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
cpts->genf_num = prop[0];
if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) {
cpts->pps_present = true;
if (prop[0] > 7) {
dev_err(cpts->dev, "invalid HWx_TS_PUSH index: %u provided\n", prop[0]);
cpts->pps_present = false;
}
if (prop[1] > 1) {
dev_err(cpts->dev, "invalid GENFy index: %u provided\n", prop[1]);
cpts->pps_present = false;
}
if (cpts->pps_present) {
cpts->pps_hw_ts_idx = prop[0];
cpts->pps_genf_idx = prop[1];
}
}
return cpts_of_mux_clk_setup(cpts, node);
}
void am65_cpts_release(struct am65_cpts *cpts)
{
ptp_clock_unregister(cpts->ptp_clock);
am65_cpts_disable(cpts);
clk_disable_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(am65_cpts_release);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node)
{
struct am65_cpts *cpts;
int ret, i;
cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
if (!cpts)
return ERR_PTR(-ENOMEM);
cpts->dev = dev;
cpts->reg = (struct am65_cpts_regs __iomem *)regs;
cpts->irq = of_irq_get_byname(node, "cpts");
if (cpts->irq <= 0) {
ret = cpts->irq ?: -ENXIO;
dev_err_probe(dev, ret, "Failed to get IRQ number\n");
return ERR_PTR(ret);
}
ret = am65_cpts_of_parse(cpts, node);
if (ret)
return ERR_PTR(ret);
mutex_init(&cpts->ptp_clk_lock);
INIT_LIST_HEAD(&cpts->events);
INIT_LIST_HEAD(&cpts->pool);
spin_lock_init(&cpts->lock);
skb_queue_head_init(&cpts->txq);
for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
if (IS_ERR(cpts->refclk)) {
ret = PTR_ERR(cpts->refclk);
dev_err_probe(dev, ret, "Failed to get refclk\n");
return ERR_PTR(ret);
}
ret = clk_prepare_enable(cpts->refclk);
if (ret) {
dev_err(dev, "Failed to enable refclk %d\n", ret);
return ERR_PTR(ret);
}
cpts->refclk_freq = clk_get_rate(cpts->refclk);
am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
cpts->ptp_info = am65_ptp_info;
if (cpts->ext_ts_inputs)
cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
if (cpts->genf_num)
cpts->ptp_info.n_per_out = cpts->genf_num;
if (cpts->pps_present)
cpts->ptp_info.pps = 1;
am65_cpts_set_add_val(cpts);
am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
AM65_CPTS_CONTROL_64MODE |
AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
control);
am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
/* set time to the current system time */
am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
dev_err(dev, "Failed to register ptp clk %ld\n",
PTR_ERR(cpts->ptp_clock));
ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
goto refclk_disable;
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
am65_cpts_interrupt,
IRQF_ONESHOT, dev_name(dev), cpts);
if (ret < 0) {
dev_err(cpts->dev, "error attaching irq %d\n", ret);
goto reset_ptpclk;
}
dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n",
am65_cpts_read32(cpts, idver),
cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present);
return cpts;
reset_ptpclk:
am65_cpts_release(cpts);
refclk_disable:
clk_disable_unprepare(cpts->refclk);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(am65_cpts_create);
void am65_cpts_suspend(struct am65_cpts *cpts)
{
/* save state and disable CPTS */
cpts->sr_control = am65_cpts_read32(cpts, control);
cpts->sr_int_enable = am65_cpts_read32(cpts, int_enable);
cpts->sr_rftclk_sel = am65_cpts_read32(cpts, rftclk_sel);
cpts->sr_ts_ppm_hi = am65_cpts_read32(cpts, ts_ppm_hi);
cpts->sr_ts_ppm_low = am65_cpts_read32(cpts, ts_ppm_low);
cpts->sr_cpts_ns = am65_cpts_gettime(cpts, NULL);
cpts->sr_ktime_ns = ktime_to_ns(ktime_get_real());
am65_cpts_disable(cpts);
clk_disable(cpts->refclk);
/* Save GENF state */
memcpy_fromio(&cpts->sr_genf, &cpts->reg->genf, sizeof(cpts->sr_genf));
/* Save ESTF state */
memcpy_fromio(&cpts->sr_estf, &cpts->reg->estf, sizeof(cpts->sr_estf));
}
EXPORT_SYMBOL_GPL(am65_cpts_suspend);
void am65_cpts_resume(struct am65_cpts *cpts)
{
int i;
s64 ktime_ns;
/* restore state and enable CPTS */
clk_enable(cpts->refclk);
am65_cpts_write32(cpts, cpts->sr_rftclk_sel, rftclk_sel);
am65_cpts_set_add_val(cpts);
am65_cpts_write32(cpts, cpts->sr_control, control);
am65_cpts_write32(cpts, cpts->sr_int_enable, int_enable);
/* Restore time to saved CPTS time + time in suspend/resume */
ktime_ns = ktime_to_ns(ktime_get_real());
ktime_ns -= cpts->sr_ktime_ns;
am65_cpts_settime(cpts, cpts->sr_cpts_ns + ktime_ns);
/* Restore compensation (PPM) */
am65_cpts_write32(cpts, cpts->sr_ts_ppm_hi, ts_ppm_hi);
am65_cpts_write32(cpts, cpts->sr_ts_ppm_low, ts_ppm_low);
/* Restore GENF state */
for (i = 0; i < AM65_CPTS_GENF_MAX_NUM; i++) {
am65_cpts_write32(cpts, 0, genf[i].length); /* TRM sequence */
am65_cpts_write32(cpts, cpts->sr_genf[i].comp_hi, genf[i].comp_hi);
am65_cpts_write32(cpts, cpts->sr_genf[i].comp_lo, genf[i].comp_lo);
am65_cpts_write32(cpts, cpts->sr_genf[i].length, genf[i].length);
am65_cpts_write32(cpts, cpts->sr_genf[i].control, genf[i].control);
am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_hi, genf[i].ppm_hi);
am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_low, genf[i].ppm_low);
}
/* Restore ESTF state */
for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
am65_cpts_write32(cpts, 0, estf[i].length); /* TRM sequence */
am65_cpts_write32(cpts, cpts->sr_estf[i].comp_hi, estf[i].comp_hi);
am65_cpts_write32(cpts, cpts->sr_estf[i].comp_lo, estf[i].comp_lo);
am65_cpts_write32(cpts, cpts->sr_estf[i].length, estf[i].length);
am65_cpts_write32(cpts, cpts->sr_estf[i].control, estf[i].control);
am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_hi, estf[i].ppm_hi);
am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_low, estf[i].ppm_low);
}
}
EXPORT_SYMBOL_GPL(am65_cpts_resume);
static int am65_cpts_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct am65_cpts *cpts;
void __iomem *base;
base = devm_platform_ioremap_resource_byname(pdev, "cpts");
if (IS_ERR(base))
return PTR_ERR(base);
cpts = am65_cpts_create(dev, base, node);
return PTR_ERR_OR_ZERO(cpts);
}
static const struct of_device_id am65_cpts_of_match[] = {
{ .compatible = "ti,am65-cpts", },
{ .compatible = "ti,j721e-cpts", },
{},
};
MODULE_DEVICE_TABLE(of, am65_cpts_of_match);
static struct platform_driver am65_cpts_driver = {
.probe = am65_cpts_probe,
.driver = {
.name = "am65-cpts",
.of_match_table = am65_cpts_of_match,
},
};
module_platform_driver(am65_cpts_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");