// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support - character device implementation.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "ptp_private.h"

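/* Ask the driver to switch off the given auxiliary function on a channel. */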
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
			       enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_request rq;
	int err = 0;

	memset(&rq, 0, sizeof(rq));

	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		rq.type = PTP_CLK_REQ_EXTTS;
		rq.extts.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PEROUT:
		rq.type = PTP_CLK_REQ_PEROUT;
		rq.perout.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PHYSYNC:
		break;
	default:
		return -EINVAL;
	}

	return err;
}

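/*
 * Bind a pin to the requested function and channel, disabling whatever the
 * pin, or any other pin serving that function/channel, was doing before.
 */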
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
		    enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_info *info = ptp->info;
	struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
	unsigned int i;

	/* Check to see if any other pin previously had this function. */
	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan) {
			pin1 = &info->pin_config[i];
			break;
		}
	}
	if (pin1 && i == pin)
		return 0;

	/* Check the desired function and channel. */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (chan >= info->n_ext_ts)
			return -EINVAL;
		break;
	case PTP_PF_PEROUT:
		if (chan >= info->n_per_out)
			return -EINVAL;
		break;
	case PTP_PF_PHYSYNC:
		if (chan != 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (info->verify(info, pin, func, chan)) {
		pr_err("driver cannot use function %u and channel %u on pin %u\n",
		       func, chan, pin);
		return -EOPNOTSUPP;
	}

	/* Disable whatever function was previously assigned. */
	if (pin1) {
		ptp_disable_pinfunc(info, func, chan);
		pin1->func = PTP_PF_NONE;
		pin1->chan = 0;
	}
	ptp_disable_pinfunc(info, pin2->func, pin2->chan);
	pin2->func = func;
	pin2->chan = chan;

	return 0;
}

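/*
 * Open of the character device: allocate a per-reader timestamp event queue,
 * enable all channels in its mask, and expose the mask via debugfs.
 */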
int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
	struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	char debugfsname[32];

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -EINVAL;
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		kfree(queue);
		return -EINVAL;
	}
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	scoped_guard(spinlock_irq, &ptp->tsevqs_lock)
		list_add_tail(&queue->qlist, &ptp->tsevqs);
	pccontext->private_clkdata = queue;

	/* Debugfs contents */
	sprintf(debugfsname, "0x%p", queue);
	queue->debugfs_instance =
		debugfs_create_dir(debugfsname, ptp->debugfs_root);
	queue->dfs_bitmap.array = (u32 *)queue->mask;
	queue->dfs_bitmap.n_elements =
		DIV_ROUND_UP(PTP_MAX_CHANNELS, BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("mask", 0444, queue->debugfs_instance,
				 &queue->dfs_bitmap);

	return 0;
}

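/* Undo ptp_open(): remove the debugfs entries and free the reader's queue. */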
int ptp_release(struct posix_clock_context *pccontext)
{
	struct timestamp_event_queue *queue = pccontext->private_clkdata;
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);

	debugfs_remove(queue->debugfs_instance);
	pccontext->private_clkdata = NULL;
	scoped_guard(spinlock_irq, &ptp->tsevqs_lock)
		list_del(&queue->qlist);
	bitmap_free(queue->mask);
	kfree(queue);
	return 0;
}

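/* PTP_CLOCK_GETCAPS(2): report the clock's capabilities to user space. */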
static long ptp_clock_getcaps(struct ptp_clock *ptp, void __user *arg)
{
	struct ptp_clock_caps caps = {
		.max_adj = ptp->info->max_adj,
		.n_alarm = ptp->info->n_alarm,
		.n_ext_ts = ptp->info->n_ext_ts,
		.n_per_out = ptp->info->n_per_out,
		.pps = ptp->info->pps,
		.n_pins = ptp->info->n_pins,
		.cross_timestamping = ptp->info->getcrosststamp != NULL,
		.adjust_phase = ptp->info->adjphase != NULL &&
				ptp->info->getmaxphase != NULL,
	};

	if (caps.adjust_phase)
		caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);

	return copy_to_user(arg, &caps, sizeof(caps)) ? -EFAULT : 0;
}

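/*
 * PTP_EXTTS_REQUEST(2): validate the request flags and toggle external
 * timestamp capture on the selected channel.
 */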
static long ptp_extts_request(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
	struct ptp_clock_info *ops = ptp->info;
	unsigned int supported_extts_flags;

	if (copy_from_user(&req.extts, arg, sizeof(req.extts)))
		return -EFAULT;

	if (cmd == PTP_EXTTS_REQUEST2) {
		/* Tell the drivers to check the flags carefully. */
		req.extts.flags |= PTP_STRICT_FLAGS;
		/* Make sure no reserved bit is set. */
		if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
		    req.extts.rsv[0] || req.extts.rsv[1])
			return -EINVAL;

		/* Ensure one of the rising/falling edge bits is set. */
		if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
		    (req.extts.flags & PTP_EXTTS_EDGES) == 0)
			return -EINVAL;
	} else {
		req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
		memset(req.extts.rsv, 0, sizeof(req.extts.rsv));
	}

	if (req.extts.index >= ops->n_ext_ts)
		return -EINVAL;

	supported_extts_flags = ptp->info->supported_extts_flags;
	/* The PTP_ENABLE_FEATURE flag is always supported. */
	supported_extts_flags |= PTP_ENABLE_FEATURE;
	/* If the driver does not support strictly checking flags, the
	 * PTP_RISING_EDGE and PTP_FALLING_EDGE flags are merely hints
	 * which are not enforced.
	 */
	if (!(supported_extts_flags & PTP_STRICT_FLAGS))
		supported_extts_flags |= PTP_EXTTS_EDGES;
	/* Reject unsupported flags */
	if (req.extts.flags & ~supported_extts_flags)
		return -EOPNOTSUPP;

	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		return ops->enable(ops, &req, req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0);
}

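/* PTP_PEROUT_REQUEST(2): validate and program a periodic output signal. */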
static long ptp_perout_request(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
	struct ptp_perout_request *perout = &req.perout;
	struct ptp_clock_info *ops = ptp->info;

	if (copy_from_user(perout, arg, sizeof(*perout)))
		return -EFAULT;

	if (cmd == PTP_PEROUT_REQUEST2) {
		if (perout->flags & ~PTP_PEROUT_VALID_FLAGS)
			return -EINVAL;

		/*
		 * The "on" field has undefined meaning if
		 * PTP_PEROUT_DUTY_CYCLE isn't set; we must still treat it
		 * as reserved, which must be set to zero.
		 */
		if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
		    !mem_is_zero(perout->rsv, sizeof(perout->rsv)))
			return -EINVAL;

		if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
			/* The duty cycle must be subunitary. */
			if (perout->on.sec > perout->period.sec ||
			    (perout->on.sec == perout->period.sec &&
			     perout->on.nsec > perout->period.nsec))
				return -ERANGE;
		}

		if (perout->flags & PTP_PEROUT_PHASE) {
			/*
			 * The phase should be specified modulo the period;
			 * therefore anything equal to or larger than 1 period
			 * is invalid.
			 */
			if (perout->phase.sec > perout->period.sec ||
			    (perout->phase.sec == perout->period.sec &&
			     perout->phase.nsec >= perout->period.nsec))
				return -ERANGE;
		}
	} else {
		perout->flags &= PTP_PEROUT_V1_VALID_FLAGS;
		memset(perout->rsv, 0, sizeof(perout->rsv));
	}

	if (perout->index >= ops->n_per_out)
		return -EINVAL;
	if (perout->flags & ~ops->supported_perout_flags)
		return -EOPNOTSUPP;

	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		return ops->enable(ops, &req, perout->period.sec || perout->period.nsec);
}

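/*
 * PTP_ENABLE_PPS(2): feed this clock's PPS events into the system.
 * Requires CAP_SYS_TIME.
 */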
static long ptp_enable_pps(struct ptp_clock *ptp, bool enable)
{
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
	struct ptp_clock_info *ops = ptp->info;

	if (!capable(CAP_SYS_TIME))
		return -EPERM;

	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		return ops->enable(ops, &req, enable);
}

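/*
 * PTP_SYS_OFFSET_PRECISE(2): report a hardware cross-timestamp of the
 * device clock and system time.
 */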
static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg)
{
	struct ptp_sys_offset_precise precise_offset;
	struct system_device_crosststamp xtstamp;
	struct timespec64 ts;
	int err;

	if (!ptp->info->getcrosststamp)
		return -EOPNOTSUPP;

	err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
	if (err)
		return err;

	memset(&precise_offset, 0, sizeof(precise_offset));
	ts = ktime_to_timespec64(xtstamp.device);
	precise_offset.device.sec = ts.tv_sec;
	precise_offset.device.nsec = ts.tv_nsec;
	ts = ktime_to_timespec64(xtstamp.sys_realtime);
	precise_offset.sys_realtime.sec = ts.tv_sec;
	precise_offset.sys_realtime.nsec = ts.tv_nsec;
	ts = ktime_to_timespec64(xtstamp.sys_monoraw);
	precise_offset.sys_monoraw.sec = ts.tv_sec;
	precise_offset.sys_monoraw.nsec = ts.tv_nsec;

	return copy_to_user(arg, &precise_offset, sizeof(precise_offset)) ? -EFAULT : 0;
}

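/*
 * PTP_SYS_OFFSET_EXTENDED(2): sample the device clock n_samples times,
 * each reading bracketed by pre/post system timestamps.
 */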
static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg)
{
	struct ptp_sys_offset_extended *extoff __free(kfree) = NULL;
	struct ptp_system_timestamp sts;

	if (!ptp->info->gettimex64)
		return -EOPNOTSUPP;

	extoff = memdup_user(arg, sizeof(*extoff));
	if (IS_ERR(extoff))
		return PTR_ERR(extoff);

	if (extoff->n_samples > PTP_MAX_SAMPLES || extoff->rsv[0] || extoff->rsv[1])
		return -EINVAL;

	switch (extoff->clockid) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
		break;
	case CLOCK_AUX ... CLOCK_AUX_LAST:
		if (IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS))
			break;
		fallthrough;
	default:
		return -EINVAL;
	}

	sts.clockid = extoff->clockid;
	for (unsigned int i = 0; i < extoff->n_samples; i++) {
		struct timespec64 ts;
		int err;

		err = ptp->info->gettimex64(ptp->info, &ts, &sts);
		if (err)
			return err;

		/* Filter out disabled or unavailable clocks */
		if (sts.pre_ts.tv_sec < 0 || sts.post_ts.tv_sec < 0)
			return -EINVAL;

		extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
		extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
		extoff->ts[i][1].sec = ts.tv_sec;
		extoff->ts[i][1].nsec = ts.tv_nsec;
		extoff->ts[i][2].sec = sts.post_ts.tv_sec;
		extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
	}

	return copy_to_user(arg, extoff, sizeof(*extoff)) ? -EFAULT : 0;
}

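/* PTP_SYS_OFFSET(2): interleave system and device clock readings. */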
static long ptp_sys_offset(struct ptp_clock *ptp, void __user *arg)
{
	struct ptp_sys_offset *sysoff __free(kfree) = NULL;
	struct ptp_clock_time *pct;
	struct timespec64 ts;

	sysoff = memdup_user(arg, sizeof(*sysoff));
	if (IS_ERR(sysoff))
		return PTR_ERR(sysoff);

	if (sysoff->n_samples > PTP_MAX_SAMPLES)
		return -EINVAL;

	pct = &sysoff->ts[0];
	for (unsigned int i = 0; i < sysoff->n_samples; i++) {
		struct ptp_clock_info *ops = ptp->info;
		int err;

		ktime_get_real_ts64(&ts);
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		pct++;
		if (ops->gettimex64)
			err = ops->gettimex64(ops, &ts, NULL);
		else
			err = ops->gettime64(ops, &ts);
		if (err)
			return err;
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		pct++;
	}
	ktime_get_real_ts64(&ts);
	pct->sec = ts.tv_sec;
	pct->nsec = ts.tv_nsec;

	return copy_to_user(arg, sysoff, sizeof(*sysoff)) ? -EFAULT : 0;
}

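/* PTP_PIN_GETFUNC(2): return the current function and channel of a pin. */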
static long ptp_pin_getfunc(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_pin_desc pd;

	if (copy_from_user(&pd, arg, sizeof(pd)))
		return -EFAULT;

	if (cmd == PTP_PIN_GETFUNC2 && !mem_is_zero(pd.rsv, sizeof(pd.rsv)))
		return -EINVAL;

	if (pd.index >= ops->n_pins)
		return -EINVAL;

	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		pd = ops->pin_config[array_index_nospec(pd.index, ops->n_pins)];

	return copy_to_user(arg, &pd, sizeof(pd)) ? -EFAULT : 0;
}

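/* PTP_PIN_SETFUNC(2): reprogram the function and channel of a pin. */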
static long ptp_pin_setfunc(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_pin_desc pd;
	unsigned int pin_index;

	if (copy_from_user(&pd, arg, sizeof(pd)))
		return -EFAULT;

	if (cmd == PTP_PIN_SETFUNC2 && !mem_is_zero(pd.rsv, sizeof(pd.rsv)))
		return -EINVAL;

	if (pd.index >= ops->n_pins)
		return -EINVAL;

	pin_index = array_index_nospec(pd.index, ops->n_pins);
	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		return ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
}

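/* PTP_MASK_CLEAR_ALL: stop queueing events from any channel to this reader. */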
static long ptp_mask_clear_all(struct timestamp_event_queue *tsevq)
{
	bitmap_clear(tsevq->mask, 0, PTP_MAX_CHANNELS);
	return 0;
}

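/* PTP_MASK_EN_SINGLE: resume queueing events from one channel to this reader. */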
static long ptp_mask_en_single(struct timestamp_event_queue *tsevq, void __user *arg)
{
	unsigned int channel;

	if (copy_from_user(&channel, arg, sizeof(channel)))
		return -EFAULT;
	if (channel >= PTP_MAX_CHANNELS)
		return -EFAULT;
	set_bit(channel, tsevq->mask);
	return 0;
}

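/*
 * Dispatch the PTP chardev ioctls.  Requests that program the hardware are
 * only honoured on a descriptor opened for writing.
 */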
long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
	       unsigned long arg)
{
	struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
	void __user *argptr;

	if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2)
		arg = (unsigned long)compat_ptr(arg);
	argptr = (void __force __user *)arg;

	switch (cmd) {
	case PTP_CLOCK_GETCAPS:
	case PTP_CLOCK_GETCAPS2:
		return ptp_clock_getcaps(ptp, argptr);

	case PTP_EXTTS_REQUEST:
	case PTP_EXTTS_REQUEST2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_extts_request(ptp, cmd, argptr);

	case PTP_PEROUT_REQUEST:
	case PTP_PEROUT_REQUEST2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_perout_request(ptp, cmd, argptr);

	case PTP_ENABLE_PPS:
	case PTP_ENABLE_PPS2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_enable_pps(ptp, !!arg);

	case PTP_SYS_OFFSET_PRECISE:
	case PTP_SYS_OFFSET_PRECISE2:
		return ptp_sys_offset_precise(ptp, argptr);

	case PTP_SYS_OFFSET_EXTENDED:
	case PTP_SYS_OFFSET_EXTENDED2:
		return ptp_sys_offset_extended(ptp, argptr);

	case PTP_SYS_OFFSET:
	case PTP_SYS_OFFSET2:
		return ptp_sys_offset(ptp, argptr);

	case PTP_PIN_GETFUNC:
	case PTP_PIN_GETFUNC2:
		return ptp_pin_getfunc(ptp, cmd, argptr);

	case PTP_PIN_SETFUNC:
	case PTP_PIN_SETFUNC2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_pin_setfunc(ptp, cmd, argptr);

	case PTP_MASK_CLEAR_ALL:
		return ptp_mask_clear_all(pccontext->private_clkdata);

	case PTP_MASK_EN_SINGLE:
		return ptp_mask_en_single(pccontext->private_clkdata, argptr);

	default:
		return -ENOTTY;
	}
}

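/* Report EPOLLIN when this reader's event queue has timestamps pending. */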
__poll_t ptp_poll(struct posix_clock_context *pccontext, struct file *fp,
		  poll_table *wait)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;

	queue = pccontext->private_clkdata;
	if (!queue)
		return EPOLLERR;

	poll_wait(fp, &ptp->tsev_wq, wait);

	return queue_cnt(queue) ? EPOLLIN : 0;
}

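/* Upper bound on how many bytes of events a single read() will return. */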
#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))

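/*
 * Blocking read of external timestamp events: wait until the queue has
 * entries (or the clock is being removed), then drain up to cnt bytes.
 */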
ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
		 char __user *buf, size_t cnt)
{
	struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	struct ptp_extts_event *event;
	ssize_t result;

	queue = pccontext->private_clkdata;
	if (!queue)
		return -EINVAL;

	if (cnt % sizeof(*event) != 0)
		return -EINVAL;

	if (cnt > EXTTS_BUFSIZE)
		cnt = EXTTS_BUFSIZE;

	if (wait_event_interruptible(ptp->tsev_wq, ptp->defunct || queue_cnt(queue)))
		return -ERESTARTSYS;

	if (ptp->defunct)
		return -ENODEV;

	event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	scoped_guard(spinlock_irq, &queue->lock) {
		size_t qcnt = min((size_t)queue_cnt(queue), cnt / sizeof(*event));

		for (size_t i = 0; i < qcnt; i++) {
			event[i] = queue->buf[queue->head];
			/* Paired with READ_ONCE() in queue_cnt() */
			WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
		}
		cnt = qcnt * sizeof(*event);
	}

	result = cnt;
	if (copy_to_user(buf, event, cnt))
		result = -EFAULT;

	kfree(event);
	return result;
}