/**
 * @file clock.c
 * @note Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <errno.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include "bmc.h"
#include "clock.h"
#include "foreign.h"
#include "mave.h"
#include "missing.h"
#include "msg.h"
#include "phc.h"
#include "port.h"
#include "servo.h"
#include "print.h"
#include "tlv.h"
#include "uds.h"
#include "util.h"

#define CLK_N_PORTS (MAX_PORTS + 1) /* plus one for the UDS interface */
#define FAULT_RESET_SECONDS 15
#define N_CLOCK_PFD (N_POLLFD + 1) /* one extra per port, for the fault timer */
#define MAVE_LENGTH 10
#define POW2_41 ((double)(1ULL << 41))
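
/*
 * The cumulativeScaledRateOffset field of the follow-up information TLV
 * carries (rateRatio - 1) * 2^41, so POW2_41 converts between that wire
 * encoding and a plain double.  For example, a clock running fast by
 * 100 ppb (ratio 1.0000001) encodes as roughly 100e-9 * 2^41 ~= 219902.
 */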

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

struct freq_estimator {
	tmv_t origin1;
	tmv_t ingress1;
	int max_count;
	int count;
};

struct clock {
	clockid_t clkid;
	struct servo *servo;
	struct defaultDS dds;
	struct dataset default_dataset;
	struct currentDS cur;
	struct parentDS dad;
	struct timePropertiesDS tds;
	struct ClockIdentity ptl[PATH_TRACE_MAX];
	struct foreign_clock *best;
	struct ClockIdentity best_id;
	struct port *port[CLK_N_PORTS];
	struct pollfd pollfd[CLK_N_PORTS*N_CLOCK_PFD];
	int fault_fd[CLK_N_PORTS];
	time_t fault_timeout;
	int nports; /* does not include the UDS port */
	int utc_timescale;
	tmv_t master_offset;
	tmv_t path_delay;
	struct mave *avg_delay;
	struct freq_estimator fest;
	struct time_status_np status;
	double nrr;
	tmv_t c1;
	tmv_t c2;
	tmv_t t1;
	tmv_t t2;
};

struct clock the_clock;

static void handle_state_decision_event(struct clock *c);

static int cid_eq(struct ClockIdentity *a, struct ClockIdentity *b)
{
	return 0 == memcmp(a, b, sizeof(*a));
}

void clock_destroy(struct clock *c)
{
	int i;
	for (i = 0; i < c->nports; i++) {
		port_close(c->port[i]);
		close(c->fault_fd[i]);
	}
	port_close(c->port[i]); /*uds*/
	if (c->clkid != CLOCK_REALTIME) {
		phc_close(c->clkid);
	}
	servo_destroy(c->servo);
	mave_destroy(c->avg_delay);
	memset(c, 0, sizeof(*c));
	msg_cleanup();
}

static int clock_fault_timeout(struct clock *c, int index, int set)
{
	struct itimerspec tmo = {
		{0, 0}, {0, 0}
	};
	if (set) {
		pr_debug("waiting %d seconds to clear fault on port %d",
			 c->fault_timeout, index);
		tmo.it_value.tv_sec = c->fault_timeout;
	} else {
		pr_debug("clearing fault on port %d", index);
	}
	return timerfd_settime(c->fault_fd[index], 0, &tmo, NULL);
}

static void clock_freq_est_reset(struct clock *c)
{
	c->fest.origin1 = tmv_zero();
	c->fest.ingress1 = tmv_zero();
	c->fest.count = 0;
}

static int clock_management_response(struct clock *c, struct port *p, int id,
				     struct ptp_message *req)
{
	int datalen = 0, err, pdulen, respond = 0;
	struct management_tlv *tlv;
	struct ptp_message *rsp;
	struct time_status_np *tsn;
	struct PortIdentity pid = port_identity(p);

	rsp = port_management_reply(pid, p, req);
	if (!rsp) {
		return 0;
	}
	tlv = (struct management_tlv *) rsp->management.suffix;
	tlv->type = TLV_MANAGEMENT;
	tlv->id = id;

	switch (id) {
	case CURRENT_DATA_SET:
		memcpy(tlv->data, &c->cur, sizeof(c->cur));
		datalen = sizeof(c->cur);
		respond = 1;
		break;
	case TIME_STATUS_NP:
		tsn = (struct time_status_np *) tlv->data;
		tsn->master_offset = c->master_offset;
		tsn->ingress_time = tmv_to_nanoseconds(c->t2);
		tsn->cumulativeScaledRateOffset =
			(UInteger32) (c->status.cumulativeScaledRateOffset +
				      c->nrr * POW2_41 - POW2_41);
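		/*
		 * Adding the scaled offsets is a first-order approximation:
		 * for small offsets a and b, (1 + a) * (1 + b) ~= 1 + a + b,
		 * and c->nrr * POW2_41 - POW2_41 is simply the neighbor rate
		 * ratio re-expressed in the same 2^41 scaling.
		 */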
		tsn->scaledLastGmPhaseChange = c->status.scaledLastGmPhaseChange;
		tsn->gmTimeBaseIndicator = c->status.gmTimeBaseIndicator;
		tsn->lastGmPhaseChange = c->status.lastGmPhaseChange;
		if (cid_eq(&c->dad.grandmasterIdentity, &c->dds.clockIdentity))
			tsn->gmPresent = 0;
		else
			tsn->gmPresent = 1;
		tsn->gmIdentity = c->dad.grandmasterIdentity;
		datalen = sizeof(*tsn);
		respond = 1;
		break;
	}
	if (respond) {
		tlv->length = sizeof(tlv->id) + datalen;
		pdulen = rsp->header.messageLength + sizeof(*tlv) + datalen;
		rsp->header.messageLength = pdulen;
		rsp->tlv_count = 1;
		err = msg_pre_send(rsp);
		if (err) {
			goto out;
		}
		err = port_forward(p, rsp, pdulen);
	}
out:
	msg_put(rsp);
	return respond ? 1 : 0;
}

static int clock_master_lost(struct clock *c)
{
	int i;
	for (i = 0; i < c->nports; i++) {
		if (PS_SLAVE == port_state(c->port[i]))
			return 0;
	}
	return 1;
}

static enum servo_state clock_no_adjust(struct clock *c)
{
	double fui;
	double ratio;
	tmv_t origin2;
	struct freq_estimator *f = &c->fest;
	enum servo_state state = SERVO_UNLOCKED;
	/*
	 * We have clock.t1 as the origin time stamp, and clock.t2 as
	 * the ingress. According to the master's clock, the time at
	 * which the sync arrived is:
	 *
	 *    origin = origin_ts + path_delay + correction
	 *
	 * The ratio of the local clock frequency to the master clock
	 * is estimated by:
	 *
	 *    (ingress_2 - ingress_1) / (origin_2 - origin_1)
	 *
	 * Both of the origin time estimates include the path delay,
	 * but we assume that the path delay is in fact constant.
	 * By leaving out the path delay altogether, we can avoid the
	 * error caused by our imperfect path delay measurement.
	 */
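	/*
	 * Worked example: if the master's time advances by 16.000000000 s
	 * between two syncs while the local clock sees only 15.999999840 s
	 * of ingress time, the master/local ratio computed below is
	 *
	 *    16.000000000 / 15.999999840 ~= 1.000000010
	 *
	 * i.e. the local clock is running slow by about 10 ppb.
	 */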
	if (!f->ingress1) {
		f->ingress1 = c->t2;
		f->origin1 = tmv_add(c->t1, tmv_add(c->c1, c->c2));
		return state;
	}
	f->count++;
	if (f->count < f->max_count) {
		return state;
	}
	if (tmv_eq(c->t2, f->ingress1)) {
		pr_warning("bad timestamps in rate ratio calculation");
		return state;
	}
	/*
	 * origin2 = c->t1 (+c->path_delay) + c->c1 + c->c2;
	 */
	origin2 = tmv_add(c->t1, tmv_add(c->c1, c->c2));

	ratio = tmv_dbl(tmv_sub(origin2, f->origin1)) /
		tmv_dbl(tmv_sub(c->t2, f->ingress1));

	pr_info("master offset %10lld s%d ratio %.9f path delay %10lld",
		c->master_offset, state, ratio, c->path_delay);

	fui = 1.0 + (c->status.cumulativeScaledRateOffset + 0.0) / POW2_41;

	pr_debug("peer/local %.9f", c->nrr);
	pr_debug("fup_info %.9f", fui);
	pr_debug("product %.9f", fui * c->nrr);
	pr_debug("sum-1 %.9f", fui + c->nrr - 1.0);
	pr_debug("master/local %.9f", ratio);
	pr_debug("diff %+.9f", ratio - (fui + c->nrr - 1.0));

	f->ingress1 = c->t2;
	f->origin1 = origin2;
	f->count = 0;

	return state;
}

static void clock_ppb(clockid_t clkid, double ppb)
{
	struct timex tx;
	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_FREQUENCY;
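	/*
	 * The timex freq field is in parts per million with a 16-bit
	 * fractional part, so convert from parts per billion:
	 * ppb * 65536 / 1000 = ppb * 65.536.  For example, an adjustment
	 * of +1000 ppb (1 ppm) becomes 65536.
	 */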
	tx.freq = (long) (ppb * 65.536);
	if (clock_adjtime(clkid, &tx) < 0)
		pr_err("failed to adjust the clock: %m");
}

static void clock_step(clockid_t clkid, int64_t ns)
{
	struct timex tx;
	int sign = 1;
	if (ns < 0) {
		sign = -1;
		ns *= -1;
	}
	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_SETOFFSET | ADJ_NANO;
	tx.time.tv_sec = sign * (ns / NS_PER_SEC);
	tx.time.tv_usec = sign * (ns % NS_PER_SEC);
	/*
	 * The value of a timeval is the sum of its fields, but the
	 * field tv_usec must always be non-negative.
	 */
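	/*
	 * With ADJ_NANO set, tv_usec actually holds nanoseconds.  For
	 * example, stepping by -1.5 s gives tv_sec = -1 and
	 * tv_usec = -500000000, which is normalized below to tv_sec = -2
	 * and tv_usec = +500000000 (the same total).
	 */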
	if (tx.time.tv_usec < 0) {
		tx.time.tv_sec -= 1;
		tx.time.tv_usec += 1000000000;
	}
	if (clock_adjtime(clkid, &tx) < 0)
		pr_err("failed to step clock: %m");
}

static void clock_update_grandmaster(struct clock *c)
{
	memset(&c->cur, 0, sizeof(c->cur));
	memset(c->ptl, 0, sizeof(c->ptl));
	c->dad.parentPortIdentity.clockIdentity = c->dds.clockIdentity;
	c->dad.parentPortIdentity.portNumber = 0;
	c->dad.grandmasterIdentity = c->dds.clockIdentity;
	c->dad.grandmasterClockQuality = c->dds.clockQuality;
	c->dad.grandmasterPriority1 = c->dds.priority1;
	c->dad.grandmasterPriority2 = c->dds.priority2;
	c->dad.path_length = 0;
	c->tds.currentUtcOffset = CURRENT_UTC_OFFSET;
	c->tds.currentUtcOffsetValid = FALSE;
	c->tds.leap61 = FALSE;
	c->tds.leap59 = FALSE;
	c->tds.timeTraceable = FALSE;
	c->tds.frequencyTraceable = FALSE;
	c->tds.ptpTimescale = c->utc_timescale ? FALSE : TRUE;
	c->tds.timeSource = INTERNAL_OSCILLATOR;
}

static void clock_update_slave(struct clock *c)
{
	struct ptp_message *msg = TAILQ_FIRST(&c->best->messages);
	c->cur.stepsRemoved = 1 + c->best->dataset.stepsRemoved;
	c->dad.parentPortIdentity = c->best->dataset.sender;
	c->dad.grandmasterIdentity = msg->announce.grandmasterIdentity;
	c->dad.grandmasterClockQuality = msg->announce.grandmasterClockQuality;
	c->dad.grandmasterPriority1 = msg->announce.grandmasterPriority1;
	c->dad.grandmasterPriority2 = msg->announce.grandmasterPriority2;
	c->tds.currentUtcOffset = msg->announce.currentUtcOffset;
	c->tds.currentUtcOffsetValid = field_is_set(msg, 1, UTC_OFF_VALID);
	c->tds.leap61 = field_is_set(msg, 1, LEAP_61);
	c->tds.leap59 = field_is_set(msg, 1, LEAP_59);
	c->tds.timeTraceable = field_is_set(msg, 1, TIME_TRACEABLE);
	c->tds.frequencyTraceable = field_is_set(msg, 1, FREQ_TRACEABLE);
	c->tds.ptpTimescale = field_is_set(msg, 1, PTP_TIMESCALE);
	c->tds.timeSource = msg->announce.timeSource;
	if (!c->tds.ptpTimescale) {
		pr_warning("foreign master not using PTP timescale");
	}
	if (c->tds.currentUtcOffset < CURRENT_UTC_OFFSET) {
		pr_warning("running in a temporal vortex");
	}
}

static void clock_utc_correct(struct clock *c)
{
	struct timespec offset;
	if (!c->utc_timescale)
		return;
	if (!c->tds.ptpTimescale)
		return;
	if (c->tds.currentUtcOffsetValid && c->tds.timeTraceable) {
		offset.tv_sec = c->tds.currentUtcOffset;
	} else if (c->tds.currentUtcOffset > CURRENT_UTC_OFFSET) {
		offset.tv_sec = c->tds.currentUtcOffset;
	} else {
		offset.tv_sec = CURRENT_UTC_OFFSET;
	}
	offset.tv_nsec = 0;
	/* Local clock is UTC, but master is TAI. */
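	/*
	 * For example, with TAI ahead of UTC by 35 s (the late 2012 value),
	 * a correct CLOCK_REALTIME reads 35 s behind the master's PTP time,
	 * so the offset is added back here and the servo steers the local
	 * clock to UTC rather than TAI.
	 */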
	c->master_offset = tmv_add(c->master_offset, timespec_to_tmv(offset));
}

static int forwarding(struct clock *c, struct port *p)
{
	enum port_state ps = port_state(p);
	switch (ps) {
	case PS_MASTER:
	case PS_GRAND_MASTER:
	case PS_SLAVE:
	case PS_UNCALIBRATED:
	case PS_PRE_MASTER:
		return 1;
	default:
		break;
	}
	if (p == c->port[c->nports]) { /*uds*/
		return 1;
	}
	return 0;
}

/* public methods */

UInteger8 clock_class(struct clock *c)
{
	return c->dds.clockQuality.clockClass;
}

struct clock *clock_create(int phc_index, struct interface *iface, int count,
			   enum timestamp_type timestamping, struct defaultDS *ds)
{
	int i, max_adj, sw_ts = timestamping == TS_SOFTWARE ? 1 : 0;
	struct clock *c = &the_clock;
	char phc[32];
	struct interface udsif;

	memset(&udsif, 0, sizeof(udsif));
	snprintf(udsif.name, sizeof(udsif.name), UDS_PATH);
	udsif.transport = TRANS_UDS;

	srandom(time(NULL));

	if (c->nports)
		clock_destroy(c);

	if (c->dds.free_running) {
		c->clkid = CLOCK_INVALID;
	} else if (phc_index >= 0) {
		snprintf(phc, 31, "/dev/ptp%d", phc_index);
		c->clkid = phc_open(phc);
		if (c->clkid == CLOCK_INVALID) {
			pr_err("Failed to open %s: %m", phc);
			return NULL;
		}
		max_adj = phc_max_adj(c->clkid);
		if (!max_adj) {
			pr_err("clock is not adjustable");
			return NULL;
		}
	} else {
		c->clkid = CLOCK_REALTIME;
		c->utc_timescale = 1;
		max_adj = 512000;
	}

	c->servo = servo_create("pi", max_adj, sw_ts);
	if (!c->servo) {
		pr_err("Failed to create clock servo");
		return NULL;
	}
	c->avg_delay = mave_create(MAVE_LENGTH);
	if (!c->avg_delay) {
		pr_err("Failed to create moving average");
		return NULL;
	}

	c->dds = *ds;

	/* Initialize the parentDS. */
	clock_update_grandmaster(c);
	c->dad.parentStats = 0;
	c->dad.observedParentOffsetScaledLogVariance = 0xffff;
	c->dad.observedParentClockPhaseChangeRate = 0x7fffffff;
	c->dad.ptl = c->ptl;

	for (i = 0; i < ARRAY_SIZE(c->pollfd); i++) {
		c->pollfd[i].fd = -1;
		c->pollfd[i].events = 0;
	}

	c->fault_timeout = FAULT_RESET_SECONDS;
	c->fest.max_count = 2;

	for (i = 0; i < count; i++) {
		c->port[i] = port_open(phc_index, timestamping, 1+i, &iface[i], c);
		if (!c->port[i]) {
			pr_err("failed to open port %s", iface[i].name);
			return NULL;
		}
		c->fault_fd[i] = timerfd_create(CLOCK_MONOTONIC, 0);
		if (c->fault_fd[i] < 0) {
			pr_err("timerfd_create failed: %m");
			return NULL;
		}
		c->pollfd[N_CLOCK_PFD * i + N_POLLFD].fd = c->fault_fd[i];
		c->pollfd[N_CLOCK_PFD * i + N_POLLFD].events = POLLIN|POLLPRI;
	}

	/*
	 * One extra port is for the UDS interface.
	 */
	c->port[i] = port_open(phc_index, timestamping, 0, &udsif, c);
	if (!c->port[i]) {
		pr_err("failed to open the UDS port");
		return NULL;
	}

	c->dds.numberPorts = c->nports = count;

	for (i = 0; i < c->nports; i++)
		port_dispatch(c->port[i], EV_INITIALIZE, 0);

	port_dispatch(c->port[i], EV_INITIALIZE, 0); /*uds*/

	return c;
}

struct dataset *clock_best_foreign(struct clock *c)
{
	return c->best ? &c->best->dataset : NULL;
}

struct port *clock_best_port(struct clock *c)
{
	return c->best ? c->best->port : NULL;
}

struct dataset *clock_default_ds(struct clock *c)
{
	struct dataset *out = &c->default_dataset;
	struct defaultDS *in = &c->dds;

	out->priority1 = in->priority1;
	out->identity = in->clockIdentity;
	out->quality = in->clockQuality;
	out->priority2 = in->priority2;
	out->stepsRemoved = 0;
	out->sender.clockIdentity = in->clockIdentity;
	out->sender.portNumber = 0;
	out->receiver.clockIdentity = in->clockIdentity;
	out->receiver.portNumber = 0;

	return out;
}

UInteger8 clock_domain_number(struct clock *c)
{
	return c->dds.domainNumber;
}

void clock_follow_up_info(struct clock *c, struct follow_up_info_tlv *f)
{
	c->status.cumulativeScaledRateOffset = f->cumulativeScaledRateOffset;
	c->status.scaledLastGmPhaseChange = f->scaledLastGmPhaseChange;
	c->status.gmTimeBaseIndicator = f->gmTimeBaseIndicator;
	memcpy(&c->status.lastGmPhaseChange, &f->lastGmPhaseChange,
	       sizeof(c->status.lastGmPhaseChange));
}

struct ClockIdentity clock_identity(struct clock *c)
{
	return c->dds.clockIdentity;
}

void clock_install_fda(struct clock *c, struct port *p, struct fdarray fda)
{
	int i, j, k;
	for (i = 0; i < c->nports + 1; i++) {
		if (p == c->port[i])
			break;
	}
	for (j = 0; j < N_POLLFD; j++) {
		k = N_CLOCK_PFD * i + j;
		c->pollfd[k].fd = fda.fd[j];
		c->pollfd[k].events = POLLIN|POLLPRI;
	}
}

void clock_manage(struct clock *c, struct port *p, struct ptp_message *msg)
{
	int i, pdulen;
	struct port *fwd;
	struct management_tlv *mgt;
	struct PortIdentity pid;
	struct ClockIdentity *tcid, wildcard = {
		{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
	};

	/* Forward this message out all eligible ports. */
	if (forwarding(c, p) && msg->management.boundaryHops) {
		pdulen = msg->header.messageLength;
		msg->management.boundaryHops--;
		msg_pre_send(msg);
		for (i = 0; i < c->nports + 1; i++) {
			fwd = c->port[i];
			if (fwd != p && forwarding(c, fwd) &&
			    port_forward(fwd, msg, pdulen))
				pr_err("port %d: management forward failed", i);
		}
		msg_post_recv(msg, pdulen);
		msg->management.boundaryHops++;
	}

	/* Apply this message to the local clock and ports. */
	tcid = &msg->management.targetPortIdentity.clockIdentity;
	if (!cid_eq(tcid, &wildcard) && !cid_eq(tcid, &c->dds.clockIdentity)) {
		return;
	}
	if (msg->tlv_count != 1) {
		return;
	}
	mgt = (struct management_tlv *) msg->management.suffix;

	if (clock_management_response(c, p, mgt->id, msg))
		return;

	switch (mgt->id) {
	case USER_DESCRIPTION:
	case SAVE_IN_NON_VOLATILE_STORAGE:
	case RESET_NON_VOLATILE_STORAGE:
	case INITIALIZE:
	case FAULT_LOG:
	case FAULT_LOG_RESET:
	case DEFAULT_DATA_SET:
	case PARENT_DATA_SET:
	case TIME_PROPERTIES_DATA_SET:
	case PRIORITY1:
	case PRIORITY2:
	case DOMAIN:
	case SLAVE_ONLY:
	case TIME:
	case CLOCK_ACCURACY:
	case UTC_PROPERTIES:
	case TRACEABILITY_PROPERTIES:
	case TIMESCALE_PROPERTIES:
	case PATH_TRACE_LIST:
	case PATH_TRACE_ENABLE:
	case GRANDMASTER_CLUSTER_TABLE:
	case ACCEPTABLE_MASTER_TABLE:
	case ACCEPTABLE_MASTER_MAX_TABLE_SIZE:
	case ALTERNATE_TIME_OFFSET_ENABLE:
	case ALTERNATE_TIME_OFFSET_NAME:
	case ALTERNATE_TIME_OFFSET_MAX_KEY:
	case ALTERNATE_TIME_OFFSET_PROPERTIES:
	case TRANSPARENT_CLOCK_DEFAULT_DATA_SET:
	case PRIMARY_DOMAIN:
		pid = port_identity(p);
		if (port_managment_error(pid, p, msg, NOT_SUPPORTED))
			pr_err("failed to send management error status");
		break;
	default:
		for (i = 0; i < c->nports; i++) {
			if (port_manage(c->port[i], p, msg))
				break;
		}
		break;
	}
}

struct parentDS *clock_parent_ds(struct clock *c)
{
	return &c->dad;
}

struct PortIdentity clock_parent_identity(struct clock *c)
{
	return c->dad.parentPortIdentity;
}

int clock_poll(struct clock *c)
{
	int cnt, i, j, k, lost = 0, sde = 0;
	enum fsm_event event;

	cnt = poll(c->pollfd, ARRAY_SIZE(c->pollfd), -1);
	if (cnt < 0) {
		if (EINTR == errno) {
			return 0;
		} else {
			pr_emerg("poll failed");
			return -1;
		}
	} else if (!cnt) {
		return 0;
	}

	for (i = 0; i < c->nports; i++) {

		/* Let the ports handle their events. */
		for (j = 0; j < N_POLLFD; j++) {
			k = N_CLOCK_PFD * i + j;
			if (c->pollfd[k].revents & (POLLIN|POLLPRI)) {
				event = port_event(c->port[i], j);
				if (EV_STATE_DECISION_EVENT == event)
					sde = 1;
				if (EV_ANNOUNCE_RECEIPT_TIMEOUT_EXPIRES == event)
					lost = 1;
				port_dispatch(c->port[i], event, 0);
			}
		}

		/* Check the fault timer. */
		k = N_CLOCK_PFD * i + N_POLLFD;
		if (c->pollfd[k].revents & (POLLIN|POLLPRI)) {
			clock_fault_timeout(c, i, 0);
			port_dispatch(c->port[i], EV_FAULT_CLEARED, 0);
		}

		/* Clear any fault after a little while. */
		if (PS_FAULTY == port_state(c->port[i])) {
			clock_fault_timeout(c, i, 1);
		}
	}

	/* Check the UDS port. */
	for (j = 0; j < N_POLLFD; j++) {
		k = N_CLOCK_PFD * i + j;
		if (c->pollfd[k].revents & (POLLIN|POLLPRI)) {
			event = port_event(c->port[i], j);
		}
	}

	if (lost && clock_master_lost(c))
		clock_update_grandmaster(c);
	if (sde)
		handle_state_decision_event(c);

	return 0;
}

void clock_path_delay(struct clock *c, struct timespec req, struct timestamp rx,
		      Integer64 correction)
{
	tmv_t c1, c2, c3, pd, t1, t2, t3, t4;

	if (tmv_is_zero(c->t1))
		return;

	c1 = c->c1;
	c2 = c->c2;
	c3 = correction_to_tmv(correction);
	t1 = c->t1;
	t2 = c->t2;
	t3 = timespec_to_tmv(req);
	t4 = timestamp_to_tmv(rx);

	/*
	 * c->path_delay = (t2 - t3) + (t4 - t1);
	 * c->path_delay -= c_sync + c_fup + c_delay_resp;
	 * c->path_delay /= 2.0;
	 */
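	/*
	 * Worked example with zero corrections: t1 = 100, t2 = 160,
	 * t3 = 200, t4 = 240 (all in ns) gives
	 *
	 *    pd = ((160 - 200) + (240 - 100)) / 2 = 50 ns
	 *
	 * assuming a symmetric path.
	 */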
	pd = tmv_add(tmv_sub(t2, t3), tmv_sub(t4, t1));
	pd = tmv_sub(pd, tmv_add(c1, tmv_add(c2, c3)));
	pd = tmv_div(pd, 2);

	if (pd < 0) {
		pr_warning("negative path delay %10lld", pd);
		pr_warning("path_delay = (t2 - t3) + (t4 - t1)");
		pr_warning("t2 - t3 = %+10lld", t2 - t3);
		pr_warning("t4 - t1 = %+10lld", t4 - t1);
		pr_warning("c1 %10lld", c1);
		pr_warning("c2 %10lld", c2);
		pr_warning("c3 %10lld", c3);
	}

	c->path_delay = mave_accumulate(c->avg_delay, pd);

	c->cur.meanPathDelay = tmv_to_TimeInterval(c->path_delay);

	pr_debug("path delay %10lld %10lld", c->path_delay, pd);
}

void clock_peer_delay(struct clock *c, tmv_t ppd, double nrr)
{
	c->path_delay = ppd;
	c->nrr = nrr;
}

void clock_remove_fda(struct clock *c, struct port *p, struct fdarray fda)
{
	int i, j, k;
	for (i = 0; i < c->nports + 1; i++) {
		if (p == c->port[i])
			break;
	}
	for (j = 0; j < N_POLLFD; j++) {
		k = N_CLOCK_PFD * i + j;
		c->pollfd[k].fd = -1;
		c->pollfd[k].events = 0;
	}
}

int clock_slave_only(struct clock *c)
{
	return c->dds.slaveOnly;
}

UInteger16 clock_steps_removed(struct clock *c)
{
	return c->cur.stepsRemoved;
}

enum servo_state clock_synchronize(struct clock *c,
				   struct timespec ingress_ts,
				   struct timestamp origin_ts,
				   Integer64 correction1,
				   Integer64 correction2)
{
	double adj;
	tmv_t ingress, origin;
	enum servo_state state = SERVO_UNLOCKED;

	ingress = timespec_to_tmv(ingress_ts);
	origin = timestamp_to_tmv(origin_ts);

	c->t1 = origin;
	c->t2 = ingress;

	c->c1 = correction_to_tmv(correction1);
	c->c2 = correction_to_tmv(correction2);

	/*
	 * c->master_offset = ingress - origin - c->path_delay - c->c1 - c->c2;
	 */
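	/*
	 * For example, ingress = 1000060 ns, origin = 1000000 ns,
	 * path_delay = 50 ns and zero corrections give a master offset
	 * of +10 ns, i.e. the local clock is 10 ns ahead of the master.
	 */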
	c->master_offset = tmv_sub(ingress,
		tmv_add(origin, tmv_add(c->path_delay, tmv_add(c->c1, c->c2))));

	clock_utc_correct(c);

	c->cur.offsetFromMaster = tmv_to_TimeInterval(c->master_offset);

	if (!c->path_delay)
		return state;

	if (c->dds.free_running)
		return clock_no_adjust(c);

	adj = servo_sample(c->servo, c->master_offset, ingress, &state);

	pr_info("master offset %10lld s%d adj %+7.0f path delay %10lld",
		c->master_offset, state, adj, c->path_delay);

	switch (state) {
	case SERVO_UNLOCKED:
		break;
	case SERVO_JUMP:
		clock_step(c->clkid, -c->master_offset);
		c->t1 = tmv_zero();
		c->t2 = tmv_zero();
		break;
	case SERVO_LOCKED:
		clock_ppb(c->clkid, -adj);
		break;
	}
	return state;
}

void clock_sync_interval(struct clock *c, int n)
{
	int shift = c->dds.freq_est_interval - n;
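
	/*
	 * Both freq_est_interval and the sync interval n are log base two
	 * values, so the difference gives the number of sync messages per
	 * frequency estimate.  For example, freq_est_interval = 2 (a 4 s
	 * window) with one sync per second (n = 0) yields max_count = 4.
	 */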

	if (shift < 0)
		shift = 0;

	c->fest.max_count = (1 << shift);
}

struct timePropertiesDS *clock_time_properties(struct clock *c)
{
	return &c->tds;
}

static void handle_state_decision_event(struct clock *c)
{
	struct foreign_clock *best = NULL, *fc;
	int fresh_best = 0, i;

	for (i = 0; i < c->nports; i++) {
		fc = port_compute_best(c->port[i]);
		if (!fc)
			continue;
		if (!best || dscmp(&fc->dataset, &best->dataset) > 0)
			best = fc;
	}

	if (!best)
		return;

	pr_notice("selected best master clock %s",
		  cid2str(&best->dataset.identity));

	if (!cid_eq(&best->dataset.identity, &c->best_id)) {
		clock_freq_est_reset(c);
		mave_reset(c->avg_delay);
		fresh_best = 1;
	}

	c->best = best;
	c->best_id = best->dataset.identity;

	for (i = 0; i < c->nports; i++) {
		enum port_state ps;
		enum fsm_event event;
		ps = bmc_state_decision(c, c->port[i]);
		switch (ps) {
		case PS_LISTENING:
			event = EV_NONE;
			break;
		case PS_GRAND_MASTER:
			clock_update_grandmaster(c);
			event = EV_RS_GRAND_MASTER;
			break;
		case PS_MASTER:
			event = EV_RS_MASTER;
			break;
		case PS_PASSIVE:
			event = EV_RS_PASSIVE;
			break;
		case PS_SLAVE:
			clock_update_slave(c);
			event = EV_RS_SLAVE;
			break;
		default:
			event = EV_FAULT_DETECTED;
			break;
		}
		port_dispatch(c->port[i], event, fresh_best);
	}
}