ptp4l: flush old cached packets

This patch fixes a bug with time mysteriously jumping back and forth:

ptp4l[930.687]: port 1: UNCALIBRATED to SLAVE on MASTER_CLOCK_SELECTED
ptp4l[931.687]: master offset         17 s2 freq  +33014 path delay      2728
ptp4l[932.687]: master offset        -74 s2 freq  +32928 path delay      2734
ptp4l[933.687]: master offset          2 s2 freq  +32982 path delay      2734
ptp4l[934.687]: master offset         -3 s2 freq  +32977 path delay      2728
ptp4l[935.687]: master offset         17 s2 freq  +32996 path delay      2729
ptp4l[936.687]: master offset        -10 s2 freq  +32974 path delay      2729
ptp4l[937.687]: master offset         35 s2 freq  +33016 path delay      2727
ptp4l[938.686]: master offset 60001851388 s2 freq +62499999 path delay      2728
ptp4l[939.687]: master offset  -62464938 s2 freq -62431946 path delay      2728

A follow up message that arrives out of order is cached as the last follow
up. Before the state machine changes to UNCALIBRATED, all sync and follow up
messages are discarded. If we get into that state between a sync and its
follow up, the follow up is cached. When no real reordering is happening, it
stays cached forever.
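
To make the mechanism concrete, here is a minimal sketch of the two-step
sync/follow up bookkeeping. The types and names are invented for
illustration; this is not the actual port.c code:

#include <stdbool.h>
#include <stdint.h>

struct msg {
	uint16_t sequence_id;
	int64_t  timestamp_ns;	/* origin time (follow_up) or ingress time (sync) */
};

struct two_step_state {
	struct msg *last_sync;		/* sync waiting for its follow_up */
	struct msg *last_follow_up;	/* follow_up waiting for its sync */
};

/* Returns true when a complete sync/follow_up pair was formed. */
bool handle_follow_up(struct two_step_state *s, struct msg *fup)
{
	if (s->last_sync && s->last_sync->sequence_id == fup->sequence_id) {
		/* Normal two-step case: the pair is complete. */
		s->last_sync = NULL;
		return true;
	}
	/*
	 * Out of order: cache the follow_up until a sync with the same
	 * sequenceId shows up.  If that sync never arrives, because the
	 * message was not really reordered but orphaned by a state change,
	 * nothing ever clears this pointer again.
	 */
	s->last_follow_up = fup;
	return false;
}

In the real code the cached message lives in p->last_follow_up, which is
exactly what the new flush_last_sync() below releases.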

When we restart the master, it starts numbering the messages from zero again.
The initial synchronization doesn't always take the same amount of time, so
it can happen that we get into UNCALIBRATED a little bit faster than before,
managing to get the sync message with the sequenceId that we missed last
time. As it has the same sequenceId as the cached (old) follow up message, it
is incorrectly assumed that the two belong together.
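
The huge value in the log is consistent with this: pairing the origin
timestamp carried by the stale follow up with a fresh sync ingress timestamp
yields an offset roughly equal to the age of the stale message. A small
worked example, with timestamps invented so that the result reproduces the
value from the log:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t t1_stale   = 100000000000LL; /* origin ts from the old, cached follow_up (ns) */
	int64_t t2_ingress = 160001854116LL; /* ingress ts of the new sync it is paired with (ns) */
	int64_t path_delay = 2728;           /* ns, as reported in the log */

	/* offset from master = (t2 - t1) - path delay */
	printf("master offset %" PRId64 " ns\n",
	       t2_ingress - t1_stale - path_delay);
	return 0;
}

The servo then chases that bogus offset, which is what produces the large
negative offset and frequency on the last line of the log.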

Flush the cache when changing to UNCALIBRATED. Also, do the same for the
other cached packets.

Signed-off-by: Jiri Benc <jbenc@redhat.com>
master
Jiri Benc 2013-07-19 10:43:09 +02:00 committed by Richard Cochran
parent 46db400589
commit 48f4dcbeb2
1 changed file with 24 additions and 3 deletions

port.c

@@ -1115,10 +1115,8 @@ static int port_is_enabled(struct port *p)
 	return 1;
 }
 
-static void port_disable(struct port *p)
+static void flush_last_sync(struct port *p)
 {
-	int i;
-
 	if (p->last_follow_up) {
 		msg_put(p->last_follow_up);
 		p->last_follow_up = NULL;
@@ -1127,10 +1125,18 @@ static void port_disable(struct port *p)
 		msg_put(p->last_sync);
 		p->last_sync = NULL;
 	}
+}
+
+static void flush_delay_req(struct port *p)
+{
 	if (p->delay_req) {
 		msg_put(p->delay_req);
 		p->delay_req = NULL;
 	}
+}
+
+static void flush_peer_delay(struct port *p)
+{
 	if (p->peer_delay_req) {
 		msg_put(p->peer_delay_req);
 		p->peer_delay_req = NULL;
@@ -1143,6 +1149,15 @@ static void port_disable(struct port *p)
 		msg_put(p->peer_delay_fup);
 		p->peer_delay_fup = NULL;
 	}
+}
+
+static void port_disable(struct port *p)
+{
+	int i;
+
+	flush_last_sync(p);
+	flush_delay_req(p);
+	flush_peer_delay(p);
 
 	p->best = NULL;
 	free_foreign_masters(p);
@@ -1783,6 +1798,9 @@ static void port_e2e_transition(struct port *p, enum port_state next)
 		port_set_announce_tmo(p);
 		break;
 	case PS_UNCALIBRATED:
+		flush_last_sync(p);
+		flush_delay_req(p);
+		/* fall through */
 	case PS_SLAVE:
 		port_set_announce_tmo(p);
 		port_set_delay_tmo(p);
@@ -1820,6 +1838,9 @@ static void port_p2p_transition(struct port *p, enum port_state next)
 		port_set_announce_tmo(p);
 		break;
 	case PS_UNCALIBRATED:
+		flush_last_sync(p);
+		flush_peer_delay(p);
+		/* fall through */
 	case PS_SLAVE:
 		port_set_announce_tmo(p);
 		break;