Fix crash in hybrid E2E mode.
Richard Hill reported an occasional NULL pointer dereference in
port_delay_request() when in hybrid mode:

	if (p->hybrid_e2e) {
		struct ptp_message *dst = TAILQ_FIRST(&p->best->messages);
		msg->address = dst->address;
		...
	}

The code assumes that the p->best->messages list can't be empty because:

The function port_delay_request() is called only when FD_DELAY_TIMER
expires, and that timer is only set by port_set_delay_tmo(), which is
called:

1. from process_delay_resp(), but only when the state is UNCALIBRATED
   or SLAVE.
2. from port_e2e_transition(), but only when the state is UNCALIBRATED
   or SLAVE.

Looking at handle_state_decision_event(), a port can only enter
UNCALIBRATED or SLAVE when it has a valid foreign master record, i.e.
p->best->messages is not empty.  A port also only clears
p->best->messages when it leaves UNCALIBRATED or SLAVE, at which point
the FD_DELAY_TIMER is also cleared.

*However*, the p->best->messages list *can* be empty if the
FD_ANNOUNCE_TIMER and the FD_DELAY_TIMER expire at the same time.  In
that case, the poll() call indicates events on both file descriptors.
The announce timeout is handled like this:

	case FD_ANNOUNCE_TIMER:
	case FD_SYNC_RX_TIMER:
		if (p->best)
			fc_clear(p->best);

If the announce timeout is processed first, the subsequent
port_delay_request() call dereferences the NULL pointer returned by
TAILQ_FIRST on the now empty message list.

This patch fixes the issue by re-ordering the timer file descriptors
within the polling list, so that the delay timer is handled before the
announce and sync receive timeouts.

Signed-off-by: Richard Cochran <richardcochran@gmail.com>
Reported-by: Richard Hill <plonta@gmx.de>
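To make the ordering argument concrete, here is a minimal standalone sketch of a
poll()-based dispatch loop in the same spirit as the one the patch relies on. It
is not linuxptp code: the trimmed-down enum, the dispatch() helper, and the
printf() placeholders are invented for illustration. The point is only that
handlers run in ascending fd-index order within a single poll() wakeup, so
placing FD_DELAY_TIMER before FD_ANNOUNCE_TIMER and FD_SYNC_RX_TIMER means the
delay request is sent while the foreign master messages still exist, and only
afterwards does the announce/sync handler clear them.

#include <poll.h>
#include <stdio.h>

/* Timer indices in the same relative order as the patched fd.h:
 * the DELAY timer comes before the ANNOUNCE and SYNC_RX timers. */
enum {
        FD_EVENT,
        FD_GENERAL,
        FD_DELAY_TIMER,
        FD_ANNOUNCE_TIMER,
        FD_SYNC_RX_TIMER,
        N_POLLFD,
};

/* Walk the pollfd array in ascending index order, as a poll()-based
 * event loop typically does, and dispatch each expired timer. */
static void dispatch(struct pollfd *fds)
{
        int i;

        for (i = 0; i < N_POLLFD; i++) {
                if (!(fds[i].revents & POLLIN))
                        continue;
                switch (i) {
                case FD_DELAY_TIMER:
                        /* Runs first: the foreign master message list is
                         * still populated, so the delay request can copy
                         * the destination address safely. */
                        printf("delay timeout: send delay request\n");
                        break;
                case FD_ANNOUNCE_TIMER:
                case FD_SYNC_RX_TIMER:
                        /* Runs later: clearing the foreign master record
                         * can no longer pull the list out from under the
                         * delay request handler. */
                        printf("announce/sync_rx timeout: clear foreign master\n");
                        break;
                }
        }
}

int main(void)
{
        struct pollfd fds[N_POLLFD] = { { 0 } };

        /* Simulate the racy wakeup: both timers expire in one poll() call. */
        fds[FD_DELAY_TIMER].revents = POLLIN;
        fds[FD_ANNOUNCE_TIMER].revents = POLLIN;
        dispatch(fds);
        return 0;
}

With this sketch, the simulated wakeup prints the delay request line before the
announce/sync line, which is exactly the ordering the enum change guarantees.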
parent cacf272d1c
commit a8b66ce7f2
fd.h | 10

@@ -22,12 +22,18 @@
 
 #define N_TIMER_FDS 6
 
+/*
+ * The order matters here. The DELAY timer must appear before the
+ * ANNOUNCE and SYNC_RX timers in order to correctly handle the case
+ * when the DELAY timer and one of the other two expire during the
+ * same call to poll().
+ */
 enum {
 	FD_EVENT,
 	FD_GENERAL,
+	FD_DELAY_TIMER,
 	FD_ANNOUNCE_TIMER,
 	FD_SYNC_RX_TIMER,
-	FD_DELAY_TIMER,
 	FD_QUALIFICATION_TIMER,
 	FD_MANNO_TIMER,
 	FD_SYNC_TX_TIMER,
@@ -35,6 +41,8 @@ enum {
 	N_POLLFD,
 };
 
+#define FD_FIRST_TIMER FD_DELAY_TIMER
+
 struct fdarray {
 	int fd[N_POLLFD];
 };
port.c | 6

@@ -1545,7 +1545,7 @@ void port_disable(struct port *p)
 	transport_close(p->trp, &p->fda);
 
 	for (i = 0; i < N_TIMER_FDS; i++) {
-		close(p->fda.fd[FD_ANNOUNCE_TIMER + i]);
+		close(p->fda.fd[FD_FIRST_TIMER + i]);
 	}
 
 	/* Keep rtnl socket to get link status info. */
@@ -1590,7 +1590,7 @@ int port_initialize(struct port *p)
 		goto no_tropen;
 
 	for (i = 0; i < N_TIMER_FDS; i++) {
-		p->fda.fd[FD_ANNOUNCE_TIMER + i] = fd[i];
+		p->fda.fd[FD_FIRST_TIMER + i] = fd[i];
 	}
 
 	if (port_set_announce_tmo(p))
@@ -1628,7 +1628,7 @@ static int port_renew_transport(struct port *p)
 		return 0;
 	}
 	transport_close(p->trp, &p->fda);
-	port_clear_fda(p, FD_ANNOUNCE_TIMER);
+	port_clear_fda(p, FD_FIRST_TIMER);
 	res = transport_open(p->trp, p->iface, &p->fda, p->timestamping);
 	/* Need to call clock_fda_changed even if transport_open failed in
 	 * order to update clock to the now closed descriptors. */