Update README and add contrib dir
contrib/tsn/model/gPTP.cc (new file, 771 lines)
@@ -0,0 +1,771 @@
#include "gPTP.h"

#include "ns3/log.h"
#include "ns3/simulator.h"
#include "ns3/nstime.h"
#include "ns3/names.h"

#include "ns3/clock.h"
#include "ns3/clock-virtual.h"
#include "ns3/tsn-net-device.h"
#include "ns3/ethernet-header2.h"
#include "ns3/gPTP-header.h"
#include "ns3/gPTP-packet.h"

#include <tgmath.h>

namespace ns3
{

NS_LOG_COMPONENT_DEFINE("GPTP");

NS_OBJECT_ENSURE_REGISTERED(GPTP);

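// Implementation of the GPTP application declared in gPTP.h. The application
// runs the peer-delay (Pdelay) measurement on every registered port and a
// two-step Sync/Follow_Up distribution per gPTP domain; SLAVE ports use the
// received Follow_Up to compute an offset and correct the per-domain clock.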
TypeId
GPTP::GetTypeId()
{
    static TypeId tid =
        TypeId("ns3::GPTP")
            .SetParent<Application>()
            .SetGroupName("Applications")
            .AddConstructor<GPTP>()
            .AddAttribute("PdelayInterval",
                          "Period between two emissions of Pdelay_Req messages",
                          TimeValue(Seconds(1)),
                          MakeTimeAccessor(&GPTP::m_pdelayInterval),
                          MakeTimeChecker())
            .AddAttribute("SyncInterval",
                          "Period between two emissions of Sync messages",
                          TimeValue(MilliSeconds(125)),
                          MakeTimeAccessor(&GPTP::m_syncInterval),
                          MakeTimeChecker())
            .AddAttribute("Priority",
                          "GPTP frame priority",
                          UintegerValue(7),
                          MakeUintegerAccessor(&GPTP::m_priority),
                          MakeUintegerChecker<uint8_t>())
            .AddTraceSource("Nr",
                            "Neighbor rate ratio measured on a port",
                            MakeTraceSourceAccessor(&GPTP::m_NrTrace),
                            "ns3::GPTP::m_NrTrace")
            .AddTraceSource("Pdelay",
                            "Mean link delay (peer delay) measured on a port",
                            MakeTraceSourceAccessor(&GPTP::m_PdelayTrace),
                            "ns3::GPTP::m_PdelayTrace")
            .AddTraceSource("Offset",
                            "Offset applied to the local clock",
                            MakeTraceSourceAccessor(&GPTP::m_OffsetTrace),
                            "ns3::TracedValueCallback::Time")
            .AddTraceSource("ClockBeforeCorrection",
                            "Clock value before correction",
                            MakeTraceSourceAccessor(&GPTP::m_ClockBeforeCorrectionTrace),
                            "ns3::TracedValueCallback::Time")
            .AddTraceSource("ClockAfterCorrection",
                            "Clock value after correction",
                            MakeTraceSourceAccessor(&GPTP::m_ClockAfterCorrectionTrace),
                            "ns3::TracedValueCallback::Time");

    return tid;
}

GPTP::GPTP()
{
    NS_LOG_FUNCTION(this);
}

GPTP::~GPTP()
{
    NS_LOG_FUNCTION(this);
}

void
GPTP::SetMainClock(Ptr<Clock> c)
{
    NS_LOG_FUNCTION(this);
    m_mainClock = c;
}

void
GPTP::AddDomain(uint8_t domainId)
{
    NS_ASSERT_MSG(m_mainClock != nullptr,
                  "Main clock not set using 'void GPTP::SetMainClock(Ptr<Clock> c)' before calling 'void GPTP::AddDomain(uint8_t domainId)'");
    Ptr<VirtualClock> c = CreateObject<VirtualClock>();
    c->SetRefClock(m_mainClock);
    AddDomain(domainId, c);
    m_node->GetObject<TsnNode>()->AddClock(c);
}

void
GPTP::AddDomain(uint8_t domainId, Ptr<Clock> clock)
{
    for(int i=0; i < (int)m_domains.size(); i++){
        NS_ASSERT_MSG(m_domains[i].domainId != domainId,
                      "Domain #" << (uint16_t)domainId << " already exists on this node");
        NS_ASSERT_MSG(m_domains[i].clock != clock,
                      "Clock " << clock << " already exists on this node");
    }
    m_domains.insert(m_domains.end(), {domainId, clock});
}

void
GPTP::AddPort(Ptr<TsnNetDevice> net, GPTPportState state, uint8_t domainId)
{
    NS_LOG_FUNCTION(this);
    bool domainExist = false;
    for(int i=0; i < (int)m_domains.size(); i++){
        if (m_domains[i].domainId == domainId)
        {
            domainExist = true;
        }
    }
    NS_ASSERT_MSG(domainExist,
                  "Domain #" << (uint16_t)domainId << " was not created using AddDomain(uint8_t domainId, Ptr<Clock> clock)");

    int id = -1;
    for(int i=0; i < (int)m_ports.size(); i++){
        if(m_ports[i].net == net)
        {
            id = i;
        }
    }

    if (id == -1)
    {
        //Net device not in m_ports
        GPTPPort entry = {net};
        m_ports.insert(m_ports.end(), entry);
        GPTPPortPerDomainStruc perDomain = {domainId, state};
        m_ports.back().domains.insert(m_ports.back().domains.end(), perDomain);
    }
    else
    {
        //Net device in m_ports
        GPTPPortPerDomainStruc perDomain = {domainId, state};
        m_ports[id].domains.insert(m_ports[id].domains.end(), perDomain);
    }
}

void
GPTP::DoDispose()
{
    NS_LOG_FUNCTION(this);
    Application::DoDispose();
}

void
GPTP::StartApplication()
{
    NS_LOG_FUNCTION(this);
    //Build m_baseClockIndentity from the MAC address of the first port,
    //following IEEE 1588-2019, clause 7.5.2.2.2.2
    uint8_t macBuffer[6];
    m_ports[0].net->GetAddress().CopyTo(macBuffer);
    m_baseClockIndentity = (((uint64_t)macBuffer[0] << 56) |
                            ((uint64_t)macBuffer[1] << 48) |
                            ((uint64_t)macBuffer[2] << 40) |
                            ((uint64_t)macBuffer[3] << 32) |
                            ((uint64_t)macBuffer[4] << 24) |
                            ((uint64_t)macBuffer[5] << 16) |
                            0);

    //Register TX and RX timestamp callbacks on every port
    for(int i=0; i < (int)m_ports.size(); i++){
        m_ports[i].net->SetTransmitCallbackWithTimestamp(MakeCallback(&GPTP::TXCallback, this));
        m_ports[i].net->SetReceiveCallbackWithTimestamp(MakeCallback(&GPTP::RXCallback, this));
    }

    //Send the first Pdelay_Req
    StartPdelayMechanism();

    //Per domain, if this node is the grandmaster, send the first Sync
    for(int i=0; i < (int)m_domains.size(); i++)
    {
        // NS_LOG_INFO("Domain #" << (uint16_t)m_domains[i]);
        if(IsGm(m_domains[i].domainId)){
            StartSyncDistributionMechanism(m_domains[i].domainId);
        }
    }
}

void
GPTP::StopApplication()
{
    NS_LOG_FUNCTION(this);
    Simulator::Cancel(m_pdelayReqTxEvent);
    Simulator::Cancel(m_syncTxEvent);
}

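// Peer-delay measurement cycle: send a Pdelay_Req with the current sequence
// number on every port, increment the sequence number, and re-schedule the
// next cycle after m_pdelayInterval.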
void
GPTP::StartPdelayMechanism()
{
    NS_LOG_FUNCTION(this);
    for(int i=0; i < (int)m_ports.size(); i++){
        SendPdelayRequest(m_ports[i], (uint16_t)i);
    }
    m_pdelaySequenceId++;
    //Schedule next Pdelay mechanism
    m_pdelayReqTxEvent = Simulator::Schedule(m_pdelayInterval, &GPTP::StartPdelayMechanism, this);
}

void
GPTP::StartSyncDistributionMechanism(uint8_t domainId)
{
    NS_LOG_FUNCTION(this);

    SendSync(domainId);
    //Schedule next sync emission
    m_syncTxEvent = Simulator::Schedule(m_syncInterval, &GPTP::StartSyncDistributionMechanism, this, domainId);
}

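// Emit a two-step Sync for the given domain on every port that is asCapable
// and MASTER for that domain. The grandmaster increments the sequence number;
// a bridge reuses the sequence number copied from the upstream Sync. The
// packet UID is recorded so that TXCallback can match the egress timestamp
// and trigger the corresponding Follow_Up.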
void
GPTP::SendSync(uint8_t domainId){
    NS_LOG_FUNCTION(this);
    for(int i=0; i < (int)m_ports.size(); i++){
        for(int j=0; j < (int)m_ports[i].domains.size(); j++){
            if(m_ports[i].domains[j].domainId == domainId){
                if(m_ports[i].asCapableAcrossDomains && m_ports[i].domains[j].state == MASTER){
                    if(IsGm(domainId)){
                        m_ports[i].domains[j].syncPtr.sequenceId++;
                    }
                    Ptr<PdelayPayload> payload = Create<PdelayPayload>();
                    Ptr<Packet> p = payload->GetPkt();
                    GptpHeader gptpHeader;
                    gptpHeader.SetMessageType(MESSAGETYPE_SYNC);
                    gptpHeader.SetMessageLenght(44);
                    gptpHeader.SetDomainNumber(domainId);
                    gptpHeader.SetTwoStepFlag(true);
                    gptpHeader.SetCorrectionField(0);
                    gptpHeader.SetClockIdentity(m_baseClockIndentity | domainId);
                    gptpHeader.SetPortIdentity(i);
                    gptpHeader.SetSequenceId(m_ports[i].domains[j].syncPtr.sequenceId);

                    uint8_t logSyncInterval = -std::log2(m_syncInterval.GetSeconds());
                    gptpHeader.SetLogMessageInterval(logSyncInterval);
                    p->AddHeader(gptpHeader);

                    m_ports[i].domains[j].syncPtr.pktUid = p->GetUid();
                    m_ports[i].domains[j].syncPtr.pktSent = true;
                    m_ports[i].net->SendWithSpecificFIFO(p, GPTP_MULTICASTMAC, GPTP_ETHERTYPE, m_priority);
                }
            }
        }
    }
}

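// Build and send the Follow_Up matching the Sync just transmitted on the
// given port/domain. On the grandmaster the cumulative rate offset and the
// correction field are reset to zero; on a bridge, the residence time plus
// the upstream link delay (converted via the rate ratio r) is accumulated
// into the correction field (scaled by 2^16), and the cumulative scaled rate
// offset is updated from r.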
void
GPTP::SendFollowUp(GPTPPort port, uint16_t portId, uint8_t id){
    NS_LOG_FUNCTION(this);
    //Compute rateRatio and correctionField
    if(IsGm(m_ports[portId].domains[id].domainId))
    {
        m_ports[portId].domains[id].syncPtr.cumulativeScaledRateOffset = 0;
        m_ports[portId].domains[id].syncPtr.correctionField = 0;
    }
    else
    {
        Time syncEventIngressTimestamp = Time(Seconds(m_ports[portId].domains[id].syncPtr.syncReceiptTimestampSecond)) + Time(NanoSeconds(m_ports[portId].domains[id].syncPtr.syncReceiptTimestampNanoSecond));
        Time syncEventEgressTimestamp = Time(Seconds(m_ports[portId].domains[id].syncPtr.syncOriginTimestampSecond)) + Time(NanoSeconds(m_ports[portId].domains[id].syncPtr.syncOriginTimestampNanoSecond));

        Time upstreamTxTime = syncEventIngressTimestamp - Time(NanoSeconds(m_ports[portId].meanLinkDelay / m_ports[portId].neighborRateRatio)); //STD 802.1AS : 10.2.2.1.7
        double r = m_ports[portId].neighborRateRatio*((m_ports[portId].domains[id].syncPtr.cumulativeScaledRateOffset * pow(2,-41)) + 1);

        m_ports[portId].domains[id].syncPtr.correctionField = m_ports[portId].domains[id].syncPtr.correctionField + r*((double)syncEventEgressTimestamp.GetNanoSeconds()-(double)upstreamTxTime.GetNanoSeconds())*pow(2,16);
        m_ports[portId].domains[id].syncPtr.cumulativeScaledRateOffset = (r - 1.0)*pow(2,41);
    }

    Ptr<FollowUpPayload> payload = Create<FollowUpPayload>(m_ports[portId].domains[id].syncPtr.preciseOriginTimestampSecond,
                                                           m_ports[portId].domains[id].syncPtr.preciseOriginTimestampNanoSecond,
                                                           m_ports[portId].domains[id].syncPtr.cumulativeScaledRateOffset,
                                                           0,
                                                           0, 0, 0,
                                                           0);
    Ptr<Packet> p = payload->GetPkt();
    GptpHeader gptpHeader;
    gptpHeader.SetMessageType(MESSAGETYPE_FOLLOWUP);
    gptpHeader.SetMessageLenght(76);
    gptpHeader.SetDomainNumber(m_ports[portId].domains[id].domainId);
    gptpHeader.SetCorrectionField(m_ports[portId].domains[id].syncPtr.correctionField);
    gptpHeader.SetClockIdentity(m_baseClockIndentity | m_ports[portId].domains[id].domainId);
    gptpHeader.SetPortIdentity(portId);
    gptpHeader.SetSequenceId(m_ports[portId].domains[id].syncPtr.sequenceId);

    uint8_t logSyncInterval = -std::log2(m_syncInterval.GetSeconds());
    gptpHeader.SetLogMessageInterval(logSyncInterval);
    p->AddHeader(gptpHeader);

    port.net->SendWithSpecificFIFO(p, GPTP_MULTICASTMAC, GPTP_ETHERTYPE, m_priority);
}

void
GPTP::SendPdelayRequest(GPTPPort port, uint16_t portId){
    NS_LOG_FUNCTION(this);

    m_ports[portId].rcvdPdelayResp = false;
    m_ports[portId].rcvdPdelayRespFollowUp = false;
    m_ports[portId].txPdelayReqPtr.correctionField = 0;
    m_ports[portId].txPdelayReqPtr.sourceClockIndentity = m_baseClockIndentity;
    m_ports[portId].txPdelayReqPtr.sourcePortIndentity = portId;
    m_ports[portId].txPdelayReqPtr.sequenceId = m_pdelaySequenceId;
    m_ports[portId].txPdelayReqPtr.requestOriginTimestampSecond = 0;
    m_ports[portId].txPdelayReqPtr.requestOriginTimestampNanoSecond = 0;

    Ptr<PdelayPayload> payload = Create<PdelayPayload>();
    Ptr<Packet> p = payload->GetPkt();
    GptpHeader gptpHeader;
    gptpHeader.SetMessageType(MESSAGETYPE_PDELAYREQ);
    gptpHeader.SetMessageLenght(54);
    gptpHeader.SetCorrectionField(m_ports[portId].txPdelayReqPtr.correctionField);
    gptpHeader.SetClockIdentity(m_ports[portId].txPdelayReqPtr.sourceClockIndentity);
    gptpHeader.SetPortIdentity(m_ports[portId].txPdelayReqPtr.sourcePortIndentity);
    gptpHeader.SetSequenceId(m_ports[portId].txPdelayReqPtr.sequenceId);

    uint8_t logPdelayInterval = -std::log2(m_pdelayInterval.GetSeconds());
    gptpHeader.SetLogMessageInterval(logPdelayInterval);
    p->AddHeader(gptpHeader);

    m_ports[portId].txPdelayReqPtr.pktUid = p->GetUid();
    m_ports[portId].txPdelayReqPtr.pktSent = true;
    port.net->SendWithSpecificFIFO(p, GPTP_MULTICASTMAC, GPTP_ETHERTYPE, m_priority);
}

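// Answer a received Pdelay_Req: the Pdelay_Resp carries the request receipt
// timestamp (t2) and echoes the requester's clock/port identity; since the
// response is two-step, the matching Pdelay_Resp_Follow_Up carrying the
// response egress timestamp (t3) is sent later from TXCallback.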
void
GPTP::SendPdelayResp(Ptr<TsnNetDevice> dev, GptpHeader rxHeader, Time rxTimestamp)
{
    uint16_t portId = GetPortIdFromNetDev(dev);

    m_ports[portId].txPdelayRespPtr.correctionField = 0;
    m_ports[portId].txPdelayRespPtr.sourceClockIndentity = m_baseClockIndentity;
    m_ports[portId].txPdelayRespPtr.sourcePortIndentity = portId;
    m_ports[portId].txPdelayRespPtr.sequenceId = rxHeader.GetSequenceId();
    m_ports[portId].txPdelayRespPtr.requestReceiptTimestampSecond = rxTimestamp.GetSeconds();
    m_ports[portId].txPdelayRespPtr.requestReceiptTimestampNanoSecond = rxTimestamp.GetNanoSeconds() % NANOINSECOND;
    m_ports[portId].txPdelayRespPtr.requestingClockIdentity = rxHeader.GetClockIdentity();
    m_ports[portId].txPdelayRespPtr.requestingPortIdentity = rxHeader.GetPortIdentity();
    m_ports[portId].txPdelayRespPtr.responseReceiptTimestampSecond = 0;
    m_ports[portId].txPdelayRespPtr.responseReceiptTimestampNanoSecond = 0;

    Ptr<PdelayPayload> payload = Create<PdelayPayload>(m_ports[portId].txPdelayRespPtr.requestReceiptTimestampSecond,
                                                       m_ports[portId].txPdelayRespPtr.requestReceiptTimestampNanoSecond,
                                                       m_ports[portId].txPdelayRespPtr.requestingClockIdentity,
                                                       m_ports[portId].txPdelayRespPtr.requestingPortIdentity);
    Ptr<Packet> p = payload->GetPkt();
    GptpHeader gptpHeader;
    gptpHeader.SetMessageType(MESSAGETYPE_PDELAYRESP);
    gptpHeader.SetMessageLenght(54);
    gptpHeader.SetTwoStepFlag(true);
    gptpHeader.SetClockIdentity(m_ports[portId].txPdelayRespPtr.sourceClockIndentity);
    gptpHeader.SetPortIdentity(m_ports[portId].txPdelayRespPtr.sourcePortIndentity);
    gptpHeader.SetSequenceId(m_ports[portId].txPdelayRespPtr.sequenceId);
    gptpHeader.SetLogMessageInterval(0x7F);
    p->AddHeader(gptpHeader);

    m_ports[portId].txPdelayRespPtr.pktUid = p->GetUid();
    m_ports[portId].txPdelayRespPtr.pktSent = true;
    dev->SendWithSpecificFIFO(p, GPTP_MULTICASTMAC, GPTP_ETHERTYPE, m_priority);
}

void
GPTP::SendPdelayRespFup(Ptr<TsnNetDevice> dev, Time txTimestamp)
{
    uint16_t portId = GetPortIdFromNetDev(dev);
    Ptr<PdelayPayload> payload = Create<PdelayPayload>(txTimestamp.GetSeconds(),
                                                       txTimestamp.GetNanoSeconds() % NANOINSECOND,
                                                       m_ports[portId].txPdelayRespPtr.requestingClockIdentity,
                                                       m_ports[portId].txPdelayRespPtr.requestingPortIdentity);
    Ptr<Packet> p = payload->GetPkt();
    GptpHeader gptpHeader;
    gptpHeader.SetMessageType(MESSAGETYPE_PDELAYRESPFOLLOWUP);
    gptpHeader.SetMessageLenght(54);
    gptpHeader.SetClockIdentity(m_ports[portId].txPdelayRespPtr.sourceClockIndentity);
    gptpHeader.SetPortIdentity(m_ports[portId].txPdelayRespPtr.sourcePortIndentity);
    gptpHeader.SetSequenceId(m_ports[portId].txPdelayRespPtr.sequenceId);
    gptpHeader.SetLogMessageInterval(0x7F);
    p->AddHeader(gptpHeader);

    dev->SendWithSpecificFIFO(p, GPTP_MULTICASTMAC, GPTP_ETHERTYPE, m_priority);
}

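// Handle a received Sync on a SLAVE port: record the ingress timestamp
// (syncReceiptTimestamp) together with the sender's clock/port identity and
// sequence number; the stored values are consumed when the matching
// Follow_Up arrives.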
void
GPTP::handleSync(Ptr<TsnNetDevice> dev, GptpHeader header, Ptr<Packet> pkt, Time rxTimestamp)
{
    NS_LOG_FUNCTION(this);
    uint16_t portId = GetPortIdFromNetDev(dev);
    int id = GetDomainKeyFromNetDev(dev, portId, header.GetDomainNumber());

    if(id != -1 && m_ports[portId].domains[id].state == SLAVE)
    {
        NS_LOG_INFO("Handle Sync for domain #" << (uint16_t)m_ports[portId].domains[id].domainId);
        m_ports[portId].domains[id].syncPtr.correctionField = 0;
        m_ports[portId].domains[id].syncPtr.sourceClockIndentity = header.GetClockIdentity();
        m_ports[portId].domains[id].syncPtr.sourcePortIndentity = header.GetPortIdentity();
        m_ports[portId].domains[id].syncPtr.sequenceId = header.GetSequenceId();
        m_ports[portId].domains[id].syncPtr.syncOriginTimestampSecond = 0;
        m_ports[portId].domains[id].syncPtr.syncOriginTimestampNanoSecond = 0;
        m_ports[portId].domains[id].syncPtr.syncReceiptTimestampSecond = rxTimestamp.GetSeconds();
        m_ports[portId].domains[id].syncPtr.syncReceiptTimestampNanoSecond = rxTimestamp.GetNanoSeconds() % NANOINSECOND;
    }
}

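// Handle a Follow_Up matching a previously received Sync on a SLAVE port:
// read preciseOriginTimestamp and cumulativeScaledRateOffset from the payload,
// take the correction field from the header, compute the offset to the
// grandmaster and apply it to the domain clock, then copy the sync state to
// the MASTER ports of the same domain and forward the Sync downstream.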
void
GPTP::handleFollowUp(Ptr<TsnNetDevice> dev, GptpHeader header, Ptr<Packet> pkt, Time rxTimestamp)
{
    NS_LOG_FUNCTION(this);
    uint16_t portId = GetPortIdFromNetDev(dev);
    int id = GetDomainKeyFromNetDev(dev, portId, header.GetDomainNumber());
    if(id != -1 && m_ports[portId].domains[id].state == SLAVE)
    {
        if(m_ports[portId].domains[id].syncPtr.sequenceId == header.GetSequenceId() &&
           m_ports[portId].domains[id].syncPtr.sourceClockIndentity == header.GetClockIdentity() &&
           m_ports[portId].domains[id].syncPtr.sourcePortIndentity == header.GetPortIdentity())
        {
            NS_LOG_INFO("Handle FollowUp for domain #" << (uint16_t)m_ports[portId].domains[id].domainId);
            uint8_t payload[42] = {};
            pkt->CopyData(payload, 42);

            uint64_t timestampSecond = 0;
            uint32_t timestampNanoSecond = 0;
            uint32_t cumulativeScaledRateOffset = 0;
            readFollowUpPayload(payload, timestampSecond, timestampNanoSecond, cumulativeScaledRateOffset);

            double r = (cumulativeScaledRateOffset * pow(2,-41)) + 1;
            m_ports[portId].domains[id].syncPtr.preciseOriginTimestampSecond = timestampSecond;
            m_ports[portId].domains[id].syncPtr.preciseOriginTimestampNanoSecond = timestampNanoSecond;
            m_ports[portId].domains[id].syncPtr.correctionField = header.GetCorrectionField();
            m_ports[portId].domains[id].syncPtr.cumulativeScaledRateOffset = cumulativeScaledRateOffset;

            //Correct the local domain clock
            Time offset = computeOffset(portId, id, r*m_ports[portId].neighborRateRatio);
            Ptr<Clock> domainClock = getClockFromDomainId(m_ports[portId].domains[id].domainId);
            domainClock->SetOffsetTime(offset);
            m_OffsetTrace(offset);
            m_ClockBeforeCorrectionTrace(domainClock->GetLocalTime() - offset);
            m_ClockAfterCorrectionTrace(domainClock->GetLocalTime());

            // NS_LOG_INFO("At " << Simulator::Now().GetNanoSeconds() << " " << Names::FindName(m_ports[portId].net) << " : " << m_clock->GetLocalTime().GetNanoSeconds() << " (" << m_clock->GetLocalTime().GetNanoSeconds()-Simulator::Now().GetNanoSeconds() <<")");

            //Forward sync to the MASTER ports on this node
            for(int i=0; i < (int)m_ports.size(); i++){
                for (int j=0; j < (int)m_ports[i].domains.size(); j++)
                {
                    if(m_ports[i].domains[j].domainId == m_ports[portId].domains[id].domainId && m_ports[i].domains[j].state == MASTER)
                    {
                        m_ports[i].domains[j].syncPtr.sequenceId = m_ports[portId].domains[id].syncPtr.sequenceId;
                        m_ports[i].domains[j].syncPtr.correctionField = m_ports[portId].domains[id].syncPtr.correctionField;
                        m_ports[i].domains[j].syncPtr.preciseOriginTimestampSecond = m_ports[portId].domains[id].syncPtr.preciseOriginTimestampSecond;
                        m_ports[i].domains[j].syncPtr.preciseOriginTimestampNanoSecond = m_ports[portId].domains[id].syncPtr.preciseOriginTimestampNanoSecond;
                        m_ports[i].domains[j].syncPtr.syncReceiptTimestampSecond = m_ports[portId].domains[id].syncPtr.syncReceiptTimestampSecond;
                        m_ports[i].domains[j].syncPtr.syncReceiptTimestampNanoSecond = m_ports[portId].domains[id].syncPtr.syncReceiptTimestampNanoSecond;
                        m_ports[i].domains[j].syncPtr.cumulativeScaledRateOffset = m_ports[portId].domains[id].syncPtr.cumulativeScaledRateOffset;
                    }
                }
            }
            SendSync(m_ports[portId].domains[id].domainId);
        }
    }
}

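// Handle a Pdelay_Resp / Pdelay_Resp_Follow_Up addressed to this port (the
// echoed requesting identity must match this node and port, and the sequence
// number must match the request of the current cycle). Once both messages of
// a cycle have been received, the neighbor rate ratio and the mean link delay
// are recomputed and asCapableAcrossDomains is updated; a mismatch resets the
// exchange and counts a lost response.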
void
GPTP::handlePdelayResp(Ptr<TsnNetDevice> dev, GptpHeader header, Ptr<Packet> pkt, Time rxTimestamp)
{
    uint16_t portId = GetPortIdFromNetDev(dev);
    uint8_t payload[20] = {};
    pkt->CopyData(payload, 20);

    uint64_t timestampSecond = 0;
    uint32_t timestampNanoSecond = 0;
    uint64_t clockIdentity = 0;
    uint16_t portIdentity = 0;
    readPledayPayload(payload, timestampSecond, timestampNanoSecond, clockIdentity, portIdentity);

    if(clockIdentity != m_baseClockIndentity || portIdentity != portId)
    {
        return;
    }
    if(header.GetSequenceId() == m_pdelaySequenceId-1)
    {
        m_ports[portId].rcvdPdelayResp = true;
        m_ports[portId].rcvdPdelayRespPtr.correctionField = header.GetCorrectionField();
        m_ports[portId].rcvdPdelayRespPtr.sourceClockIndentity = header.GetClockIdentity();
        m_ports[portId].rcvdPdelayRespPtr.sourcePortIndentity = header.GetPortIdentity();
        m_ports[portId].rcvdPdelayRespPtr.sequenceId = header.GetSequenceId();
        m_ports[portId].rcvdPdelayRespPtr.requestReceiptTimestampSecond = timestampSecond;
        m_ports[portId].rcvdPdelayRespPtr.requestReceiptTimestampNanoSecond = timestampNanoSecond;
        m_ports[portId].rcvdPdelayRespPtr.requestingClockIdentity = m_baseClockIndentity;
        m_ports[portId].rcvdPdelayRespPtr.requestingPortIdentity = portId;

        m_ports[portId].rcvdPdelayRespPtr.responseReceiptTimestampSecond = rxTimestamp.GetSeconds();
        m_ports[portId].rcvdPdelayRespPtr.responseReceiptTimestampNanoSecond = rxTimestamp.GetNanoSeconds() % NANOINSECOND;
    }
    else{
        ResetPdelay(portId);
    }
}

void
GPTP::handlePdelayRespFup(Ptr<TsnNetDevice> dev, GptpHeader header, Ptr<Packet> pkt)
{
    uint16_t portId = GetPortIdFromNetDev(dev);
    uint8_t payload[20] = {};
    pkt->CopyData(payload, 20);

    uint64_t timestampSecond = 0;
    uint32_t timestampNanoSecond = 0;
    uint64_t clockIdentity = 0;
    uint16_t portIdentity = 0;
    readPledayPayload(payload, timestampSecond, timestampNanoSecond, clockIdentity, portIdentity);

    if(clockIdentity != m_baseClockIndentity || portIdentity != portId)
    {
        return;
    }
    else if(m_ports[portId].rcvdPdelayResp && header.GetSequenceId() == m_pdelaySequenceId-1)
    {
        m_ports[portId].rcvdPdelayRespFollowUp = true;
        m_ports[portId].rcvdPdelayRespFollowUpPtr.correctionField = header.GetCorrectionField();
        m_ports[portId].rcvdPdelayRespFollowUpPtr.sourceClockIndentity = header.GetClockIdentity();
        m_ports[portId].rcvdPdelayRespFollowUpPtr.sourcePortIndentity = header.GetPortIdentity();
        m_ports[portId].rcvdPdelayRespFollowUpPtr.sequenceId = header.GetSequenceId();
        m_ports[portId].rcvdPdelayRespFollowUpPtr.requestingClockIdentity = m_baseClockIndentity;
        m_ports[portId].rcvdPdelayRespFollowUpPtr.requestingPortIdentity = portId;
        m_ports[portId].rcvdPdelayRespFollowUpPtr.responseOriginTimestampSecond = timestampSecond;
        m_ports[portId].rcvdPdelayRespFollowUpPtr.responseOriginTimestampNanoSecond = timestampNanoSecond;
    }
    else{
        ResetPdelay(portId);
        return;
    }

    m_ports[portId].neighborRateRatio = computePdelayRateRatio(portId);
    m_ports[portId].meanLinkDelay = computePropTime(portId);

    m_NrTrace(m_ports[portId].net, m_ports[portId].neighborRateRatio);
    m_PdelayTrace(m_ports[portId].net, m_ports[portId].meanLinkDelay);

    if(m_ports[portId].neighborRateRatioValid && m_ports[portId].meanLinkDelay <= m_meanLinkDelayThresh && m_ports[portId].meanLinkDelay >= -m_meanLinkDelayThresh)
    {
        m_ports[portId].asCapableAcrossDomains = true;
        // NS_LOG_INFO("m_ports[portId].neighborRateRatio = " << m_ports[portId].neighborRateRatio);
        // NS_LOG_INFO("m_ports[portId].meanLinkDelay = " << m_ports[portId].meanLinkDelay);
    }
    else
    {
        m_ports[portId].asCapableAcrossDomains = false;
    }
}

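// Neighbor rate ratio estimated from two consecutive Pdelay exchanges:
//   nr = (t3 - t3_prev) / (t4 - t4_prev)
// where t3 is the responder's Pdelay_Resp egress timestamp (taken from the
// Pdelay_Resp_Follow_Up, responder time base) and t4 is the local receipt
// timestamp of the Pdelay_Resp. The previous pair (t3_prev, t4_prev) comes
// from the preceding cycle; the ratio is flagged valid once such a pair exists.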
double
GPTP::computePdelayRateRatio(uint16_t portId)
{
    NS_LOG_FUNCTION(this);
    Time t3 = Time(Seconds(m_ports[portId].rcvdPdelayRespFollowUpPtr.responseOriginTimestampSecond)) + Time(NanoSeconds(m_ports[portId].rcvdPdelayRespFollowUpPtr.responseOriginTimestampNanoSecond));
    Time t4 = Time(Seconds(m_ports[portId].rcvdPdelayRespPtr.responseReceiptTimestampSecond)) + Time(NanoSeconds(m_ports[portId].rcvdPdelayRespPtr.responseReceiptTimestampNanoSecond));
    Time t3_prime = m_ports[portId].t3_prime;
    Time t4_prime = m_ports[portId].t4_prime;
    double nr = (double)(t3.GetNanoSeconds() - t3_prime.GetNanoSeconds()) / (double)(t4.GetNanoSeconds() - t4_prime.GetNanoSeconds());

    if(t3_prime != Time(0) && t4_prime != Time(0)){
        m_ports[portId].neighborRateRatioValid = true;
    }

    m_ports[portId].t3_prime = t3;
    m_ports[portId].t4_prime = t4;
    return nr;
}

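// Mean link delay from one Pdelay exchange, in nanoseconds:
//   D = (nr*(t4 - t1) - (t3 - t2)) / 2
// with t1 the local Pdelay_Req egress time, t2 the responder's receipt time,
// t3 the responder's Pdelay_Resp egress time and t4 the local receipt time;
// nr converts the local interval (t4 - t1) into the responder's time base.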
double
GPTP::computePropTime(uint16_t portId)
{
    NS_LOG_FUNCTION(this);
    double nr = m_ports[portId].neighborRateRatio;
    Time t1 = Time(Seconds(m_ports[portId].txPdelayReqPtr.requestOriginTimestampSecond)) + Time(NanoSeconds(m_ports[portId].txPdelayReqPtr.requestOriginTimestampNanoSecond));
    Time t2 = Time(Seconds(m_ports[portId].rcvdPdelayRespPtr.requestReceiptTimestampSecond)) + Time(NanoSeconds(m_ports[portId].rcvdPdelayRespPtr.requestReceiptTimestampNanoSecond));
    Time t3 = Time(Seconds(m_ports[portId].rcvdPdelayRespFollowUpPtr.responseOriginTimestampSecond)) + Time(NanoSeconds(m_ports[portId].rcvdPdelayRespFollowUpPtr.responseOriginTimestampNanoSecond));
    Time t4 = Time(Seconds(m_ports[portId].rcvdPdelayRespPtr.responseReceiptTimestampSecond)) + Time(NanoSeconds(m_ports[portId].rcvdPdelayRespPtr.responseReceiptTimestampNanoSecond));

    double d = (nr*(t4.GetNanoSeconds()-t1.GetNanoSeconds()) - (t3.GetNanoSeconds()-t2.GetNanoSeconds()))/2.0;
    return d;
}

void
GPTP::ResetPdelay(uint16_t portId)
{
    NS_LOG_FUNCTION(this);
    m_ports[portId].rcvdPdelayResp = false;
    m_ports[portId].rcvdPdelayRespFollowUp = false;
    m_ports[portId].lostResponses++;
}

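// Offset between the grandmaster time and the uncorrected local domain clock,
// computed from the last Sync/Follow_Up:
//   upstreamTxTime = syncReceiptTimestamp - meanLinkDelay / neighborRateRatio
//   gmTime = preciseOriginTimestamp + correctionField * 2^-16
//            + r * (localTime - upstreamTxTime)
//   offset = gmTime - localTime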
Time
GPTP::computeOffset(uint16_t portId, uint16_t id, double r)
{
    NS_LOG_FUNCTION(this);
    Time syncEventIngressTimestamp = Time(Seconds(m_ports[portId].domains[id].syncPtr.syncReceiptTimestampSecond)) + Time(NanoSeconds(m_ports[portId].domains[id].syncPtr.syncReceiptTimestampNanoSecond));
    Time upstreamTxTime = syncEventIngressTimestamp - Time(NanoSeconds(m_ports[portId].meanLinkDelay / m_ports[portId].neighborRateRatio)); //STD 802.1AS : 10.2.2.1.7

    Time localTime = getClockFromDomainId(m_ports[portId].domains[id].domainId)->GetUncorrectedLocalTime();
    Time preciseOriginTime = Time(Seconds(m_ports[portId].domains[id].syncPtr.preciseOriginTimestampSecond)) + Time(NanoSeconds(m_ports[portId].domains[id].syncPtr.preciseOriginTimestampNanoSecond));
    Time gmTime = preciseOriginTime + Time(NanoSeconds(m_ports[portId].domains[id].syncPtr.correctionField * pow(2,-16))) + r*(localTime - upstreamTxTime);
    Time offset = gmTime - localTime;

    NS_LOG_INFO("Offset " << offset);
    return offset;
}

uint16_t
GPTP::GetPortIdFromNetDev(Ptr<TsnNetDevice> dev)
{
    NS_LOG_FUNCTION(this);
    uint16_t portId = 65535;
    for(int i=0; i < (int)m_ports.size(); i++){
        if(dev == m_ports[i].net)
        {
            portId = i;
        }
    }
    return portId;
}

int
GPTP::GetDomainKeyFromNetDev(Ptr<TsnNetDevice> dev, uint16_t portId, uint8_t domainId)
{
    NS_LOG_FUNCTION(this);
    for(int k=0; k < (int)m_ports[portId].domains.size(); k++){
        if(m_ports[portId].domains[k].domainId == domainId){
            return k;
        }
    }
    return -1;
}

Ptr<Clock>
GPTP::getClockFromDomainId(uint8_t domainId)
{
    for(int i=0; i < (int)m_domains.size(); i++){
        if(m_domains[i].domainId == domainId)
        {
            return m_domains[i].clock;
        }
    }
    return nullptr;
}

bool
GPTP::IsGm(uint8_t domainId)
{
    for(int j=0; j < (int)m_ports.size(); j++){
        for(int k=0; k < (int)m_ports[j].domains.size(); k++){
            if(m_ports[j].domains[k].domainId == domainId && m_ports[j].domains[k].state != MASTER){
                return false;
            }
        }
    }
    return true;
}

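// Payload parsing helpers (big-endian fields, offsets in bytes):
//   Pdelay payloads:   [0..5] timestamp seconds, [6..9] timestamp nanoseconds,
//                      [10..17] clock identity, [18..19] port identity.
//   Follow_Up payload: [0..5]/[6..9] preciseOriginTimestamp,
//                      [20..23] cumulativeScaledRateOffset.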
void
GPTP::readPledayPayload(uint8_t* payload, uint64_t& timestampSecond, uint32_t& timestampNanoSecond, uint64_t& clockIdentity, uint16_t& portIdentity)
{
    NS_LOG_FUNCTION(this);
    timestampSecond = ((uint64_t)payload[0] << 40) | ((uint64_t)payload[1] << 32) | ((uint64_t)payload[2] << 24) | ((uint64_t)payload[3] << 16) | ((uint64_t)payload[4] << 8) | payload[5];
    timestampNanoSecond = ((uint32_t)payload[6] << 24) | ((uint32_t)payload[7] << 16) | ((uint32_t)payload[8] << 8) | payload[9];
    clockIdentity = ((uint64_t)payload[10] << 56) | ((uint64_t)payload[11] << 48) | ((uint64_t)payload[12] << 40) | ((uint64_t)payload[13] << 32) | ((uint64_t)payload[14] << 24) | ((uint64_t)payload[15] << 16) | ((uint64_t)payload[16] << 8) | payload[17];
    portIdentity = (payload[18] << 8) | payload[19];
}

void
GPTP::readFollowUpPayload(uint8_t* payload, uint64_t& timestampSecond, uint32_t& timestampNanoSecond, uint32_t& cumulativeScaledRateOffset)
{
    NS_LOG_FUNCTION(this);
    timestampSecond = ((uint64_t)payload[0] << 40) | ((uint64_t)payload[1] << 32) | ((uint64_t)payload[2] << 24) | ((uint64_t)payload[3] << 16) | ((uint64_t)payload[4] << 8) | payload[5];
    timestampNanoSecond = ((uint32_t)payload[6] << 24) | ((uint32_t)payload[7] << 16) | ((uint32_t)payload[8] << 8) | payload[9];
    cumulativeScaledRateOffset = ((uint32_t)payload[20] << 24) | ((uint32_t)payload[21] << 16) | ((uint32_t)payload[22] << 8) | payload[23];
}

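// Receive hook installed on every port: gPTP frames are identified by their
// Ethertype, the gPTP header is removed, and the message is dispatched to the
// corresponding handler after a configurable processing delay
// (m_pktProcessingTime).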
bool
GPTP::RXCallback(Ptr<TsnNetDevice> dev, Ptr<const Packet> pkt, uint16_t mode, const Address& sender, Time rxTimestamp)
{
    NS_LOG_FUNCTION(this);
    if(mode == GPTP_ETHERTYPE)
    {
        Ptr<Packet> pktCopy = pkt->Copy();
        GptpHeader header;
        pktCopy->RemoveHeader(header);
        if(header.GetMessageType() == MESSAGETYPE_SYNC)
        {
            Simulator::Schedule(m_pktProcessingTime, &GPTP::handleSync, this, dev, header, pktCopy, rxTimestamp);
        }
        else if(header.GetMessageType() == MESSAGETYPE_FOLLOWUP)
        {
            Simulator::Schedule(m_pktProcessingTime, &GPTP::handleFollowUp, this, dev, header, pktCopy, rxTimestamp);
        }
        else if(header.GetMessageType() == MESSAGETYPE_PDELAYREQ)
        {
            Simulator::Schedule(m_pktProcessingTime, &GPTP::SendPdelayResp, this, dev, header, rxTimestamp);
        }
        else if(header.GetMessageType() == MESSAGETYPE_PDELAYRESP)
        {
            Simulator::Schedule(m_pktProcessingTime, &GPTP::handlePdelayResp, this, dev, header, pktCopy, rxTimestamp);
        }
        else if(header.GetMessageType() == MESSAGETYPE_PDELAYRESPFOLLOWUP)
        {
            Simulator::Schedule(m_pktProcessingTime, &GPTP::handlePdelayRespFup, this, dev, header, pktCopy);
        }
    }
    return true;
}

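// Transmit hook: when a gPTP frame leaves a port, its UID is matched against
// the pending Sync, Pdelay_Req and Pdelay_Resp records to capture the egress
// timestamp. For a Sync, the timestamp becomes syncOriginTimestamp (and, on
// the grandmaster, preciseOriginTimestamp translated into the domain clock)
// and the Follow_Up is scheduled; for a Pdelay_Resp, the matching
// Pdelay_Resp_Follow_Up is scheduled after m_TimestampProcessingTime.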
bool
GPTP::TXCallback(Ptr<const Packet> pkt, Time txTimestamp)
{
    NS_LOG_FUNCTION(this);
    Ptr<Packet> pktCopy = pkt->Copy();
    EthernetHeader2 ethHeader;
    pktCopy->RemoveHeader(ethHeader);
    GptpHeader gptpHeader;
    pktCopy->RemoveHeader(gptpHeader);
    if(ethHeader.GetEthertype() == GPTP_ETHERTYPE)
    {
        for(int i=0; i < (int)m_ports.size(); i++){
            for(int k = 0; k < (int)m_ports[i].domains.size(); k++){
                if(m_ports[i].domains[k].syncPtr.pktSent && pkt->GetUid() == m_ports[i].domains[k].syncPtr.pktUid)
                {
                    m_ports[i].domains[k].syncPtr.syncOriginTimestampSecond = txTimestamp.GetSeconds();
                    m_ports[i].domains[k].syncPtr.syncOriginTimestampNanoSecond = txTimestamp.GetNanoSeconds() % NANOINSECOND;
                    if(IsGm(gptpHeader.GetDomainNumber()))
                    {
                        Ptr<Clock> domainClock = getClockFromDomainId(gptpHeader.GetDomainNumber());
                        Time diff = m_mainClock->GetLocalTime() - domainClock->GetLocalTime();
                        txTimestamp = txTimestamp - diff;
                        m_ports[i].domains[k].syncPtr.preciseOriginTimestampSecond = txTimestamp.GetSeconds();
                        m_ports[i].domains[k].syncPtr.preciseOriginTimestampNanoSecond = txTimestamp.GetNanoSeconds() % NANOINSECOND;
                    }
                    Simulator::Schedule(m_TimestampProcessingTime, &GPTP::SendFollowUp, this, m_ports[i], i, k);
                    return true;
                }
                else if(m_ports[i].txPdelayReqPtr.pktSent && pkt->GetUid() == m_ports[i].txPdelayReqPtr.pktUid)
                {
                    m_ports[i].txPdelayReqPtr.requestOriginTimestampSecond = txTimestamp.GetSeconds();
                    m_ports[i].txPdelayReqPtr.requestOriginTimestampNanoSecond = txTimestamp.GetNanoSeconds() % NANOINSECOND;
                    return true;
                }
                else if(m_ports[i].txPdelayRespPtr.pktSent && pkt->GetUid() == m_ports[i].txPdelayRespPtr.pktUid)
                {
                    Simulator::Schedule(m_TimestampProcessingTime, &GPTP::SendPdelayRespFup, this, m_ports[i].net, txTimestamp);
                    return true;
                }
            }
        }
    }
    return true;
}

} // Namespace ns3