It takes up to 40 minutes before a packet is lost (at a rate of 1 packet every few minutes).
The MCU runs Linux kernel 3.18.48.
Using a scope on the UART's Rx pin, I can see the packets (about 15 bytes long) are sent correctly.
But read() doesn't return with any of the packet's bytes
(VMIN = 1, VTIME = 0, i.e. configured to return as soon as at least 1 byte is in the Rx buffer).
This code is used in 4 other projects, with different HW boards, and we have never seen this issue before.
Can you share ideas on how to tackle such an issue?
How can I debug the UART driver, to better understand where the packet got lost?
Thanks.
Logic analyzer capture of the lost packet:
E_UARTDRV_STATUS UartDrv_Open(void *pUart, S_UartDrv_InitData *init_data)
{
    int fd;
    struct termios tty;
    struct serial_struct serial;
    /*
     * O_RDWR   - Open the port for reading and writing.
     * O_NOCTTY - The port never becomes the controlling terminal of the process.
     * O_NDELAY - Use non-blocking I/O.
     *            On some systems this also means the RS232 DCD signal line is ignored.
     *
     * Note well: the O_EXCL flag, if present, is silently ignored by the kernel when
     * opening a serial device such as a modem, so it cannot protect the port.
     * On modern Linux systems, programs like ModemManager will sometimes read from
     * and write to your device and possibly corrupt your program state. To avoid
     * problems with such programs, set TIOCEXCL on the terminal after associating
     * a terminal with the device.
     */
    fd = open(init_data->PortName, O_RDWR | O_NOCTTY | O_NDELAY);
    if (fd == -1) { /* invalid descriptor: log the reason */
        SYS_LOG_ERR_V("fd invalid whilst trying to open com port %s: %s\n", init_data->PortName, strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    if (tcflush(fd, TCIOFLUSH) < 0) {
        SYS_LOG_ERR_V("Error failed to flush input output buffers %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    /* Enable low latency; this should affect the file /sys/bus/usb-serial/devices/ttyUSB0/latency_timer */
    if (ioctl(fd, TIOCGSERIAL, &serial) < 0) {
        SYS_LOG_ERR_V("Error failed to get latency current value: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    serial.flags |= ASYNC_LOW_LATENCY;
    if (ioctl(fd, TIOCSSERIAL, &serial) < 0) {
        SYS_LOG_ERR_V("Error failed to set Low latency: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    /* Clear O_NDELAY again so that subsequent reads block */
    if (fcntl(fd, F_SETFL, 0) < 0) {
        SYS_LOG_ERR_V("Error failed to set file flags: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    /* Get current configuration */
    if (tcgetattr(fd, &tty) < 0) {
        SYS_LOG_ERR_V("Error failed to get current configuration: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    if (cfsetospeed(&tty, init_data->baud) < 0) {
        SYS_LOG_ERR_V("Error failed to set output baud rate: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    if (cfsetispeed(&tty, init_data->baud) < 0) {
        SYS_LOG_ERR_V("Error failed to set input baud rate: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    tty.c_cflag |= (CLOCAL | CREAD); /* Enable the receiver and set local mode */
    tty.c_cflag &= ~CSIZE;
    tty.c_cflag |= CS8;              /* 8-bit characters */
    tty.c_cflag &= ~PARENB;          /* no parity bit */
    tty.c_cflag &= ~CSTOPB;          /* only need 1 stop bit */
    /*
     * Input flags - turn off input processing:
     * convert break to null byte, no CR-to-NL translation,
     * no NL-to-CR translation, don't mark parity errors or breaks,
     * no input parity check, don't strip high bit off,
     * no XON/XOFF software flow control.
     *
     * BRKINT - If this bit is set and IGNBRK is not, a break condition clears the
     *          terminal input and output queues and raises SIGINT for the foreground
     *          process group associated with the terminal. If neither BRKINT nor
     *          IGNBRK is set, a break condition is passed to the application as a
     *          single '\0' character if PARMRK is not set, or otherwise as the
     *          three-character sequence '\377', '\0', '\0'.
     * INPCK  - If this bit is set, input parity checking is enabled; if it is not
     *          set, no checking at all is done for parity errors on input and the
     *          characters are simply passed through. Input parity checking is
     *          independent of whether parity detection and generation is enabled on
     *          the underlying hardware (see Control Modes): for example, you could
     *          clear INPCK and set PARENB to ignore parity errors on input but still
     *          generate parity on output. If INPCK is set, what happens on a parity
     *          error depends on IGNPAR and PARMRK; if neither is set, a byte with a
     *          parity error is passed to the application as '\0'.
     */
    tty.c_iflag &= ~(BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
    /*
     * IGNBRK - If this bit is set, break conditions are ignored. A break condition
     * is defined, in the context of asynchronous serial data transmission, as a
     * series of zero-value bits longer than a single byte.
     */
    tty.c_iflag |= IGNBRK;
    /*
     * No line processing:
     * echo off, echo newline off, canonical mode off,
     * extended input processing off, signal chars off.
     */
    tty.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
    /*
     * Output flags - turn off output processing:
     * no CR-to-NL translation, no NL-to-CR(-NL) translation,
     * no column-0 CR suppression, no Ctrl-D suppression, no fill
     * characters, no case mapping, no local output processing, i.e.
     * c_oflag &= ~(OCRNL | ONLCR | ONLRET | ONOCR | ONOEOT | OFILL | OLCUC | OPOST);
     */
    tty.c_oflag = 0;
    /* Fetch bytes as they become available: read() returns as soon as at least one
     * byte is buffered, or returns 0 once the timer expires (VTIME is in tenths of
     * a second). */
    tty.c_cc[VMIN] = 0;
    tty.c_cc[VTIME] = 1;
    if (tcsetattr(fd, TCSANOW, &tty) != 0) {
        SYS_LOG_ERR_V("Error failed to set new configuration: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }
    uartPeripheral.fd = fd;
    return UARTDRV_STATUS_SUCCESS;
}
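The comment block at the top of UartDrv_Open() recommends setting TIOCEXCL after opening, but the function never actually does so. A minimal sketch of that call (my addition, reusing the fd and the error-handling pattern above), to be placed right after the open() succeeds:

    /* Mark the tty exclusive: further open() attempts by other processes
     * (e.g. ModemManager) will fail with EBUSY. Root can still bypass this. */
    if (ioctl(fd, TIOCEXCL) < 0) {
        SYS_LOG_ERR_V("Error failed to set exclusive mode: %s\n", strerror(errno));
        return UARTDRV_STATUS_ERROR;
    }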
uint8_t *UartDrv_Rx(S_UartDrv_Handle *handle, uint16_t bytesToRead, uint16_t *numBytesRead)
{
    ssize_t n_read;
    uint16_t n_TotalReadBytes = 0;
    struct timespec timestamp;
    struct timespec now;
    long diff_ms;
    bool Timeout = false, isPartialRead = false;

    if (handle == NULL) {
        SYS_LOG_ERR("UartDrv Error: async rx error - peripheral error");
        exit(EXIT_FAILURE);
    }
    if (bytesToRead > sizeof(uartPeripheral.buffer_rx)) {
        ESILOG_ERR_V("Param Error: Invalid length %u, max length %zu", bytesToRead, sizeof(uartPeripheral.buffer_rx));
        *numBytesRead = 0;
        return NULL;
    }
    while (n_TotalReadBytes < bytesToRead && !Timeout) {
        do {
            n_read = read(uartPeripheral.fd, &uartPeripheral.buffer_rx[n_TotalReadBytes], bytesToRead - n_TotalReadBytes);
            if (isPartialRead) {
                clock_gettime(CLOCK_REALTIME, &now);
                diff_ms = (now.tv_sec - timestamp.tv_sec) * 1000;
                diff_ms += (now.tv_nsec - timestamp.tv_nsec) / 1000000;
                if (diff_ms > UART_READ_TIMEOUT_MS) {
                    SYS_LOG_ERR("UartDrv_Rx: Error, timeout while reading\r\n");
                    Timeout = true;
                }
            }
        } while ((n_read == 0) && !Timeout); /* n_read == 0 means the VTIME timer expired with no data */
        if (n_read == -1) {
            if (errno == EINTR) {
                ESILOG_WARN("Uart Interrupted");
                continue;
            }
            ESILOG_ERR_V("Uart Error: [%d, %s]", errno, strerror(errno));
            exit(EXIT_FAILURE);
        }
        n_TotalReadBytes += (uint16_t)n_read;
        if (n_TotalReadBytes < bytesToRead) {
            //SYS_LOG_DBG_V("UartDrv_Rx: couldn't fetch all bytes, read %hu, expected %hu, continue reading %s\r\n", n_TotalReadBytes, bytesToRead, isPartialRead ? "During Partial read" : "");
            if (!isPartialRead) {
                isPartialRead = true;
                clock_gettime(CLOCK_REALTIME, &timestamp);
            }
        }
    }
    *numBytesRead = n_TotalReadBytes;
    return uartPeripheral.buffer_rx;
}
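On the question of how to debug the UART driver: one low-effort first step is to read the driver's interrupt and error counters with the TIOCGICOUNT ioctl and watch which counter stops moving (or starts climbing) when a packet disappears. A minimal sketch, assuming the same fd and logging macro as above; note that not every serial driver implements this ioctl, in which case it fails with ENOTTY or EINVAL:

#include <linux/serial.h>   /* struct serial_icounter_struct */

/* If 'rx' stops incrementing while the scope still shows traffic on the pin,
 * the bytes are lost below the tty layer (driver/IRQ); if 'overrun' or
 * 'buf_overrun' climbs, a FIFO or tty buffer is overflowing instead. */
static void UartDrv_DumpCounters(int fd)
{
    struct serial_icounter_struct ic;
    if (ioctl(fd, TIOCGICOUNT, &ic) < 0) {
        SYS_LOG_ERR_V("TIOCGICOUNT not supported: %s\n", strerror(errno));
        return;
    }
    SYS_LOG_ERR_V("rx=%d overrun=%d buf_overrun=%d frame=%d parity=%d brk=%d\n",
                  ic.rx, ic.overrun, ic.buf_overrun, ic.frame, ic.parity, ic.brk);
}

On many drivers the same counters are also exposed without any code under /proc/tty/driver/, which you can poll from a shell while the system runs.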
I want to report a strange behaviour in deep-sleep management: I have an ESP8266 NodeMCU AMICA V2 board. If I put it into deep sleep for 1800 seconds it is fine and wakes up as expected, but if I put it into deep sleep for 3600 seconds it won't wake up, and I don't understand why, since the code is otherwise the same. To put it into deep sleep I use this code
(in setup() I have also added "pinMode(D0, WAKEUP_PULLUP);", but nothing changed):
sleepSeconds = 3600;
Serial.println(sleepSeconds);
digitalWrite(power, LOW);
delay(250);
ESP.deepSleep(sleepSeconds * 1000000);
delay(2000);
I also tested the maximum deep-sleep time with the function below, and the result is 13591117817 µs, which is almost 226 minutes, so that should not be the problem:
// The function's opening line was missing from the post; this signature is
// inferred from the body (it converts a uint64_t value, presumably the result
// of ESP.deepSleepMax(), to a String for printing).
String uint64ToString(uint64_t input) {
    String result = "";
    uint8_t base = 10;
    do {
        char c = input % base;
        input /= base;
        if (c < 10)
            c += '0';
        else
            c += 'A' - 10;
        result = c + result;
    } while (input);
    return result;
}
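One thing worth ruling out here (my hypothesis, not something stated in the post): if sleepSeconds is declared as a plain int, then sleepSeconds * 1000000 is computed in 32-bit arithmetic before being widened to the uint64_t parameter of ESP.deepSleep(). 1800 * 1000000 = 1.8e9 still fits in a signed 32-bit int, but 3600 * 1000000 = 3.6e9 overflows it, which would corrupt the sleep duration in exactly the failing case. A minimal sketch of the 64-bit-safe call:

// Force 64-bit arithmetic so the microsecond count cannot overflow.
// (In the Arduino ESP8266 core, ESP.deepSleep() takes a uint64_t, but the
// multiplication happens *before* the conversion to that type.)
uint64_t sleepSeconds = 3600;
ESP.deepSleep(sleepSeconds * 1000000ULL);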
iOS 10+
iPhone: 5s & 6
Xcode: 9+
I'm recording audio using the aLaw codec at an 8 kHz sample rate with a sample size of 8 bits. I create an AudioQueue like this:
// create the queue
XThrowIfError(AudioQueueNewInput(
                  &mRecordFormat,
                  MyInputBufferHandler,
                  this /* userData */,
                  NULL /* run loop */,
                  kCFRunLoopCommonModes /* run loop mode */,
                  0 /* flags */,
                  &mQueue), "AudioQueueNewInput failed");
MyInputBufferHandler is the callback that is called every time a buffer is filled (160 bytes, i.e. every 20 ms), so I expect the callback to fire every 20 ms. But in testing, MyInputBufferHandler is instead called in bursts of 6 every 128 ms, rather than once every 20 ms.
My recording configuration is:
mRecordFormat.mSampleRate = 8000.0; // 8 KHz
mRecordFormat.mChannelsPerFrame = 1;
mRecordFormat.mBytesPerFrame = 1;
mRecordFormat.mBitsPerChannel = 8;
mRecordFormat.mBytesPerPacket = 1;
mRecordFormat.mFramesPerPacket = 1;
Can someone please help me out? Why is MyInputBufferHandler called in 128 ms bursts instead of every 20 ms? A sample rate of 8 kHz with 160-byte recording buffers should mean one MyInputBufferHandler call every 20 ms, not every 128 ms.
It seems that AudioQueue sits on top of AudioUnit and you cannot control the internal buffer size from the AudioQueue level, no matter what buffer size you set on your queue: by default the internal buffer is at minimum 1024 bytes. So if you want a callback after every 160 bytes of recorded data, you won't get one.
For those who run into the same problem: you need to use AudioUnit directly.
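The observed numbers are consistent with that explanation; a quick sanity check of the arithmetic (derived only from the figures in the question):

// The stream rate is mSampleRate * mBytesPerFrame = 8000 * 1 = 8000 bytes/s.
const double bytesPerSecond = 8000.0 * 1.0;
// A 1024-byte internal buffer fills every 1024 / 8000 = 0.128 s,
// which is exactly the observed 128 ms burst period.
const double burstPeriodSeconds = 1024.0 / bytesPerSecond;
// Each burst then drains floor(1024 / 160) = 6 complete 160-byte queue
// buffers, matching the 6 callbacks seen per burst.
const int callbacksPerBurst = 1024 / 160;

So the burst pattern by itself is strong evidence of a fixed ~1024-byte buffer underneath the queue.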
Links of a similar situation:
https://stackoverflow.com/a/4597409/1012775
https://stackoverflow.com/a/6687050/1012775
I am implementing SSH client access to a Unix server on iOS. In the negotiation process I have met many hurdles and am still fighting with them. The latest one concerns the SSH2_MSG_KEX_DH_GEX_REPLY packet data, where I receive the wrong packet length (maybe extraneous padding). The overall exchange is as follows:
Client : connects to aix.polarhome.com on port 775 (a changed port for SSH) using GCDAsyncSocket
Server : SSH-2.0-OpenSSH_6.0
Client : sends SSH-2.0-OpenSSH_6.0
(All subsequent packets follow the Binary Packet Protocol)
Server : SSH2_MSG_KEXINIT with its set of supported algorithms
Client : SSH2_MSG_KEXINIT with the set of common algorithms
Client : SSH2_MSG_KEX_DH_GEX_REQUEST_OLD
code:
SignedByte sendByte[1920];
int writeIndex = 0;
minGroupLength = 1024;
prefGroupLength = 1024;
maxGroupLength = 4096;
sendByte[writeIndex++] = SSH2_MSG_KEX_DH_GEX_REQUEST_OLD;
[self write32BitInteger:prefGroupLength toPacket:sendByte fromIndex:writeIndex];
writeIndex += 4;
[self sendSSHBinaryPacketPayload:sendByte toLength:writeIndex];
writeIndex = 0;
Server : SSH2_MSG_KEX_DH_GEX_GROUP
client -> fetch values of p and g
compute value of e (1 < e < (p-1)/2)
Client : SSH2_MSG_KEX_DH_GEX_INIT
Code
SignedByte sendByte[1920];
int writeIndex = 0;
NSInteger eByteCount = [[e description] stringByReplacingOccurrencesOfString:@" " withString:@""].length / 2;
sendByte[writeIndex++] = SSH2_MSG_KEX_DH_GEX_INIT;
[self write32BitInteger:eByteCount toPacket:sendByte fromIndex:writeIndex];
writeIndex += 4;
Byte eBytes[eByteCount];
NSInteger length = [self getBytes:eBytes fromBigInteger:e];
for (int i = 0; i < length; i++) {
sendByte[writeIndex++] = eBytes[i];
}
[self sendSSHBinaryPacketPayload:sendByte toLength:writeIndex];
writeIndex = 0;
Server : SSH2_MSG_KEX_DH_GEX_REPLY
Total length : 720
Packet length (4 bytes): 00 00 02 bc (700, where I expected 720 - 4 = 716). I don't know why it is 700.
client -> read host key and verify it
read value of f
read signature and verify it
Client : SSH2_MSG_NEWKEYS
Now, after the client sends this last packet, the server goes silent and returns no data in response to the SSH2_MSG_NEWKEYS.
I looked into the code of other SSH implementations, but none of them helped.
I am totally clueless about what to do next. Please help; I am really frustrated.
Per @revinder in a comment:
I solved it on my own: the server was sending two packets merged into one, the SSH2_MSG_KEX_DH_GEX_REPLY and the SSH2_MSG_NEWKEYS.
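That merge also explains the length numbers in the dump above: the packet_length field of a Binary Packet Protocol packet counts only the bytes of its own packet that follow the 4-byte length field (RFC 4253 §6). So the 720 bytes received are 4 + 700 = 704 bytes of SSH2_MSG_KEX_DH_GEX_REPLY followed by a 16-byte SSH2_MSG_NEWKEYS packet (a 4-byte length field of 12, plus 12 bytes of padding length, payload and padding). A hedged sketch of splitting such a read; the field layout is from RFC 4253, while the function and handler names are made up for illustration:

#include <cstdint>
#include <cstddef>

// Walk a receive buffer that may hold several concatenated SSH packets.
// Per RFC 4253, each packet is: uint32 packet_length (big-endian), then
// packet_length bytes (padding_length, payload, random padding). For the
// dump above this yields 4 + 700 = 704 bytes of SSH2_MSG_KEX_DH_GEX_REPLY
// followed by 4 + 12 = 16 bytes of SSH2_MSG_NEWKEYS, totalling 720.
static void splitSshPackets(const uint8_t *buf, size_t len,
                            void (*handlePacket)(const uint8_t *pkt, uint32_t pktLen))
{
    size_t off = 0;
    while (off + 4 <= len) {
        uint32_t packetLength = (uint32_t)buf[off] << 24 | (uint32_t)buf[off + 1] << 16 |
                                (uint32_t)buf[off + 2] << 8 | (uint32_t)buf[off + 3];
        if (off + 4 + packetLength > len)
            break; // incomplete packet: wait for more bytes from the socket
        handlePacket(buf + off + 4, packetLength);
        off += 4 + packetLength;
    }
}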
I am trying to create a software simulation on an Ubuntu GNU/Linux machine which will work like PPPoE. I would like this simulator to take outgoing packets, strip off the Ethernet header, insert the PPP flags (7E, FF, 03, 00, and 21), and place the IP-layer information in the PPP packet. I am having trouble with the FCS that goes after the data. From what I can tell, the cell modem I am using has a 2-byte FCS using the CRC16-CCITT method. I have found several pieces of software that will calculate this checksum, but none of them produce what is coming out of the serial line (I have a serial-line "sniffer" that shows me everything the modem is being sent).
I have been looking into the source of pppd and the Linux kernel itself, and I can see that both of them have a method of adding an FCS to the data. It seems quite difficult to implement, as I have no experience in kernel hacking. Can someone come up with a simple way (preferably in Python) of calculating an FCS that matches the one the kernel produces?
Thanks.
P.S. If anyone wants, I can add a sample of the data output I am sending to the serial modem.
I used the simple Python library crcmod.

import crcmod  # pip3 install crcmod

fcsData = "A0 19 03 61 DC"
fcsData = ''.join(fcsData.split(' '))
print(fcsData)
crc16 = crcmod.mkCrcFun(0x11021, rev=True, initCrc=0x0000, xorOut=0xFFFF)
print(hex(crc16(bytes.fromhex(fcsData))))
fcs = hex(crc16(bytes.fromhex(fcsData)))
I recently did something like this while testing code to kill a PPP connection.
This worked for me:
# RFC 1662 Appendix C
def mkfcstab():
    P = 0x8408
    def valiter():
        for b in range(256):
            v = b
            i = 8
            while i:
                v = (v >> 1) ^ P if v & 1 else v >> 1
                i -= 1
            yield v & 0xFFFF
    return tuple(valiter())

fcstab = mkfcstab()

PPPINITFCS16 = 0xffff  # Initial FCS value
PPPGOODFCS16 = 0xf0b8  # Good final FCS value

def pppfcs16(fcs, bytelist):
    for b in bytelist:
        fcs = (fcs >> 8) ^ fcstab[(fcs ^ b) & 0xff]
    return fcs
To get the value:
fcs = pppfcs16(PPPINITFCS16, (ord(c) for c in frame)) ^ 0xFFFF
and swap the bytes (I used chr((fcs & 0xFF00) >> 8), chr(fcs & 0x00FF))
Got this from mbed.org PPP-Blinky:
// http://www.sunshine2k.de/coding/javascript/crc/crc_js.html - Correctly calculates
// the 16-bit FCS (crc) on our frames (Choose CRC16_CCITT_FALSE)
int crc;

void crcReset()
{
    crc = 0xffff;   // crc restart
}

void crcDo(int x) // cumulative crc
{
    for (int i = 0; i < 8; i++) {
        crc = ((crc & 1) ^ (x & 1)) ? (crc >> 1) ^ 0x8408 : crc >> 1; // crc calculator
        x >>= 1;
    }
}

int crcBuf(char *buf, int size) // crc on an entire block of memory
{
    crcReset();
    for (int i = 0; i < size; i++)
        crcDo(*buf++);
    return crc;
}
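For completeness, a hedged usage sketch (my addition, following RFC 1662 rather than the PPP-Blinky source): the FCS actually placed on the wire is the ones' complement of the running CRC, transmitted least-significant byte first, and a receiver that runs the CRC over data plus FCS should land on the "good" value 0xF0B8:

// Append the FCS to 'frame' (RFC 1662: complement the CRC, low byte first).
// 'frame' must have room for two extra bytes; 'len' is the payload length.
int appendFcs(char *frame, int len)
{
    int fcs = crcBuf(frame, len) ^ 0xffff;    // ones' complement of the CRC
    frame[len]     = (char)(fcs & 0xff);      // least-significant byte first
    frame[len + 1] = (char)((fcs >> 8) & 0xff);
    return len + 2;                           // new frame length
}
// Sanity check: crcBuf(frame, len + 2) == 0xf0b8 for a well-formed frame.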