Improve throughput by reading data in larger chunks
Once a first byte is received, read as many bytes as are immediately available in large chunks before going back to single-byte reads. This prevents dropped data when the host is sending quickly, since per-byte reads may not keep up with the incoming stream.
parent ed0f7f47f9
commit b3399e6134
@@ -69,6 +69,7 @@ class ITMParser:
         except TimeoutException as e:
+            # Timeout inside a parser should be reported.
             print(color("red", "Timeout"))
             break
         except ResyncException as e:
             print(color("red", "Resync"))
 
@@ -153,15 +154,32 @@ def main(argv):
     args = parser.parse_args()
 
     ser = serial.Serial(args.device, args.baudrate)
-    ser.timeout = 1
 
     print(color('green', 'ready'))
 
     def input_stream():
         while True:
+            # Read one byte with a 1s timeout.
+            ser.timeout = 1
             data = ser.read(1)
             if len(data) == 0:
                 # Timeout
                 yield None
-            else:
-                yield data[0]
+                continue
+            yield data[0]
+
+            # Then read as many more as there are immediately
+            # available, and send them. This is more efficient than
+            # reading each individual byte, when they're coming in
+            # fast. Once they stop, we'll go back to the normal
+            # 1 byte read with timeout.
+            ser.timeout = 0
+            while True:
+                data = ser.read(65536)
+                if len(data) == 0:
+                    break
+                for c in data:
+                    yield c
 
     try:
         ITMParser(input_stream()).process()
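
For reference, here is the pattern in isolation: a generator that blocks up to a second for the first byte, then drains whatever the OS has already buffered with non-blocking reads before returning to the blocking single-byte read. This is a minimal standalone sketch, assuming pyserial; the function name, device path, and baud rate are illustrative, not part of this commit.

    import serial

    def chunked_input(ser):
        """Yield one int per received byte, or None after 1s of silence."""
        while True:
            # Block up to 1s waiting for the first byte.
            ser.timeout = 1
            first = ser.read(1)
            if not first:
                yield None  # quiet line; caller can treat this as a timeout
                continue
            yield first[0]
            # Non-blocking: drain everything already buffered, then fall
            # back to the blocking single-byte read above.
            ser.timeout = 0
            while True:
                chunk = ser.read(65536)
                if not chunk:
                    break
                yield from chunk

    ser = serial.Serial('/dev/ttyUSB0', 115200)  # illustrative device/baud
    for b in chunked_input(ser):
        pass  # feed each byte (or None on timeout) to a parser here

An alternative would be to size the bulk read with ser.in_waiting instead of a zero timeout; the zero-timeout read used here achieves the same drain with one fewer query per iteration.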