Browse Source

Add nilm-pipewatch test and update for Python 3

tags/nilmtools-2.0.0
Jim Paris 3 years ago
parent
commit
8fd511b5df
2 changed files with 59 additions and 18 deletions
  1. +21
    -17
      nilmtools/pipewatch.py
  2. +38
    -1
      tests/test.py

+ 21
- 17
nilmtools/pipewatch.py View File

@@ -20,7 +20,6 @@ import daemon
def parse_args(argv = None): def parse_args(argv = None):
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter, formatter_class = argparse.ArgumentDefaultsHelpFormatter,
version = nilmtools.__version__,
description = """\ description = """\
Pipe data from 'generator' to 'consumer'. This is intended to be Pipe data from 'generator' to 'consumer'. This is intended to be
executed frequently from cron, and will exit if another copy is executed frequently from cron, and will exit if another copy is
@@ -30,6 +29,8 @@ def parse_args(argv = None):
Intended for use with ethstream (generator) and nilm-insert Intended for use with ethstream (generator) and nilm-insert
(consumer). Commands are executed through the shell. (consumer). Commands are executed through the shell.
""") """)
parser.add_argument("-v", "--version", action="version",
version=nilmtools.__version__)
parser.add_argument("-d", "--daemon", action="store_true", parser.add_argument("-d", "--daemon", action="store_true",
help="Run in background") help="Run in background")
parser.add_argument("-l", "--lock", metavar="FILENAME", action="store", parser.add_argument("-l", "--lock", metavar="FILENAME", action="store",
@@ -38,7 +39,7 @@ def parse_args(argv = None):
help="Lock file for detecting running instance") help="Lock file for detecting running instance")
parser.add_argument("-t", "--timeout", metavar="SECONDS", action="store", parser.add_argument("-t", "--timeout", metavar="SECONDS", action="store",
type=float, default=30, type=float, default=30,
help="Restart if no output from " +
help="Exit if no output from " +
"generator for this long") "generator for this long")
group = parser.add_argument_group("commands to execute") group = parser.add_argument_group("commands to execute")
group.add_argument("generator", action="store", group.add_argument("generator", action="store",
@@ -49,31 +50,33 @@ def parse_args(argv = None):


return args return args


def reader_thread(q, fd):
    """Forward data read from file descriptor *fd* into queue *q*.

    Loops until the generator hits EOF or the fd reports an error, then
    places a None sentinel on the queue so the main loop knows to stop.
    """
    try:
        while True:
            readable, _, exceptional = select.select([fd], [], [fd], 0.25)
            if exceptional:  # pragma: no cover -- never expect this to happen
                # Very few things are "exceptional conditions";
                # just TCP OOB data, some TTY state changes, etc.
                raise Exception
            if not readable:
                # Short timeout expired with nothing to read -- loop again.
                # This is to catch the fd being closed elsewhere, which is
                # only detected when select restarts.
                continue
            chunk = os.read(fd, 65536)
            if chunk == b"":  # generator EOF
                raise Exception
            q.put(chunk)
    except Exception:
        # Any failure (including EOF) ends the reader; signal via sentinel.
        q.put(None)


def watcher_thread(q, procs):
    """Poll the subprocesses in *procs*; enqueue a None sentinel on *q*
    as soon as any one of them has exited, then return."""
    while True:
        if any(proc.poll() is not None for proc in procs):
            # A process died -- tell the main loop to shut down.
            q.put(None)
            return
        time.sleep(0.25)


@@ -93,17 +96,17 @@ def pipewatch(args):
stderr = None, stderr = None,
preexec_fn = os.setpgrp) preexec_fn = os.setpgrp)


queue = queue.Queue(maxsize = 4)
q = queue.Queue(maxsize = 4)
reader = threading.Thread(target = reader_thread, reader = threading.Thread(target = reader_thread,
args = (queue, generator.stdout.fileno()))
args = (q, generator.stdout.fileno()))
reader.start() reader.start()
watcher = threading.Thread(target = watcher_thread, watcher = threading.Thread(target = watcher_thread,
args = (queue, [generator, consumer]))
args = (q, [generator, consumer]))
watcher.start() watcher.start()
try: try:
while True: while True:
try: try:
data = queue.get(True, args.timeout)
data = q.get(True, args.timeout)
if data is None: if data is None:
break break
consumer.stdin.write(data) consumer.stdin.write(data)
@@ -131,7 +134,8 @@ def pipewatch(args):
os.killpg(proc.pid, signal.SIGTERM) os.killpg(proc.pid, signal.SIGTERM)
if poll_timeout(proc, 0.5) is None: if poll_timeout(proc, 0.5) is None:
os.killpg(proc.pid, signal.SIGKILL) os.killpg(proc.pid, signal.SIGKILL)
except OSError:
except OSError: # pragma: no cover
# (hard to trigger race condition in os.killpg)
pass pass
return poll_timeout(proc, 0.5) return poll_timeout(proc, 0.5)


@@ -142,7 +146,7 @@ def pipewatch(args):
# Consume all remaining data in the queue until the reader # Consume all remaining data in the queue until the reader
# and watcher threads are done # and watcher threads are done
while reader.is_alive() or watcher.is_alive(): while reader.is_alive() or watcher.is_alive():
queue.get(True, 0.1)
q.get(True, 0.1)


fprintf(sys.stderr, "pipewatch: generator returned %d, " + fprintf(sys.stderr, "pipewatch: generator returned %d, " +
"consumer returned %d\n", gret, cret) "consumer returned %d\n", gret, cret)
@@ -160,7 +164,7 @@ def main(argv = None):
sys.exit(0) sys.exit(0)
try: try:
# Run as a daemon if requested, otherwise run directly. # Run as a daemon if requested, otherwise run directly.
if args.daemon:
if args.daemon: # pragma: no cover (hard to do from inside test suite)
with daemon.DaemonContext(files_preserve = [ lockfile ]): with daemon.DaemonContext(files_preserve = [ lockfile ]):
pipewatch(args) pipewatch(args)
else: else:


+ 38
- 1
tests/test.py View File

@@ -763,9 +763,46 @@ class TestAllCommands(CommandTester):
client.stream_create("/train/matches2", "uint8_1") client.stream_create("/train/matches2", "uint8_1")
self.ok(get_json("tests/data/trainola2.js")) self.ok(get_json("tests/data/trainola2.js"))


def test010_pipewatch(self):
    # Exercise nilm-pipewatch, which pipes a 'generator' command into a
    # 'consumer' command under a lock file and a watchdog timeout.
    self.main = nilmtools.pipewatch.main

    # Fix: dropped extraneous f-prefixes on strings with no placeholders.
    self.fail("")
    self.ok("--help")

    lock = "tests/pipewatch.lock"
    lk = f"--lock {lock}"

    # Remove any stale lock file left over from a previous run.
    try:
        os.unlink(lock)
    except OSError:
        pass

    # try locking so pipewatch will exit (with code 0)
    lockfile = open(lock, "w")
    nilmdb.utils.lock.exclusive_lock(lockfile)
    self.ok(f"{lk} true true")
    self.contain("pipewatch process already running")
    os.unlink(lock)

    # have pipewatch remove its own lock to trigger error later
    self.ok(f"{lk} 'rm {lock}' true")

    # various cases to get coverage
    self.ok(f"{lk} true 'cat >/dev/null'")
    self.contain("generator returned 0, consumer returned 0")
    self.fail(f"{lk} false true")
    self.contain("generator returned 1, consumer returned 0")
    self.fail(f"{lk} false false")
    self.contain("generator returned 1, consumer returned 1")
    self.fail(f"{lk} true false")
    self.contain("generator returned 0, consumer returned 1")
    self.fail(f"{lk} 'kill -15 $$' true")
    self.ok(f"{lk} 'sleep 1 ; echo hi' 'cat >/dev/null'")
    self.ok(f"{lk} 'echo hi' 'cat >/dev/null'")
    self.fail(f"{lk} --timeout 0.5 'sleep 10 ; echo hi' 'cat >/dev/null'")
    self.fail(f"{lk} 'yes' 'head -1 >/dev/null'")
    self.fail(f"{lk} false 'exec 2>&-; trap \"sleep 10\" 0 15 ; sleep 10'")

def test_11_cleanup(self): def test_11_cleanup(self):
self.main = nilmtools.cleanup.main self.main = nilmtools.cleanup.main
client = nilmdb.client.Client(url=self.url) client = nilmdb.client.Client(url=self.url)


Loading…
Cancel
Save