dockerlib: allow non-unique uid and gid of $DBUILDUSER (issue4657)

There are make targets for building Mercurial packages for various distributions using docker. One of the preparation steps before building is to create, inside the docker image, a user with the same uid/gid as the current user on the host system, so that the resulting files have appropriate ownership and permissions.

It's possible to run `make docker-<distro>` as a user whose uid or gid is already present in a vanilla docker container of that distribution. For example, issue4657 is about failing to build Fedora packages as a user with uid=999 and gid=999: these ids are already used in Fedora, so groupadd fails. useradd would fail too, if the flow ever got that far (and a user with that uid already existed).

A straightforward (maybe too straightforward) way to fix this is to allow non-unique uid and gid for the new user and group that get created inside the image. I'm not sure of the implications of this, but marmoute encouraged me to try and send this patch for stable.
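The change boils down to passing the "non-unique" option when the build group and user are created inside the image. The lines below are only a sketch of that idea, not the actual dockerlib code: $DBUILDUSER comes from the message above, while $HOST_GID and $HOST_UID are made-up placeholders for the host user's ids.

    groupadd -o -g "$HOST_GID" "$DBUILDUSER"                  # -o / --non-unique: accept an already-used gid
    useradd -o -u "$HOST_UID" -g "$DBUILDUSER" "$DBUILDUSER"  # likewise for an already-used uid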

killdaemons.py
#!/usr/bin/env python
# Kill the daemon processes listed in a pid file (one pid per line).

import os, sys, time, errno, signal

if os.name == 'nt':
    import ctypes

    def _check(ret, expectederr=None):
        if ret == 0:
            winerrno = ctypes.GetLastError()
            if winerrno == expectederr:
                return True
            raise ctypes.WinError(winerrno)

    def kill(pid, logfn, tryhard=True):
        logfn('# Killing daemon process %d' % pid)
        PROCESS_TERMINATE = 1
        PROCESS_QUERY_INFORMATION = 0x400
        SYNCHRONIZE = 0x00100000
        WAIT_OBJECT_0 = 0
        WAIT_TIMEOUT = 258
        handle = ctypes.windll.kernel32.OpenProcess(
            PROCESS_TERMINATE | SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
            False, pid)
        if handle == 0:
            _check(0, 87) # err 87 when process not found
            return # process not found, already finished
        try:
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass # terminated, but process handle still available
            elif r == WAIT_TIMEOUT:
                _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
            else:
                _check(r)

            # TODO?: forcefully kill when timeout
            #        and ?shorter waiting time? when tryhard==True
            # timeout = 100 ms
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass # process is terminated
            elif r == WAIT_TIMEOUT:
                logfn('# Daemon process %d is stuck' % pid)
            else:
                _check(r) # any error
        except: # re-raises
            ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error
            raise
        _check(ctypes.windll.kernel32.CloseHandle(handle))

else:
    def kill(pid, logfn, tryhard=True):
        try:
            os.kill(pid, 0) # signal 0 only checks that the process exists
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
            if tryhard:
                for i in range(10):
                    time.sleep(0.05)
                    os.kill(pid, 0)
            else:
                time.sleep(0.1)
                os.kill(pid, 0)
            logfn('# Daemon process %d is stuck - really killing it' % pid)
            os.kill(pid, signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH: # ESRCH: process already gone
                raise

def killdaemons(pidfile, tryhard=True, remove=False, logfn=None):
    if not logfn:
        logfn = lambda s: s
    # Kill off any leftover daemon processes
    try:
        fp = open(pidfile)
        for line in fp:
            try:
                pid = int(line)
            except ValueError:
                continue
            kill(pid, logfn, tryhard)
        fp.close()
        if remove:
            os.unlink(pidfile)
    except IOError:
        pass

if __name__ == '__main__':
    if len(sys.argv) > 1:
        path, = sys.argv[1:]
    else:
        path = os.environ["DAEMON_PIDS"]
    killdaemons(path)
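For completeness, a hypothetical invocation of the script (the pid file path below is only an example). As the __main__ block above shows, the script takes an optional path argument and otherwise falls back to the DAEMON_PIDS environment variable:

    python killdaemons.py /tmp/hgtests/daemon.pids
    DAEMON_PIDS=/tmp/hgtests/daemon.pids python killdaemons.py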