share: wrap bmstore._writerepo for transaction sensitivity (issue4940)

46dec89fe888 made 'bmstore.write()' transaction sensitive, so that the
original bookmarks are restored correctly when a transaction fails.
For example, shelve and unshelve involve the steps below:

before 46dec89fe888:

1. move the active bookmark forward at the internal rebase
2. 'bmstore.write()' writes the updated bookmarks into .hg/bookmarks
3. roll back the transaction to remove the internal commits
4. restore the updated bookmarks manually

after 46dec89fe888:

1. move the active bookmark forward at the internal rebase
2. 'bmstore.write()' doesn't write the updated bookmarks into
   .hg/bookmarks (they are written into .hg/bookmarks.pending instead,
   if an external hook is spawned)
3. roll back the transaction to remove the internal commits
4. .hg/bookmarks is already clean, because it isn't changed while the
   transaction is running: see (2) above

But if shelve or unshelve is executed in a repository created with
"shared bookmarks" ("hg share -B"), this doesn't work as expected,
because:

- the share extension makes 'bmstore.write()' write updated bookmarks
  into .hg/bookmarks of the share source repository regardless of
  transaction activity, and
- the intentional transaction failure at the end of shelve/unshelve
  doesn't restore the already updated .hg/bookmarks of the share source

This patch makes the share extension wrap 'bmstore._writerepo()'
instead of 'bmstore.write()', because the former is what actually
writes bookmark changes out.
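The shape of such a wrap can be pictured with a minimal sketch. It uses
the real 'extensions.wrapfunction()' API, but the '_getsrcrepo()' helper
below is hypothetical, standing in for however the extension actually
resolves the share source:

from mercurial import bookmarks, extensions

def _getsrcrepo(repo):
    # Hypothetical helper for this sketch: return the share source
    # repository when bookmarks are shared ("hg share -B"), else None.
    # (The real extension derives this from .hg/sharedpath.)
    return None

def _writerepo(orig, self, repo):
    # Redirect only the final write to the share source. Transaction
    # handling in 'bmstore.write()', including .hg/bookmarks.pending,
    # is untouched because write() itself is no longer wrapped.
    srcrepo = _getsrcrepo(repo)
    orig(self, srcrepo if srcrepo is not None else repo)

def extsetup(ui):
    extensions.wrapfunction(bookmarks.bmstore, '_writerepo', _writerepo)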

killdaemons.py
#!/usr/bin/env python
import os, sys, time, errno, signal
if os.name == 'nt':
    import ctypes

    def _check(ret, expectederr=None):
        if ret == 0:
            winerrno = ctypes.GetLastError()
            if winerrno == expectederr:
                return True
            raise ctypes.WinError(winerrno)

    def kill(pid, logfn, tryhard=True):
        logfn('# Killing daemon process %d' % pid)
        PROCESS_TERMINATE = 1
        PROCESS_QUERY_INFORMATION = 0x400
        SYNCHRONIZE = 0x00100000
        WAIT_OBJECT_0 = 0
        WAIT_TIMEOUT = 258
        handle = ctypes.windll.kernel32.OpenProcess(
            PROCESS_TERMINATE | SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
            False, pid)
        if handle == 0:
            _check(0, 87) # err 87 when process not found
            return # process not found, already finished
        try:
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass # terminated, but process handle still available
            elif r == WAIT_TIMEOUT:
                _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
            else:
                _check(r)

            # TODO?: forcefully kill when timeout
            #        and ?shorter waiting time? when tryhard==True
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            # timeout = 100 ms
            if r == WAIT_OBJECT_0:
                pass # process is terminated
            elif r == WAIT_TIMEOUT:
                logfn('# Daemon process %d is stuck' % pid)
            else:
                _check(r) # any error
        except: # re-raises
            ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error
            raise
        _check(ctypes.windll.kernel32.CloseHandle(handle))

else:
    def kill(pid, logfn, tryhard=True):
        try:
            os.kill(pid, 0)
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
            if tryhard:
                for i in range(10):
                    time.sleep(0.05)
                    os.kill(pid, 0)
            else:
                time.sleep(0.1)
                os.kill(pid, 0)
            logfn('# Daemon process %d is stuck - really killing it' % pid)
            os.kill(pid, signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise
def killdaemons(pidfile, tryhard=True, remove=False, logfn=None):
    if not logfn:
        logfn = lambda s: s
    # Kill off any leftover daemon processes
    try:
        fp = open(pidfile)
        for line in fp:
            try:
                pid = int(line)
            except ValueError:
                continue
            kill(pid, logfn, tryhard)
        fp.close()
        if remove:
            os.unlink(pidfile)
    except IOError:
        pass

if __name__ == '__main__':
    if len(sys.argv) > 1:
        path, = sys.argv[1:]
    else:
        path = os.environ["DAEMON_PIDS"]
    killdaemons(path)
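
For reference, the module can also be imported by a test harness and
pointed at an explicit pid file; the path below is illustrative, not
part of the script:

import sys
import killdaemons

# Terminate any daemons recorded during a test run, log progress to
# stderr, and delete the pid file afterwards.
killdaemons.killdaemons('/tmp/daemon.pids', tryhard=True, remove=True,
                        logfn=lambda msg: sys.stderr.write(msg + '\n'))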