##// END OF EJS Templates
hooks: provide access to transaction changes for internal hooks...
Joerg Sonnenberger -
r45350:09da5cf4 default
parent child Browse files
Show More
@@ -1,336 +1,340 b''
1 # hook.py - hook support for mercurial
1 # hook.py - hook support for mercurial
2 #
2 #
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import sys
12 import sys
13
13
14 from .i18n import _
14 from .i18n import _
15 from .pycompat import getattr
15 from .pycompat import getattr
16 from . import (
16 from . import (
17 demandimport,
17 demandimport,
18 encoding,
18 encoding,
19 error,
19 error,
20 extensions,
20 extensions,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import (
24 from .utils import (
25 procutil,
25 procutil,
26 resourceutil,
26 resourceutil,
27 stringutil,
27 stringutil,
28 )
28 )
29
29
30
30
def pythonhook(ui, repo, htype, hname, funcname, args, throw):
    '''call python hook. hook is callable object, looked up as
    name in python module. if callable returns "true", hook
    fails, else passes. if hook raises exception, treated as
    hook failure. exception propagates if throw is "true".

    reason for "true" meaning "hook failed" is so that
    unmodified commands (e.g. mercurial.commands.update) can
    be run as hooks without wrappers to convert return values.'''

    if callable(funcname):
        # hook registered as an in-process callable; synthesize a dotted
        # name for log/error messages
        obj = funcname
        funcname = pycompat.sysbytes(obj.__module__ + "." + obj.__name__)
    else:
        # hook given as b"module.func"; import the module and resolve func
        d = funcname.rfind(b'.')
        if d == -1:
            raise error.HookLoadError(
                _(b'%s hook is invalid: "%s" not in a module')
                % (hname, funcname)
            )
        modname = funcname[:d]
        oldpaths = sys.path
        if resourceutil.mainfrozen():
            # binary installs require sys.path manipulation
            modpath, modfile = os.path.split(modname)
            if modpath and modfile:
                sys.path = sys.path[:] + [modpath]
                modname = modfile
        with demandimport.deactivated():
            try:
                obj = __import__(pycompat.sysstr(modname))
            except (ImportError, SyntaxError):
                # keep the first failure's exc_info so both tracebacks can
                # be reported if the fallback import fails too
                e1 = sys.exc_info()
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % pycompat.sysstr(modname))
                except (ImportError, SyntaxError):
                    e2 = sys.exc_info()
                    if ui.tracebackflag:
                        ui.warn(
                            _(
                                b'exception from first failed import '
                                b'attempt:\n'
                            )
                        )
                    ui.traceback(e1)
                    if ui.tracebackflag:
                        ui.warn(
                            _(
                                b'exception from second failed import '
                                b'attempt:\n'
                            )
                        )
                    ui.traceback(e2)

                    if not ui.tracebackflag:
                        tracebackhint = _(
                            b'run with --traceback for stack trace'
                        )
                    else:
                        tracebackhint = None
                    raise error.HookLoadError(
                        _(b'%s hook is invalid: import of "%s" failed')
                        % (hname, modname),
                        hint=tracebackhint,
                    )
        # undo the frozen-binary sys.path tweak made above
        sys.path = oldpaths
        try:
            # walk the remaining dotted attributes down to the function
            for p in funcname.split(b'.')[1:]:
                obj = getattr(obj, p)
        except AttributeError:
            raise error.HookLoadError(
                _(b'%s hook is invalid: "%s" is not defined')
                % (hname, funcname)
            )
        if not callable(obj):
            raise error.HookLoadError(
                _(b'%s hook is invalid: "%s" is not callable')
                % (hname, funcname)
            )

    ui.note(_(b"calling hook %s: %s\n") % (hname, funcname))
    starttime = util.timer()

    try:
        r = obj(ui=ui, repo=repo, hooktype=htype, **pycompat.strkwargs(args))
    except Exception as exc:
        if isinstance(exc, error.Abort):
            ui.warn(_(b'error: %s hook failed: %s\n') % (hname, exc.args[0]))
        else:
            ui.warn(
                _(b'error: %s hook raised an exception: %s\n')
                % (hname, stringutil.forcebytestr(exc))
            )
        if throw:
            raise
        if not ui.tracebackflag:
            ui.warn(_(b'(run with --traceback for stack trace)\n'))
        ui.traceback()
        # (result, raised) -- exception counts as both failure and raise
        return True, True
    finally:
        # always log the duration, even when the hook raised
        duration = util.timer() - starttime
        ui.log(
            b'pythonhook',
            b'pythonhook-%s: %s finished in %0.2f seconds\n',
            htype,
            funcname,
            duration,
        )
    if r:
        if throw:
            raise error.HookAbort(_(b'%s hook failed') % hname)
        ui.warn(_(b'warning: %s hook failed\n') % hname)
    return r, False
145
145
146
146
def _exthook(ui, repo, htype, name, cmd, args, throw):
    """Run the external (shell) hook `cmd` for hook `name` of type `htype`.

    Hook arguments are exported to the child process as HG_* environment
    variables.  Returns the command's exit status; raises HookAbort when
    `throw` is set and the command exited non-zero.
    """
    starttime = util.timer()
    env = {}

    # make in-memory changes visible to external process
    if repo is not None:
        tr = repo.currenttransaction()
        repo.dirstate.write(tr)
        if tr and tr.writepending():
            env[b'HG_PENDING'] = repo.root
    env[b'HG_HOOKTYPE'] = htype
    env[b'HG_HOOKNAME'] = name

    for k, v in pycompat.iteritems(args):
        # transaction changes can accumulate MBs of data, so skip it
        # for external hooks
        if k == b'changes':
            continue
        # callables are lazily-computed values; evaluate them now
        if callable(v):
            v = v()
        if isinstance(v, (dict, list)):
            v = stringutil.pprint(v)
        env[b'HG_' + k.upper()] = v

    if ui.configbool(b'hooks', b'tonative.%s' % name, False):
        oldcmd = cmd
        cmd = procutil.shelltonative(cmd, env)
        if cmd != oldcmd:
            ui.note(_(b'converting hook "%s" to native\n') % name)

    ui.note(_(b"running hook %s: %s\n") % (name, cmd))

    # run from the repo root when there is a repo, else the current dir
    if repo:
        cwd = repo.root
    else:
        cwd = encoding.getcwd()
    r = ui.system(cmd, environ=env, cwd=cwd, blockedtag=b'exthook-%s' % (name,))

    duration = util.timer() - starttime
    ui.log(
        b'exthook',
        b'exthook-%s: %s finished in %0.2f seconds\n',
        name,
        cmd,
        duration,
    )
    if r:
        desc = procutil.explainexit(r)
        if throw:
            raise error.HookAbort(_(b'%s hook %s') % (name, desc))
        ui.warn(_(b'warning: %s hook %s\n') % (name, desc))
    return r
195
199
196
200
# represent an untrusted hook command
# (sentinel compared with `is`; such hooks are refused at run time)
_fromuntrusted = object()
199
203
200
204
def _allhooks(ui):
    """return a list of (hook-id, cmd) pairs sorted by priority"""
    hooks = _hookitems(ui)
    # Be careful in this section, propagating the real commands from untrusted
    # sources would create a security vulnerability, make sure anything altered
    # in that section uses "_fromuntrusted" as its command.
    for name, untrustedvalue in _hookitems(ui, _untrusted=True).items():
        trustedvalue = hooks.get(name, (None, None, name, _fromuntrusted))
        if untrustedvalue != trustedvalue:
            # hook differs from (or is absent in) the trusted config:
            # keep the sort key but mask the command with the sentinel
            priority, order, key, _cmd = trustedvalue
            hooks[name] = (priority, order, key, _fromuntrusted)
    # (end of the security sensitive section)
    return [(key, cmd) for _prio, _order, key, cmd in sorted(hooks.values())]
215
219
216
220
217 def _hookitems(ui, _untrusted=False):
221 def _hookitems(ui, _untrusted=False):
218 """return all hooks items ready to be sorted"""
222 """return all hooks items ready to be sorted"""
219 hooks = {}
223 hooks = {}
220 for name, cmd in ui.configitems(b'hooks', untrusted=_untrusted):
224 for name, cmd in ui.configitems(b'hooks', untrusted=_untrusted):
221 if name.startswith(b'priority.') or name.startswith(b'tonative.'):
225 if name.startswith(b'priority.') or name.startswith(b'tonative.'):
222 continue
226 continue
223
227
224 priority = ui.configint(b'hooks', b'priority.%s' % name, 0)
228 priority = ui.configint(b'hooks', b'priority.%s' % name, 0)
225 hooks[name] = (-priority, len(hooks), name, cmd)
229 hooks[name] = (-priority, len(hooks), name, cmd)
226 return hooks
230 return hooks
227
231
228
232
# Global toggle for redirecting hook stdout to stderr; flipped via
# redirect() and honored by redirect_stdio().
_redirect = False
230
234
231
235
def redirect(state):
    """Enable or disable redirection of hook stdout to stderr."""
    global _redirect
    _redirect = state
235
239
236
240
def hashook(ui, htype):
    """return True if a hook is configured for 'htype'"""
    if not ui.callhooks:
        return False
    # a hook matches when its name (up to the first dot) equals the type
    # and it has a non-empty command
    return any(
        cmd and hname.split(b'.')[0] == htype
        for hname, cmd in _allhooks(ui)
    )
245
249
246
250
def hook(ui, repo, htype, throw=False, **args):
    """Run every configured hook matching `htype`; return a truthy value
    if any of them failed."""
    if not ui.callhooks:
        return False

    # collect hooks whose name prefix (before any '.') matches the type
    matching = [
        (hname, cmd)
        for hname, cmd in _allhooks(ui)
        if cmd and hname.split(b'.')[0] == htype
    ]

    res = runhooks(ui, repo, htype, matching, throw=throw, **args)
    # fold the per-hook results; keep the last truthy result value
    r = False
    for hname, _cmd in matching:
        r = res[hname][0] or r
    return r
261
265
262
266
@contextlib.contextmanager
def redirect_stdio():
    """Redirects stdout to stderr, if possible."""

    # -1 means "no redirection performed"; also the guard for the restore
    oldstdout = -1
    try:
        if _redirect:
            try:
                stdoutno = procutil.stdout.fileno()
                stderrno = procutil.stderr.fileno()
                # temporarily redirect stdout to stderr, if possible
                if stdoutno >= 0 and stderrno >= 0:
                    procutil.stdout.flush()
                    oldstdout = os.dup(stdoutno)
                    os.dup2(stderrno, stdoutno)
            except (OSError, AttributeError):
                # files seem to be bogus, give up on redirecting (WSGI, etc)
                pass

        yield

    finally:
        # The stderr is fully buffered on Windows when connected to a pipe.
        # A forcible flush is required to make small stderr data in the
        # remote side available to the client immediately.
        procutil.stderr.flush()

        # oldstdout >= 0 implies stdoutno was successfully assigned above
        if _redirect and oldstdout >= 0:
            procutil.stdout.flush()  # write hook output to stderr fd
            os.dup2(oldstdout, stdoutno)
            os.close(oldstdout)
294
298
295
299
def runhooks(ui, repo, htype, hooks, throw=False, **args):
    """Run the given list of (hookname, cmd) hooks of type `htype`.

    Returns a dict mapping hook name to a (result, raised) pair where
    `result` is truthy on failure and `raised` tells whether a Python
    hook raised an exception.
    """
    args = pycompat.byteskwargs(args)
    res = {}

    with redirect_stdio():
        for hname, cmd in hooks:
            if cmd is _fromuntrusted:
                # command came from an untrusted config source; never run it
                if throw:
                    raise error.HookAbort(
                        _(b'untrusted hook %s not executed') % hname,
                        hint=_(b"see 'hg help config.trusted'"),
                    )
                ui.warn(_(b'warning: untrusted hook %s not executed\n') % hname)
                r = 1
                raised = False
            elif callable(cmd):
                # in-process hook registered as a Python callable
                r, raised = pythonhook(ui, repo, htype, hname, cmd, args, throw)
            elif cmd.startswith(b'python:'):
                # "python:/path/to/file.py:func" or "python:module.func"
                if cmd.count(b':') >= 2:
                    path, cmd = cmd[7:].rsplit(b':', 1)
                    path = util.expandpath(path)
                    if repo:
                        path = os.path.join(repo.root, path)
                    try:
                        mod = extensions.loadpath(path, b'hghook.%s' % hname)
                    except Exception:
                        ui.write(_(b"loading %s hook failed:\n") % hname)
                        raise
                    hookfn = getattr(mod, cmd)
                else:
                    # dotted name; pythonhook() will do the import/lookup
                    hookfn = cmd[7:].strip()
                r, raised = pythonhook(
                    ui, repo, htype, hname, hookfn, args, throw
                )
            else:
                # anything else is treated as a shell command
                r = _exthook(ui, repo, htype, hname, cmd, args, throw)
                raised = False

            res[hname] = r, raised

    return res
@@ -1,3828 +1,3829 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .pycompat import (
25 from .pycompat import (
26 delattr,
26 delattr,
27 getattr,
27 getattr,
28 )
28 )
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 branchmap,
31 branchmap,
32 bundle2,
32 bundle2,
33 changegroup,
33 changegroup,
34 color,
34 color,
35 context,
35 context,
36 dirstate,
36 dirstate,
37 dirstateguard,
37 dirstateguard,
38 discovery,
38 discovery,
39 encoding,
39 encoding,
40 error,
40 error,
41 exchange,
41 exchange,
42 extensions,
42 extensions,
43 filelog,
43 filelog,
44 hook,
44 hook,
45 lock as lockmod,
45 lock as lockmod,
46 match as matchmod,
46 match as matchmod,
47 merge as mergemod,
47 merge as mergemod,
48 mergeutil,
48 mergeutil,
49 namespaces,
49 namespaces,
50 narrowspec,
50 narrowspec,
51 obsolete,
51 obsolete,
52 pathutil,
52 pathutil,
53 phases,
53 phases,
54 pushkey,
54 pushkey,
55 pycompat,
55 pycompat,
56 rcutil,
56 rcutil,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store as storemod,
62 store as storemod,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 )
69 )
70
70
71 from .interfaces import (
71 from .interfaces import (
72 repository,
72 repository,
73 util as interfaceutil,
73 util as interfaceutil,
74 )
74 )
75
75
76 from .utils import (
76 from .utils import (
77 hashutil,
77 hashutil,
78 procutil,
78 procutil,
79 stringutil,
79 stringutil,
80 )
80 )
81
81
82 from .revlogutils import constants as revlogconst
82 from .revlogutils import constants as revlogconst
83
83
# convenience aliases for frequently used helpers
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
92
92
93
93
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            # accessed on the class rather than an instance
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        # always store on the unfiltered repo so all views share the value
        return super(_basefilecache, self).set(repo.unfiltered(), value)
111
111
112
112
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            # register as vfs-relative ('plain') in the global registry
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        # resolve relative to the .hg directory (vfs)
        return obj.vfs.join(fname)
123
123
124
124
class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            # register as store-relative ('') in the global registry
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        # resolve relative to .hg/store (svfs)
        return obj.sjoin(fname)
135
135
136
136
class mixedrepostorecache(_basefilecache):
    """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        # each cached entry is a (fname, location) pair; dispatch on the
        # location to pick the right vfs
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)
156
156
157
157
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    # filecache entries always live on the unfiltered repo
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
167
167
168
168
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache via propertycache
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: delegate to the unfiltered repo's attribute
        return getattr(unfi, self.name)
177
177
178
178
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # cache on the (possibly filtered) instance itself, not on the
        # unfiltered repo
        object.__setattr__(obj, self.name, value)
184
184
185
185
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfiltered = repo.unfiltered()
    return name in vars(unfiltered)
189
189
190
190
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered twin
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)

    return wrapper
198
198
199
199
# capabilities advertised by a modern local peer
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
# legacy peers additionally support the old changegroupsubset command
legacycaps = moderncaps.union({b'changegroupsubset'})
209
209
210
210
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    """Command executor that invokes methods directly on a local peer."""

    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # enforce the executor life-cycle before doing any work
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            f.set_result(fn(**pycompat.strkwargs(args)))
        except Exception:
            # resolve the future with the raised exception instead
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
255
255
256
256
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # Use the "served" view so secret/hidden changesets stay invisible to
        # peers, mirroring what a remote client would observe.
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
395
395
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Restrict to the legacy capability set so tests exercise the old
        # wire-protocol code paths.
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.
425
425
426
426
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow to store extra
# information for revision without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository use persistent nodemap for the changelog and the manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirements.
featuresetupfuncs = set()
459
459
460
460
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produces types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
669
669
670
670
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    # Respect the global switch that disables reading per-repo hgrc files.
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        # A missing/unreadable .hg/hgrc simply means no extra config.
        return False
690
690
691
691
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            # Only auto-enable when the user hasn't configured the extension
            # explicitly (e.g. to disable it).
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')
718
718
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
741
741
742
742
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize.

    Returns ``None``; this function is called purely for its validation
    side effect.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        # Entries that are empty or don't begin with an alphanumeric
        # character can only come from a mangled ``.hg/requires`` file.
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
772
772
773
773
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    wantssparse = b'exp-sparse' in requirements
    if wantssparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
795
795
796
796
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Ancient repos without the 'store' requirement keep everything
    # directly under .hg/.
    if b'store' not in requirements:
        return storemod.basicstore(path, vfstype)

    if b'fncache' in requirements:
        return storemod.fncachestore(
            path, vfstype, b'dotencode' in requirements
        )

    return storemod.encodedstore(path, vfstype)
808
808
809
809
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    ``requirements`` is the set of repository requirements and ``features``
    the set of repository capabilities; both influence which options are
    emitted. The returned dict is used to influence behavior of the storage
    layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    # The requirement flag takes precedence over the experimental config
    # knob for choosing where copy metadata is stored.
    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
844
844
845
845
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Reads repository requirements plus a number of ``format``, ``storage``,
    ``experimental`` and ``devel`` config options and returns a dict of
    options consumed by the revlog storage layer. May raise
    ``error.Abort`` on out-of-range compression level settings.
    """

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    # An unset (None) config value means "derive from generaldelta config".
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    # A negative value means "no limit"; only propagate real limits.
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        # sparse-revlog implies generaldelta even if the requirement
        # is absent from .hg/requires.
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen (overrides the sparse default)
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        # zlib only supports compression levels 0-9.
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        # zstd supports compression levels 0-22.
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
        if ui.configbool(b'devel', b'persistent-nodemap'):
            options[b'devel-force-nodemap'] = True

    return options
949
949
950
950
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    ``kwargs`` is accepted (and ignored) so this factory's signature is
    interchangeable with the other ``REPO_INTERFACES`` factories.
    """
    return localrepository
954
954
955
955
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Obtain the filelog for a tracked file at ``path``."""
        # Store paths are relative; drop a single leading slash if present.
        # The old check ``path[0] == b'/'`` was broken on Python 3, where
        # indexing bytes yields an int, so it never matched.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)
965
965
966
966
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Obtain the narrow filelog for a tracked file at ``path``."""
        # Store paths are relative; drop a single leading slash if present.
        # The old check ``path[0] == b'/'`` was broken on Python 3, where
        # indexing bytes yields an int, so it never matched.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
976
976
977
977
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    # Revlog-backed storage always provides these capabilities.
    features.update(
        {
            repository.REPO_FEATURE_REVLOG_FILE_STORAGE,
            repository.REPO_FEATURE_STREAM_CLONE,
        }
    )

    isnarrow = repository.NARROW_REQUIREMENT in requirements
    return revlognarrowfilestorage if isnarrow else revlogfilestorage
987
987
988
988
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped (e.g. by extensions) after this list is built.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
998
998
999
999
1000 @interfaceutil.implementer(repository.ilocalrepositorymain)
1000 @interfaceutil.implementer(repository.ilocalrepositorymain)
1001 class localrepository(object):
1001 class localrepository(object):
1002 """Main class for representing local repositories.
1002 """Main class for representing local repositories.
1003
1003
1004 All local repositories are instances of this class.
1004 All local repositories are instances of this class.
1005
1005
1006 Constructed on its own, instances of this class are not usable as
1006 Constructed on its own, instances of this class are not usable as
1007 repository objects. To obtain a usable repository object, call
1007 repository objects. To obtain a usable repository object, call
1008 ``hg.repository()``, ``localrepo.instance()``, or
1008 ``hg.repository()``, ``localrepo.instance()``, or
1009 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1009 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1010 ``instance()`` adds support for creating new repositories.
1010 ``instance()`` adds support for creating new repositories.
1011 ``hg.repository()`` adds more extension integration, including calling
1011 ``hg.repository()`` adds more extension integration, including calling
1012 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1012 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1013 used.
1013 used.
1014 """
1014 """
1015
1015
    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.

    # Requirements that affect the on-disk storage format.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    # Full set of requirements this class can open: on-disk formats plus
    # working-copy/sharing related requirements.
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
1061
1061
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        # Name of the active repoview filter; None means unfiltered.
        self.filtername = None

        # Under the check-locks devel mode, wrap vfs.audit so that writes
        # without the appropriate lock emit develwarn()s.
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        # How copy/rename metadata is stored; only the sidedata mode is
        # tracked here, other modes are resolved at the storage layer.
        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'
1210
1210
1211 def _getvfsward(self, origfunc):
1211 def _getvfsward(self, origfunc):
1212 """build a ward for self.vfs"""
1212 """build a ward for self.vfs"""
1213 rref = weakref.ref(self)
1213 rref = weakref.ref(self)
1214
1214
1215 def checkvfs(path, mode=None):
1215 def checkvfs(path, mode=None):
1216 ret = origfunc(path, mode=mode)
1216 ret = origfunc(path, mode=mode)
1217 repo = rref()
1217 repo = rref()
1218 if (
1218 if (
1219 repo is None
1219 repo is None
1220 or not util.safehasattr(repo, b'_wlockref')
1220 or not util.safehasattr(repo, b'_wlockref')
1221 or not util.safehasattr(repo, b'_lockref')
1221 or not util.safehasattr(repo, b'_lockref')
1222 ):
1222 ):
1223 return
1223 return
1224 if mode in (None, b'r', b'rb'):
1224 if mode in (None, b'r', b'rb'):
1225 return
1225 return
1226 if path.startswith(repo.path):
1226 if path.startswith(repo.path):
1227 # truncate name relative to the repository (.hg)
1227 # truncate name relative to the repository (.hg)
1228 path = path[len(repo.path) + 1 :]
1228 path = path[len(repo.path) + 1 :]
1229 if path.startswith(b'cache/'):
1229 if path.startswith(b'cache/'):
1230 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1230 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1231 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1231 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1232 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1232 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1233 # journal is covered by 'lock'
1233 # journal is covered by 'lock'
1234 if repo._currentlock(repo._lockref) is None:
1234 if repo._currentlock(repo._lockref) is None:
1235 repo.ui.develwarn(
1235 repo.ui.develwarn(
1236 b'write with no lock: "%s"' % path,
1236 b'write with no lock: "%s"' % path,
1237 stacklevel=3,
1237 stacklevel=3,
1238 config=b'check-locks',
1238 config=b'check-locks',
1239 )
1239 )
1240 elif repo._currentlock(repo._wlockref) is None:
1240 elif repo._currentlock(repo._wlockref) is None:
1241 # rest of vfs files are covered by 'wlock'
1241 # rest of vfs files are covered by 'wlock'
1242 #
1242 #
1243 # exclude special files
1243 # exclude special files
1244 for prefix in self._wlockfreeprefix:
1244 for prefix in self._wlockfreeprefix:
1245 if path.startswith(prefix):
1245 if path.startswith(prefix):
1246 return
1246 return
1247 repo.ui.develwarn(
1247 repo.ui.develwarn(
1248 b'write with no wlock: "%s"' % path,
1248 b'write with no wlock: "%s"' % path,
1249 stacklevel=3,
1249 stacklevel=3,
1250 config=b'check-locks',
1250 config=b'check-locks',
1251 )
1251 )
1252 return ret
1252 return ret
1253
1253
1254 return checkvfs
1254 return checkvfs
1255
1255
1256 def _getsvfsward(self, origfunc):
1256 def _getsvfsward(self, origfunc):
1257 """build a ward for self.svfs"""
1257 """build a ward for self.svfs"""
1258 rref = weakref.ref(self)
1258 rref = weakref.ref(self)
1259
1259
1260 def checksvfs(path, mode=None):
1260 def checksvfs(path, mode=None):
1261 ret = origfunc(path, mode=mode)
1261 ret = origfunc(path, mode=mode)
1262 repo = rref()
1262 repo = rref()
1263 if repo is None or not util.safehasattr(repo, b'_lockref'):
1263 if repo is None or not util.safehasattr(repo, b'_lockref'):
1264 return
1264 return
1265 if mode in (None, b'r', b'rb'):
1265 if mode in (None, b'r', b'rb'):
1266 return
1266 return
1267 if path.startswith(repo.sharedpath):
1267 if path.startswith(repo.sharedpath):
1268 # truncate name relative to the repository (.hg)
1268 # truncate name relative to the repository (.hg)
1269 path = path[len(repo.sharedpath) + 1 :]
1269 path = path[len(repo.sharedpath) + 1 :]
1270 if repo._currentlock(repo._lockref) is None:
1270 if repo._currentlock(repo._lockref) is None:
1271 repo.ui.develwarn(
1271 repo.ui.develwarn(
1272 b'write with no lock: "%s"' % path, stacklevel=4
1272 b'write with no lock: "%s"' % path, stacklevel=4
1273 )
1273 )
1274 return ret
1274 return ret
1275
1275
1276 return checksvfs
1276 return checksvfs
1277
1277
1278 def close(self):
1278 def close(self):
1279 self._writecaches()
1279 self._writecaches()
1280
1280
1281 def _writecaches(self):
1281 def _writecaches(self):
1282 if self._revbranchcache:
1282 if self._revbranchcache:
1283 self._revbranchcache.write()
1283 self._revbranchcache.write()
1284
1284
1285 def _restrictcapabilities(self, caps):
1285 def _restrictcapabilities(self, caps):
1286 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1286 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1287 caps = set(caps)
1287 caps = set(caps)
1288 capsblob = bundle2.encodecaps(
1288 capsblob = bundle2.encodecaps(
1289 bundle2.getrepocaps(self, role=b'client')
1289 bundle2.getrepocaps(self, role=b'client')
1290 )
1290 )
1291 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1291 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1292 return caps
1292 return caps
1293
1293
1294 def _writerequirements(self):
1294 def _writerequirements(self):
1295 scmutil.writerequires(self.vfs, self.requirements)
1295 scmutil.writerequires(self.vfs, self.requirements)
1296
1296
1297 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1297 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1298 # self -> auditor -> self._checknested -> self
1298 # self -> auditor -> self._checknested -> self
1299
1299
1300 @property
1300 @property
1301 def auditor(self):
1301 def auditor(self):
1302 # This is only used by context.workingctx.match in order to
1302 # This is only used by context.workingctx.match in order to
1303 # detect files in subrepos.
1303 # detect files in subrepos.
1304 return pathutil.pathauditor(self.root, callback=self._checknested)
1304 return pathutil.pathauditor(self.root, callback=self._checknested)
1305
1305
1306 @property
1306 @property
1307 def nofsauditor(self):
1307 def nofsauditor(self):
1308 # This is only used by context.basectx.match in order to detect
1308 # This is only used by context.basectx.match in order to detect
1309 # files in subrepos.
1309 # files in subrepos.
1310 return pathutil.pathauditor(
1310 return pathutil.pathauditor(
1311 self.root, callback=self._checknested, realfs=False, cached=True
1311 self.root, callback=self._checknested, realfs=False, cached=True
1312 )
1312 )
1313
1313
1314 def _checknested(self, path):
1314 def _checknested(self, path):
1315 """Determine if path is a legal nested repository."""
1315 """Determine if path is a legal nested repository."""
1316 if not path.startswith(self.root):
1316 if not path.startswith(self.root):
1317 return False
1317 return False
1318 subpath = path[len(self.root) + 1 :]
1318 subpath = path[len(self.root) + 1 :]
1319 normsubpath = util.pconvert(subpath)
1319 normsubpath = util.pconvert(subpath)
1320
1320
1321 # XXX: Checking against the current working copy is wrong in
1321 # XXX: Checking against the current working copy is wrong in
1322 # the sense that it can reject things like
1322 # the sense that it can reject things like
1323 #
1323 #
1324 # $ hg cat -r 10 sub/x.txt
1324 # $ hg cat -r 10 sub/x.txt
1325 #
1325 #
1326 # if sub/ is no longer a subrepository in the working copy
1326 # if sub/ is no longer a subrepository in the working copy
1327 # parent revision.
1327 # parent revision.
1328 #
1328 #
1329 # However, it can of course also allow things that would have
1329 # However, it can of course also allow things that would have
1330 # been rejected before, such as the above cat command if sub/
1330 # been rejected before, such as the above cat command if sub/
1331 # is a subrepository now, but was a normal directory before.
1331 # is a subrepository now, but was a normal directory before.
1332 # The old path auditor would have rejected by mistake since it
1332 # The old path auditor would have rejected by mistake since it
1333 # panics when it sees sub/.hg/.
1333 # panics when it sees sub/.hg/.
1334 #
1334 #
1335 # All in all, checking against the working copy seems sensible
1335 # All in all, checking against the working copy seems sensible
1336 # since we want to prevent access to nested repositories on
1336 # since we want to prevent access to nested repositories on
1337 # the filesystem *now*.
1337 # the filesystem *now*.
1338 ctx = self[None]
1338 ctx = self[None]
1339 parts = util.splitpath(subpath)
1339 parts = util.splitpath(subpath)
1340 while parts:
1340 while parts:
1341 prefix = b'/'.join(parts)
1341 prefix = b'/'.join(parts)
1342 if prefix in ctx.substate:
1342 if prefix in ctx.substate:
1343 if prefix == normsubpath:
1343 if prefix == normsubpath:
1344 return True
1344 return True
1345 else:
1345 else:
1346 sub = ctx.sub(prefix)
1346 sub = ctx.sub(prefix)
1347 return sub.checknested(subpath[len(prefix) + 1 :])
1347 return sub.checknested(subpath[len(prefix) + 1 :])
1348 else:
1348 else:
1349 parts.pop()
1349 parts.pop()
1350 return False
1350 return False
1351
1351
1352 def peer(self):
1352 def peer(self):
1353 return localpeer(self) # not cached to avoid reference cycle
1353 return localpeer(self) # not cached to avoid reference cycle
1354
1354
1355 def unfiltered(self):
1355 def unfiltered(self):
1356 """Return unfiltered version of the repository
1356 """Return unfiltered version of the repository
1357
1357
1358 Intended to be overwritten by filtered repo."""
1358 Intended to be overwritten by filtered repo."""
1359 return self
1359 return self
1360
1360
1361 def filtered(self, name, visibilityexceptions=None):
1361 def filtered(self, name, visibilityexceptions=None):
1362 """Return a filtered version of a repository
1362 """Return a filtered version of a repository
1363
1363
1364 The `name` parameter is the identifier of the requested view. This
1364 The `name` parameter is the identifier of the requested view. This
1365 will return a repoview object set "exactly" to the specified view.
1365 will return a repoview object set "exactly" to the specified view.
1366
1366
1367 This function does not apply recursive filtering to a repository. For
1367 This function does not apply recursive filtering to a repository. For
1368 example calling `repo.filtered("served")` will return a repoview using
1368 example calling `repo.filtered("served")` will return a repoview using
1369 the "served" view, regardless of the initial view used by `repo`.
1369 the "served" view, regardless of the initial view used by `repo`.
1370
1370
1371 In other word, there is always only one level of `repoview` "filtering".
1371 In other word, there is always only one level of `repoview` "filtering".
1372 """
1372 """
1373 if self._extrafilterid is not None and b'%' not in name:
1373 if self._extrafilterid is not None and b'%' not in name:
1374 name = name + b'%' + self._extrafilterid
1374 name = name + b'%' + self._extrafilterid
1375
1375
1376 cls = repoview.newtype(self.unfiltered().__class__)
1376 cls = repoview.newtype(self.unfiltered().__class__)
1377 return cls(self, name, visibilityexceptions)
1377 return cls(self, name, visibilityexceptions)
1378
1378
1379 @mixedrepostorecache(
1379 @mixedrepostorecache(
1380 (b'bookmarks', b'plain'),
1380 (b'bookmarks', b'plain'),
1381 (b'bookmarks.current', b'plain'),
1381 (b'bookmarks.current', b'plain'),
1382 (b'bookmarks', b''),
1382 (b'bookmarks', b''),
1383 (b'00changelog.i', b''),
1383 (b'00changelog.i', b''),
1384 )
1384 )
1385 def _bookmarks(self):
1385 def _bookmarks(self):
1386 # Since the multiple files involved in the transaction cannot be
1386 # Since the multiple files involved in the transaction cannot be
1387 # written atomically (with current repository format), there is a race
1387 # written atomically (with current repository format), there is a race
1388 # condition here.
1388 # condition here.
1389 #
1389 #
1390 # 1) changelog content A is read
1390 # 1) changelog content A is read
1391 # 2) outside transaction update changelog to content B
1391 # 2) outside transaction update changelog to content B
1392 # 3) outside transaction update bookmark file referring to content B
1392 # 3) outside transaction update bookmark file referring to content B
1393 # 4) bookmarks file content is read and filtered against changelog-A
1393 # 4) bookmarks file content is read and filtered against changelog-A
1394 #
1394 #
1395 # When this happens, bookmarks against nodes missing from A are dropped.
1395 # When this happens, bookmarks against nodes missing from A are dropped.
1396 #
1396 #
1397 # Having this happening during read is not great, but it become worse
1397 # Having this happening during read is not great, but it become worse
1398 # when this happen during write because the bookmarks to the "unknown"
1398 # when this happen during write because the bookmarks to the "unknown"
1399 # nodes will be dropped for good. However, writes happen within locks.
1399 # nodes will be dropped for good. However, writes happen within locks.
1400 # This locking makes it possible to have a race free consistent read.
1400 # This locking makes it possible to have a race free consistent read.
1401 # For this purpose data read from disc before locking are
1401 # For this purpose data read from disc before locking are
1402 # "invalidated" right after the locks are taken. This invalidations are
1402 # "invalidated" right after the locks are taken. This invalidations are
1403 # "light", the `filecache` mechanism keep the data in memory and will
1403 # "light", the `filecache` mechanism keep the data in memory and will
1404 # reuse them if the underlying files did not changed. Not parsing the
1404 # reuse them if the underlying files did not changed. Not parsing the
1405 # same data multiple times helps performances.
1405 # same data multiple times helps performances.
1406 #
1406 #
1407 # Unfortunately in the case describe above, the files tracked by the
1407 # Unfortunately in the case describe above, the files tracked by the
1408 # bookmarks file cache might not have changed, but the in-memory
1408 # bookmarks file cache might not have changed, but the in-memory
1409 # content is still "wrong" because we used an older changelog content
1409 # content is still "wrong" because we used an older changelog content
1410 # to process the on-disk data. So after locking, the changelog would be
1410 # to process the on-disk data. So after locking, the changelog would be
1411 # refreshed but `_bookmarks` would be preserved.
1411 # refreshed but `_bookmarks` would be preserved.
1412 # Adding `00changelog.i` to the list of tracked file is not
1412 # Adding `00changelog.i` to the list of tracked file is not
1413 # enough, because at the time we build the content for `_bookmarks` in
1413 # enough, because at the time we build the content for `_bookmarks` in
1414 # (4), the changelog file has already diverged from the content used
1414 # (4), the changelog file has already diverged from the content used
1415 # for loading `changelog` in (1)
1415 # for loading `changelog` in (1)
1416 #
1416 #
1417 # To prevent the issue, we force the changelog to be explicitly
1417 # To prevent the issue, we force the changelog to be explicitly
1418 # reloaded while computing `_bookmarks`. The data race can still happen
1418 # reloaded while computing `_bookmarks`. The data race can still happen
1419 # without the lock (with a narrower window), but it would no longer go
1419 # without the lock (with a narrower window), but it would no longer go
1420 # undetected during the lock time refresh.
1420 # undetected during the lock time refresh.
1421 #
1421 #
1422 # The new schedule is as follow
1422 # The new schedule is as follow
1423 #
1423 #
1424 # 1) filecache logic detect that `_bookmarks` needs to be computed
1424 # 1) filecache logic detect that `_bookmarks` needs to be computed
1425 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1425 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1426 # 3) We force `changelog` filecache to be tested
1426 # 3) We force `changelog` filecache to be tested
1427 # 4) cachestat for `changelog` are captured (for changelog)
1427 # 4) cachestat for `changelog` are captured (for changelog)
1428 # 5) `_bookmarks` is computed and cached
1428 # 5) `_bookmarks` is computed and cached
1429 #
1429 #
1430 # The step in (3) ensure we have a changelog at least as recent as the
1430 # The step in (3) ensure we have a changelog at least as recent as the
1431 # cache stat computed in (1). As a result at locking time:
1431 # cache stat computed in (1). As a result at locking time:
1432 # * if the changelog did not changed since (1) -> we can reuse the data
1432 # * if the changelog did not changed since (1) -> we can reuse the data
1433 # * otherwise -> the bookmarks get refreshed.
1433 # * otherwise -> the bookmarks get refreshed.
1434 self._refreshchangelog()
1434 self._refreshchangelog()
1435 return bookmarks.bmstore(self)
1435 return bookmarks.bmstore(self)
1436
1436
1437 def _refreshchangelog(self):
1437 def _refreshchangelog(self):
1438 """make sure the in memory changelog match the on-disk one"""
1438 """make sure the in memory changelog match the on-disk one"""
1439 if 'changelog' in vars(self) and self.currenttransaction() is None:
1439 if 'changelog' in vars(self) and self.currenttransaction() is None:
1440 del self.changelog
1440 del self.changelog
1441
1441
1442 @property
1442 @property
1443 def _activebookmark(self):
1443 def _activebookmark(self):
1444 return self._bookmarks.active
1444 return self._bookmarks.active
1445
1445
1446 # _phasesets depend on changelog. what we need is to call
1446 # _phasesets depend on changelog. what we need is to call
1447 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1447 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1448 # can't be easily expressed in filecache mechanism.
1448 # can't be easily expressed in filecache mechanism.
1449 @storecache(b'phaseroots', b'00changelog.i')
1449 @storecache(b'phaseroots', b'00changelog.i')
1450 def _phasecache(self):
1450 def _phasecache(self):
1451 return phases.phasecache(self, self._phasedefaults)
1451 return phases.phasecache(self, self._phasedefaults)
1452
1452
1453 @storecache(b'obsstore')
1453 @storecache(b'obsstore')
1454 def obsstore(self):
1454 def obsstore(self):
1455 return obsolete.makestore(self.ui, self)
1455 return obsolete.makestore(self.ui, self)
1456
1456
1457 @storecache(b'00changelog.i')
1457 @storecache(b'00changelog.i')
1458 def changelog(self):
1458 def changelog(self):
1459 return self.store.changelog(txnutil.mayhavepending(self.root))
1459 return self.store.changelog(txnutil.mayhavepending(self.root))
1460
1460
1461 @storecache(b'00manifest.i')
1461 @storecache(b'00manifest.i')
1462 def manifestlog(self):
1462 def manifestlog(self):
1463 return self.store.manifestlog(self, self._storenarrowmatch)
1463 return self.store.manifestlog(self, self._storenarrowmatch)
1464
1464
1465 @repofilecache(b'dirstate')
1465 @repofilecache(b'dirstate')
1466 def dirstate(self):
1466 def dirstate(self):
1467 return self._makedirstate()
1467 return self._makedirstate()
1468
1468
1469 def _makedirstate(self):
1469 def _makedirstate(self):
1470 """Extension point for wrapping the dirstate per-repo."""
1470 """Extension point for wrapping the dirstate per-repo."""
1471 sparsematchfn = lambda: sparse.matcher(self)
1471 sparsematchfn = lambda: sparse.matcher(self)
1472
1472
1473 return dirstate.dirstate(
1473 return dirstate.dirstate(
1474 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1474 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1475 )
1475 )
1476
1476
1477 def _dirstatevalidate(self, node):
1477 def _dirstatevalidate(self, node):
1478 try:
1478 try:
1479 self.changelog.rev(node)
1479 self.changelog.rev(node)
1480 return node
1480 return node
1481 except error.LookupError:
1481 except error.LookupError:
1482 if not self._dirstatevalidatewarned:
1482 if not self._dirstatevalidatewarned:
1483 self._dirstatevalidatewarned = True
1483 self._dirstatevalidatewarned = True
1484 self.ui.warn(
1484 self.ui.warn(
1485 _(b"warning: ignoring unknown working parent %s!\n")
1485 _(b"warning: ignoring unknown working parent %s!\n")
1486 % short(node)
1486 % short(node)
1487 )
1487 )
1488 return nullid
1488 return nullid
1489
1489
1490 @storecache(narrowspec.FILENAME)
1490 @storecache(narrowspec.FILENAME)
1491 def narrowpats(self):
1491 def narrowpats(self):
1492 """matcher patterns for this repository's narrowspec
1492 """matcher patterns for this repository's narrowspec
1493
1493
1494 A tuple of (includes, excludes).
1494 A tuple of (includes, excludes).
1495 """
1495 """
1496 return narrowspec.load(self)
1496 return narrowspec.load(self)
1497
1497
1498 @storecache(narrowspec.FILENAME)
1498 @storecache(narrowspec.FILENAME)
1499 def _storenarrowmatch(self):
1499 def _storenarrowmatch(self):
1500 if repository.NARROW_REQUIREMENT not in self.requirements:
1500 if repository.NARROW_REQUIREMENT not in self.requirements:
1501 return matchmod.always()
1501 return matchmod.always()
1502 include, exclude = self.narrowpats
1502 include, exclude = self.narrowpats
1503 return narrowspec.match(self.root, include=include, exclude=exclude)
1503 return narrowspec.match(self.root, include=include, exclude=exclude)
1504
1504
1505 @storecache(narrowspec.FILENAME)
1505 @storecache(narrowspec.FILENAME)
1506 def _narrowmatch(self):
1506 def _narrowmatch(self):
1507 if repository.NARROW_REQUIREMENT not in self.requirements:
1507 if repository.NARROW_REQUIREMENT not in self.requirements:
1508 return matchmod.always()
1508 return matchmod.always()
1509 narrowspec.checkworkingcopynarrowspec(self)
1509 narrowspec.checkworkingcopynarrowspec(self)
1510 include, exclude = self.narrowpats
1510 include, exclude = self.narrowpats
1511 return narrowspec.match(self.root, include=include, exclude=exclude)
1511 return narrowspec.match(self.root, include=include, exclude=exclude)
1512
1512
1513 def narrowmatch(self, match=None, includeexact=False):
1513 def narrowmatch(self, match=None, includeexact=False):
1514 """matcher corresponding the the repo's narrowspec
1514 """matcher corresponding the the repo's narrowspec
1515
1515
1516 If `match` is given, then that will be intersected with the narrow
1516 If `match` is given, then that will be intersected with the narrow
1517 matcher.
1517 matcher.
1518
1518
1519 If `includeexact` is True, then any exact matches from `match` will
1519 If `includeexact` is True, then any exact matches from `match` will
1520 be included even if they're outside the narrowspec.
1520 be included even if they're outside the narrowspec.
1521 """
1521 """
1522 if match:
1522 if match:
1523 if includeexact and not self._narrowmatch.always():
1523 if includeexact and not self._narrowmatch.always():
1524 # do not exclude explicitly-specified paths so that they can
1524 # do not exclude explicitly-specified paths so that they can
1525 # be warned later on
1525 # be warned later on
1526 em = matchmod.exact(match.files())
1526 em = matchmod.exact(match.files())
1527 nm = matchmod.unionmatcher([self._narrowmatch, em])
1527 nm = matchmod.unionmatcher([self._narrowmatch, em])
1528 return matchmod.intersectmatchers(match, nm)
1528 return matchmod.intersectmatchers(match, nm)
1529 return matchmod.intersectmatchers(match, self._narrowmatch)
1529 return matchmod.intersectmatchers(match, self._narrowmatch)
1530 return self._narrowmatch
1530 return self._narrowmatch
1531
1531
1532 def setnarrowpats(self, newincludes, newexcludes):
1532 def setnarrowpats(self, newincludes, newexcludes):
1533 narrowspec.save(self, newincludes, newexcludes)
1533 narrowspec.save(self, newincludes, newexcludes)
1534 self.invalidate(clearfilecache=True)
1534 self.invalidate(clearfilecache=True)
1535
1535
1536 @unfilteredpropertycache
1536 @unfilteredpropertycache
1537 def _quick_access_changeid_null(self):
1537 def _quick_access_changeid_null(self):
1538 return {
1538 return {
1539 b'null': (nullrev, nullid),
1539 b'null': (nullrev, nullid),
1540 nullrev: (nullrev, nullid),
1540 nullrev: (nullrev, nullid),
1541 nullid: (nullrev, nullid),
1541 nullid: (nullrev, nullid),
1542 }
1542 }
1543
1543
1544 @unfilteredpropertycache
1544 @unfilteredpropertycache
1545 def _quick_access_changeid_wc(self):
1545 def _quick_access_changeid_wc(self):
1546 # also fast path access to the working copy parents
1546 # also fast path access to the working copy parents
1547 # however, only do it for filter that ensure wc is visible.
1547 # however, only do it for filter that ensure wc is visible.
1548 quick = {}
1548 quick = {}
1549 cl = self.unfiltered().changelog
1549 cl = self.unfiltered().changelog
1550 for node in self.dirstate.parents():
1550 for node in self.dirstate.parents():
1551 if node == nullid:
1551 if node == nullid:
1552 continue
1552 continue
1553 rev = cl.index.get_rev(node)
1553 rev = cl.index.get_rev(node)
1554 if rev is None:
1554 if rev is None:
1555 # unknown working copy parent case:
1555 # unknown working copy parent case:
1556 #
1556 #
1557 # skip the fast path and let higher code deal with it
1557 # skip the fast path and let higher code deal with it
1558 continue
1558 continue
1559 pair = (rev, node)
1559 pair = (rev, node)
1560 quick[rev] = pair
1560 quick[rev] = pair
1561 quick[node] = pair
1561 quick[node] = pair
1562 # also add the parents of the parents
1562 # also add the parents of the parents
1563 for r in cl.parentrevs(rev):
1563 for r in cl.parentrevs(rev):
1564 if r == nullrev:
1564 if r == nullrev:
1565 continue
1565 continue
1566 n = cl.node(r)
1566 n = cl.node(r)
1567 pair = (r, n)
1567 pair = (r, n)
1568 quick[r] = pair
1568 quick[r] = pair
1569 quick[n] = pair
1569 quick[n] = pair
1570 p1node = self.dirstate.p1()
1570 p1node = self.dirstate.p1()
1571 if p1node != nullid:
1571 if p1node != nullid:
1572 quick[b'.'] = quick[p1node]
1572 quick[b'.'] = quick[p1node]
1573 return quick
1573 return quick
1574
1574
1575 @unfilteredmethod
1575 @unfilteredmethod
1576 def _quick_access_changeid_invalidate(self):
1576 def _quick_access_changeid_invalidate(self):
1577 if '_quick_access_changeid_wc' in vars(self):
1577 if '_quick_access_changeid_wc' in vars(self):
1578 del self.__dict__['_quick_access_changeid_wc']
1578 del self.__dict__['_quick_access_changeid_wc']
1579
1579
1580 @property
1580 @property
1581 def _quick_access_changeid(self):
1581 def _quick_access_changeid(self):
1582 """an helper dictionnary for __getitem__ calls
1582 """an helper dictionnary for __getitem__ calls
1583
1583
1584 This contains a list of symbol we can recognise right away without
1584 This contains a list of symbol we can recognise right away without
1585 further processing.
1585 further processing.
1586 """
1586 """
1587 mapping = self._quick_access_changeid_null
1587 mapping = self._quick_access_changeid_null
1588 if self.filtername in repoview.filter_has_wc:
1588 if self.filtername in repoview.filter_has_wc:
1589 mapping = mapping.copy()
1589 mapping = mapping.copy()
1590 mapping.update(self._quick_access_changeid_wc)
1590 mapping.update(self._quick_access_changeid_wc)
1591 return mapping
1591 return mapping
1592
1592
1593 def __getitem__(self, changeid):
1593 def __getitem__(self, changeid):
1594 # dealing with special cases
1594 # dealing with special cases
1595 if changeid is None:
1595 if changeid is None:
1596 return context.workingctx(self)
1596 return context.workingctx(self)
1597 if isinstance(changeid, context.basectx):
1597 if isinstance(changeid, context.basectx):
1598 return changeid
1598 return changeid
1599
1599
1600 # dealing with multiple revisions
1600 # dealing with multiple revisions
1601 if isinstance(changeid, slice):
1601 if isinstance(changeid, slice):
1602 # wdirrev isn't contiguous so the slice shouldn't include it
1602 # wdirrev isn't contiguous so the slice shouldn't include it
1603 return [
1603 return [
1604 self[i]
1604 self[i]
1605 for i in pycompat.xrange(*changeid.indices(len(self)))
1605 for i in pycompat.xrange(*changeid.indices(len(self)))
1606 if i not in self.changelog.filteredrevs
1606 if i not in self.changelog.filteredrevs
1607 ]
1607 ]
1608
1608
1609 # dealing with some special values
1609 # dealing with some special values
1610 quick_access = self._quick_access_changeid.get(changeid)
1610 quick_access = self._quick_access_changeid.get(changeid)
1611 if quick_access is not None:
1611 if quick_access is not None:
1612 rev, node = quick_access
1612 rev, node = quick_access
1613 return context.changectx(self, rev, node, maybe_filtered=False)
1613 return context.changectx(self, rev, node, maybe_filtered=False)
1614 if changeid == b'tip':
1614 if changeid == b'tip':
1615 node = self.changelog.tip()
1615 node = self.changelog.tip()
1616 rev = self.changelog.rev(node)
1616 rev = self.changelog.rev(node)
1617 return context.changectx(self, rev, node)
1617 return context.changectx(self, rev, node)
1618
1618
1619 # dealing with arbitrary values
1619 # dealing with arbitrary values
1620 try:
1620 try:
1621 if isinstance(changeid, int):
1621 if isinstance(changeid, int):
1622 node = self.changelog.node(changeid)
1622 node = self.changelog.node(changeid)
1623 rev = changeid
1623 rev = changeid
1624 elif changeid == b'.':
1624 elif changeid == b'.':
1625 # this is a hack to delay/avoid loading obsmarkers
1625 # this is a hack to delay/avoid loading obsmarkers
1626 # when we know that '.' won't be hidden
1626 # when we know that '.' won't be hidden
1627 node = self.dirstate.p1()
1627 node = self.dirstate.p1()
1628 rev = self.unfiltered().changelog.rev(node)
1628 rev = self.unfiltered().changelog.rev(node)
1629 elif len(changeid) == 20:
1629 elif len(changeid) == 20:
1630 try:
1630 try:
1631 node = changeid
1631 node = changeid
1632 rev = self.changelog.rev(changeid)
1632 rev = self.changelog.rev(changeid)
1633 except error.FilteredLookupError:
1633 except error.FilteredLookupError:
1634 changeid = hex(changeid) # for the error message
1634 changeid = hex(changeid) # for the error message
1635 raise
1635 raise
1636 except LookupError:
1636 except LookupError:
1637 # check if it might have come from damaged dirstate
1637 # check if it might have come from damaged dirstate
1638 #
1638 #
1639 # XXX we could avoid the unfiltered if we had a recognizable
1639 # XXX we could avoid the unfiltered if we had a recognizable
1640 # exception for filtered changeset access
1640 # exception for filtered changeset access
1641 if (
1641 if (
1642 self.local()
1642 self.local()
1643 and changeid in self.unfiltered().dirstate.parents()
1643 and changeid in self.unfiltered().dirstate.parents()
1644 ):
1644 ):
1645 msg = _(b"working directory has unknown parent '%s'!")
1645 msg = _(b"working directory has unknown parent '%s'!")
1646 raise error.Abort(msg % short(changeid))
1646 raise error.Abort(msg % short(changeid))
1647 changeid = hex(changeid) # for the error message
1647 changeid = hex(changeid) # for the error message
1648 raise
1648 raise
1649
1649
1650 elif len(changeid) == 40:
1650 elif len(changeid) == 40:
1651 node = bin(changeid)
1651 node = bin(changeid)
1652 rev = self.changelog.rev(node)
1652 rev = self.changelog.rev(node)
1653 else:
1653 else:
1654 raise error.ProgrammingError(
1654 raise error.ProgrammingError(
1655 b"unsupported changeid '%s' of type %s"
1655 b"unsupported changeid '%s' of type %s"
1656 % (changeid, pycompat.bytestr(type(changeid)))
1656 % (changeid, pycompat.bytestr(type(changeid)))
1657 )
1657 )
1658
1658
1659 return context.changectx(self, rev, node)
1659 return context.changectx(self, rev, node)
1660
1660
1661 except (error.FilteredIndexError, error.FilteredLookupError):
1661 except (error.FilteredIndexError, error.FilteredLookupError):
1662 raise error.FilteredRepoLookupError(
1662 raise error.FilteredRepoLookupError(
1663 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1663 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1664 )
1664 )
1665 except (IndexError, LookupError):
1665 except (IndexError, LookupError):
1666 raise error.RepoLookupError(
1666 raise error.RepoLookupError(
1667 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1667 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1668 )
1668 )
1669 except error.WdirUnsupported:
1669 except error.WdirUnsupported:
1670 return context.workingctx(self)
1670 return context.workingctx(self)
1671
1671
1672 def __contains__(self, changeid):
1672 def __contains__(self, changeid):
1673 """True if the given changeid exists
1673 """True if the given changeid exists
1674
1674
1675 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1675 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1676 specified.
1676 specified.
1677 """
1677 """
1678 try:
1678 try:
1679 self[changeid]
1679 self[changeid]
1680 return True
1680 return True
1681 except error.RepoLookupError:
1681 except error.RepoLookupError:
1682 return False
1682 return False
1683
1683
1684 def __nonzero__(self):
1684 def __nonzero__(self):
1685 return True
1685 return True
1686
1686
1687 __bool__ = __nonzero__
1687 __bool__ = __nonzero__
1688
1688
1689 def __len__(self):
1689 def __len__(self):
1690 # no need to pay the cost of repoview.changelog
1690 # no need to pay the cost of repoview.changelog
1691 unfi = self.unfiltered()
1691 unfi = self.unfiltered()
1692 return len(unfi.changelog)
1692 return len(unfi.changelog)
1693
1693
1694 def __iter__(self):
1694 def __iter__(self):
1695 return iter(self.changelog)
1695 return iter(self.changelog)
1696
1696
1697 def revs(self, expr, *args):
1697 def revs(self, expr, *args):
1698 '''Find revisions matching a revset.
1698 '''Find revisions matching a revset.
1699
1699
1700 The revset is specified as a string ``expr`` that may contain
1700 The revset is specified as a string ``expr`` that may contain
1701 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1701 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1702
1702
1703 Revset aliases from the configuration are not expanded. To expand
1703 Revset aliases from the configuration are not expanded. To expand
1704 user aliases, consider calling ``scmutil.revrange()`` or
1704 user aliases, consider calling ``scmutil.revrange()`` or
1705 ``repo.anyrevs([expr], user=True)``.
1705 ``repo.anyrevs([expr], user=True)``.
1706
1706
1707 Returns a smartset.abstractsmartset, which is a list-like interface
1707 Returns a smartset.abstractsmartset, which is a list-like interface
1708 that contains integer revisions.
1708 that contains integer revisions.
1709 '''
1709 '''
1710 tree = revsetlang.spectree(expr, *args)
1710 tree = revsetlang.spectree(expr, *args)
1711 return revset.makematcher(tree)(self)
1711 return revset.makematcher(tree)(self)
1712
1712
1713 def set(self, expr, *args):
1713 def set(self, expr, *args):
1714 '''Find revisions matching a revset and emit changectx instances.
1714 '''Find revisions matching a revset and emit changectx instances.
1715
1715
1716 This is a convenience wrapper around ``revs()`` that iterates the
1716 This is a convenience wrapper around ``revs()`` that iterates the
1717 result and is a generator of changectx instances.
1717 result and is a generator of changectx instances.
1718
1718
1719 Revset aliases from the configuration are not expanded. To expand
1719 Revset aliases from the configuration are not expanded. To expand
1720 user aliases, consider calling ``scmutil.revrange()``.
1720 user aliases, consider calling ``scmutil.revrange()``.
1721 '''
1721 '''
1722 for r in self.revs(expr, *args):
1722 for r in self.revs(expr, *args):
1723 yield self[r]
1723 yield self[r]
1724
1724
1725 def anyrevs(self, specs, user=False, localalias=None):
1725 def anyrevs(self, specs, user=False, localalias=None):
1726 '''Find revisions matching one of the given revsets.
1726 '''Find revisions matching one of the given revsets.
1727
1727
1728 Revset aliases from the configuration are not expanded by default. To
1728 Revset aliases from the configuration are not expanded by default. To
1729 expand user aliases, specify ``user=True``. To provide some local
1729 expand user aliases, specify ``user=True``. To provide some local
1730 definitions overriding user aliases, set ``localalias`` to
1730 definitions overriding user aliases, set ``localalias`` to
1731 ``{name: definitionstring}``.
1731 ``{name: definitionstring}``.
1732 '''
1732 '''
1733 if specs == [b'null']:
1733 if specs == [b'null']:
1734 return revset.baseset([nullrev])
1734 return revset.baseset([nullrev])
1735 if specs == [b'.']:
1735 if specs == [b'.']:
1736 quick_data = self._quick_access_changeid.get(b'.')
1736 quick_data = self._quick_access_changeid.get(b'.')
1737 if quick_data is not None:
1737 if quick_data is not None:
1738 return revset.baseset([quick_data[0]])
1738 return revset.baseset([quick_data[0]])
1739 if user:
1739 if user:
1740 m = revset.matchany(
1740 m = revset.matchany(
1741 self.ui,
1741 self.ui,
1742 specs,
1742 specs,
1743 lookup=revset.lookupfn(self),
1743 lookup=revset.lookupfn(self),
1744 localalias=localalias,
1744 localalias=localalias,
1745 )
1745 )
1746 else:
1746 else:
1747 m = revset.matchany(None, specs, localalias=localalias)
1747 m = revset.matchany(None, specs, localalias=localalias)
1748 return m(self)
1748 return m(self)
1749
1749
1750 def url(self):
1750 def url(self):
1751 return b'file:' + self.root
1751 return b'file:' + self.root
1752
1752
1753 def hook(self, name, throw=False, **args):
1753 def hook(self, name, throw=False, **args):
1754 """Call a hook, passing this repo instance.
1754 """Call a hook, passing this repo instance.
1755
1755
1756 This a convenience method to aid invoking hooks. Extensions likely
1756 This a convenience method to aid invoking hooks. Extensions likely
1757 won't call this unless they have registered a custom hook or are
1757 won't call this unless they have registered a custom hook or are
1758 replacing code that is expected to call a hook.
1758 replacing code that is expected to call a hook.
1759 """
1759 """
1760 return hook.hook(self.ui, self, name, throw, **args)
1760 return hook.hook(self.ui, self, name, throw, **args)
1761
1761
1762 @filteredpropertycache
1762 @filteredpropertycache
1763 def _tagscache(self):
1763 def _tagscache(self):
1764 '''Returns a tagscache object that contains various tags related
1764 '''Returns a tagscache object that contains various tags related
1765 caches.'''
1765 caches.'''
1766
1766
1767 # This simplifies its cache management by having one decorated
1767 # This simplifies its cache management by having one decorated
1768 # function (this one) and the rest simply fetch things from it.
1768 # function (this one) and the rest simply fetch things from it.
1769 class tagscache(object):
1769 class tagscache(object):
1770 def __init__(self):
1770 def __init__(self):
1771 # These two define the set of tags for this repository. tags
1771 # These two define the set of tags for this repository. tags
1772 # maps tag name to node; tagtypes maps tag name to 'global' or
1772 # maps tag name to node; tagtypes maps tag name to 'global' or
1773 # 'local'. (Global tags are defined by .hgtags across all
1773 # 'local'. (Global tags are defined by .hgtags across all
1774 # heads, and local tags are defined in .hg/localtags.)
1774 # heads, and local tags are defined in .hg/localtags.)
1775 # They constitute the in-memory cache of tags.
1775 # They constitute the in-memory cache of tags.
1776 self.tags = self.tagtypes = None
1776 self.tags = self.tagtypes = None
1777
1777
1778 self.nodetagscache = self.tagslist = None
1778 self.nodetagscache = self.tagslist = None
1779
1779
1780 cache = tagscache()
1780 cache = tagscache()
1781 cache.tags, cache.tagtypes = self._findtags()
1781 cache.tags, cache.tagtypes = self._findtags()
1782
1782
1783 return cache
1783 return cache
1784
1784
1785 def tags(self):
1785 def tags(self):
1786 '''return a mapping of tag to node'''
1786 '''return a mapping of tag to node'''
1787 t = {}
1787 t = {}
1788 if self.changelog.filteredrevs:
1788 if self.changelog.filteredrevs:
1789 tags, tt = self._findtags()
1789 tags, tt = self._findtags()
1790 else:
1790 else:
1791 tags = self._tagscache.tags
1791 tags = self._tagscache.tags
1792 rev = self.changelog.rev
1792 rev = self.changelog.rev
1793 for k, v in pycompat.iteritems(tags):
1793 for k, v in pycompat.iteritems(tags):
1794 try:
1794 try:
1795 # ignore tags to unknown nodes
1795 # ignore tags to unknown nodes
1796 rev(v)
1796 rev(v)
1797 t[k] = v
1797 t[k] = v
1798 except (error.LookupError, ValueError):
1798 except (error.LookupError, ValueError):
1799 pass
1799 pass
1800 return t
1800 return t
1801
1801
1802 def _findtags(self):
1802 def _findtags(self):
1803 '''Do the hard work of finding tags. Return a pair of dicts
1803 '''Do the hard work of finding tags. Return a pair of dicts
1804 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1804 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1805 maps tag name to a string like \'global\' or \'local\'.
1805 maps tag name to a string like \'global\' or \'local\'.
1806 Subclasses or extensions are free to add their own tags, but
1806 Subclasses or extensions are free to add their own tags, but
1807 should be aware that the returned dicts will be retained for the
1807 should be aware that the returned dicts will be retained for the
1808 duration of the localrepo object.'''
1808 duration of the localrepo object.'''
1809
1809
1810 # XXX what tagtype should subclasses/extensions use? Currently
1810 # XXX what tagtype should subclasses/extensions use? Currently
1811 # mq and bookmarks add tags, but do not set the tagtype at all.
1811 # mq and bookmarks add tags, but do not set the tagtype at all.
1812 # Should each extension invent its own tag type? Should there
1812 # Should each extension invent its own tag type? Should there
1813 # be one tagtype for all such "virtual" tags? Or is the status
1813 # be one tagtype for all such "virtual" tags? Or is the status
1814 # quo fine?
1814 # quo fine?
1815
1815
1816 # map tag name to (node, hist)
1816 # map tag name to (node, hist)
1817 alltags = tagsmod.findglobaltags(self.ui, self)
1817 alltags = tagsmod.findglobaltags(self.ui, self)
1818 # map tag name to tag type
1818 # map tag name to tag type
1819 tagtypes = {tag: b'global' for tag in alltags}
1819 tagtypes = {tag: b'global' for tag in alltags}
1820
1820
1821 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1821 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1822
1822
1823 # Build the return dicts. Have to re-encode tag names because
1823 # Build the return dicts. Have to re-encode tag names because
1824 # the tags module always uses UTF-8 (in order not to lose info
1824 # the tags module always uses UTF-8 (in order not to lose info
1825 # writing to the cache), but the rest of Mercurial wants them in
1825 # writing to the cache), but the rest of Mercurial wants them in
1826 # local encoding.
1826 # local encoding.
1827 tags = {}
1827 tags = {}
1828 for (name, (node, hist)) in pycompat.iteritems(alltags):
1828 for (name, (node, hist)) in pycompat.iteritems(alltags):
1829 if node != nullid:
1829 if node != nullid:
1830 tags[encoding.tolocal(name)] = node
1830 tags[encoding.tolocal(name)] = node
1831 tags[b'tip'] = self.changelog.tip()
1831 tags[b'tip'] = self.changelog.tip()
1832 tagtypes = {
1832 tagtypes = {
1833 encoding.tolocal(name): value
1833 encoding.tolocal(name): value
1834 for (name, value) in pycompat.iteritems(tagtypes)
1834 for (name, value) in pycompat.iteritems(tagtypes)
1835 }
1835 }
1836 return (tags, tagtypes)
1836 return (tags, tagtypes)
1837
1837
1838 def tagtype(self, tagname):
1838 def tagtype(self, tagname):
1839 '''
1839 '''
1840 return the type of the given tag. result can be:
1840 return the type of the given tag. result can be:
1841
1841
1842 'local' : a local tag
1842 'local' : a local tag
1843 'global' : a global tag
1843 'global' : a global tag
1844 None : tag does not exist
1844 None : tag does not exist
1845 '''
1845 '''
1846
1846
1847 return self._tagscache.tagtypes.get(tagname)
1847 return self._tagscache.tagtypes.get(tagname)
1848
1848
1849 def tagslist(self):
1849 def tagslist(self):
1850 '''return a list of tags ordered by revision'''
1850 '''return a list of tags ordered by revision'''
1851 if not self._tagscache.tagslist:
1851 if not self._tagscache.tagslist:
1852 l = []
1852 l = []
1853 for t, n in pycompat.iteritems(self.tags()):
1853 for t, n in pycompat.iteritems(self.tags()):
1854 l.append((self.changelog.rev(n), t, n))
1854 l.append((self.changelog.rev(n), t, n))
1855 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1855 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1856
1856
1857 return self._tagscache.tagslist
1857 return self._tagscache.tagslist
1858
1858
1859 def nodetags(self, node):
1859 def nodetags(self, node):
1860 '''return the tags associated with a node'''
1860 '''return the tags associated with a node'''
1861 if not self._tagscache.nodetagscache:
1861 if not self._tagscache.nodetagscache:
1862 nodetagscache = {}
1862 nodetagscache = {}
1863 for t, n in pycompat.iteritems(self._tagscache.tags):
1863 for t, n in pycompat.iteritems(self._tagscache.tags):
1864 nodetagscache.setdefault(n, []).append(t)
1864 nodetagscache.setdefault(n, []).append(t)
1865 for tags in pycompat.itervalues(nodetagscache):
1865 for tags in pycompat.itervalues(nodetagscache):
1866 tags.sort()
1866 tags.sort()
1867 self._tagscache.nodetagscache = nodetagscache
1867 self._tagscache.nodetagscache = nodetagscache
1868 return self._tagscache.nodetagscache.get(node, [])
1868 return self._tagscache.nodetagscache.get(node, [])
1869
1869
1870 def nodebookmarks(self, node):
1870 def nodebookmarks(self, node):
1871 """return the list of bookmarks pointing to the specified node"""
1871 """return the list of bookmarks pointing to the specified node"""
1872 return self._bookmarks.names(node)
1872 return self._bookmarks.names(node)
1873
1873
1874 def branchmap(self):
1874 def branchmap(self):
1875 '''returns a dictionary {branch: [branchheads]} with branchheads
1875 '''returns a dictionary {branch: [branchheads]} with branchheads
1876 ordered by increasing revision number'''
1876 ordered by increasing revision number'''
1877 return self._branchcaches[self]
1877 return self._branchcaches[self]
1878
1878
1879 @unfilteredmethod
1879 @unfilteredmethod
1880 def revbranchcache(self):
1880 def revbranchcache(self):
1881 if not self._revbranchcache:
1881 if not self._revbranchcache:
1882 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1882 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1883 return self._revbranchcache
1883 return self._revbranchcache
1884
1884
1885 def branchtip(self, branch, ignoremissing=False):
1885 def branchtip(self, branch, ignoremissing=False):
1886 '''return the tip node for a given branch
1886 '''return the tip node for a given branch
1887
1887
1888 If ignoremissing is True, then this method will not raise an error.
1888 If ignoremissing is True, then this method will not raise an error.
1889 This is helpful for callers that only expect None for a missing branch
1889 This is helpful for callers that only expect None for a missing branch
1890 (e.g. namespace).
1890 (e.g. namespace).
1891
1891
1892 '''
1892 '''
1893 try:
1893 try:
1894 return self.branchmap().branchtip(branch)
1894 return self.branchmap().branchtip(branch)
1895 except KeyError:
1895 except KeyError:
1896 if not ignoremissing:
1896 if not ignoremissing:
1897 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1897 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1898 else:
1898 else:
1899 pass
1899 pass
1900
1900
1901 def lookup(self, key):
1901 def lookup(self, key):
1902 node = scmutil.revsymbol(self, key).node()
1902 node = scmutil.revsymbol(self, key).node()
1903 if node is None:
1903 if node is None:
1904 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1904 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1905 return node
1905 return node
1906
1906
1907 def lookupbranch(self, key):
1907 def lookupbranch(self, key):
1908 if self.branchmap().hasbranch(key):
1908 if self.branchmap().hasbranch(key):
1909 return key
1909 return key
1910
1910
1911 return scmutil.revsymbol(self, key).branch()
1911 return scmutil.revsymbol(self, key).branch()
1912
1912
1913 def known(self, nodes):
1913 def known(self, nodes):
1914 cl = self.changelog
1914 cl = self.changelog
1915 get_rev = cl.index.get_rev
1915 get_rev = cl.index.get_rev
1916 filtered = cl.filteredrevs
1916 filtered = cl.filteredrevs
1917 result = []
1917 result = []
1918 for n in nodes:
1918 for n in nodes:
1919 r = get_rev(n)
1919 r = get_rev(n)
1920 resp = not (r is None or r in filtered)
1920 resp = not (r is None or r in filtered)
1921 result.append(resp)
1921 result.append(resp)
1922 return result
1922 return result
1923
1923
1924 def local(self):
1924 def local(self):
1925 return self
1925 return self
1926
1926
1927 def publishing(self):
1927 def publishing(self):
1928 # it's safe (and desirable) to trust the publish flag unconditionally
1928 # it's safe (and desirable) to trust the publish flag unconditionally
1929 # so that we don't finalize changes shared between users via ssh or nfs
1929 # so that we don't finalize changes shared between users via ssh or nfs
1930 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1930 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1931
1931
1932 def cancopy(self):
1932 def cancopy(self):
1933 # so statichttprepo's override of local() works
1933 # so statichttprepo's override of local() works
1934 if not self.local():
1934 if not self.local():
1935 return False
1935 return False
1936 if not self.publishing():
1936 if not self.publishing():
1937 return True
1937 return True
1938 # if publishing we can't copy if there is filtered content
1938 # if publishing we can't copy if there is filtered content
1939 return not self.filtered(b'visible').changelog.filteredrevs
1939 return not self.filtered(b'visible').changelog.filteredrevs
1940
1940
1941 def shared(self):
1941 def shared(self):
1942 '''the type of shared repository (None if not shared)'''
1942 '''the type of shared repository (None if not shared)'''
1943 if self.sharedpath != self.path:
1943 if self.sharedpath != self.path:
1944 return b'store'
1944 return b'store'
1945 return None
1945 return None
1946
1946
1947 def wjoin(self, f, *insidef):
1947 def wjoin(self, f, *insidef):
1948 return self.vfs.reljoin(self.root, f, *insidef)
1948 return self.vfs.reljoin(self.root, f, *insidef)
1949
1949
1950 def setparents(self, p1, p2=nullid):
1950 def setparents(self, p1, p2=nullid):
1951 self[None].setparents(p1, p2)
1951 self[None].setparents(p1, p2)
1952 self._quick_access_changeid_invalidate()
1952 self._quick_access_changeid_invalidate()
1953
1953
1954 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1954 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1955 """changeid must be a changeset revision, if specified.
1955 """changeid must be a changeset revision, if specified.
1956 fileid can be a file revision or node."""
1956 fileid can be a file revision or node."""
1957 return context.filectx(
1957 return context.filectx(
1958 self, path, changeid, fileid, changectx=changectx
1958 self, path, changeid, fileid, changectx=changectx
1959 )
1959 )
1960
1960
1961 def getcwd(self):
1961 def getcwd(self):
1962 return self.dirstate.getcwd()
1962 return self.dirstate.getcwd()
1963
1963
1964 def pathto(self, f, cwd=None):
1964 def pathto(self, f, cwd=None):
1965 return self.dirstate.pathto(f, cwd)
1965 return self.dirstate.pathto(f, cwd)
1966
1966
1967 def _loadfilter(self, filter):
1967 def _loadfilter(self, filter):
1968 if filter not in self._filterpats:
1968 if filter not in self._filterpats:
1969 l = []
1969 l = []
1970 for pat, cmd in self.ui.configitems(filter):
1970 for pat, cmd in self.ui.configitems(filter):
1971 if cmd == b'!':
1971 if cmd == b'!':
1972 continue
1972 continue
1973 mf = matchmod.match(self.root, b'', [pat])
1973 mf = matchmod.match(self.root, b'', [pat])
1974 fn = None
1974 fn = None
1975 params = cmd
1975 params = cmd
1976 for name, filterfn in pycompat.iteritems(self._datafilters):
1976 for name, filterfn in pycompat.iteritems(self._datafilters):
1977 if cmd.startswith(name):
1977 if cmd.startswith(name):
1978 fn = filterfn
1978 fn = filterfn
1979 params = cmd[len(name) :].lstrip()
1979 params = cmd[len(name) :].lstrip()
1980 break
1980 break
1981 if not fn:
1981 if not fn:
1982 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1982 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1983 fn.__name__ = 'commandfilter'
1983 fn.__name__ = 'commandfilter'
1984 # Wrap old filters not supporting keyword arguments
1984 # Wrap old filters not supporting keyword arguments
1985 if not pycompat.getargspec(fn)[2]:
1985 if not pycompat.getargspec(fn)[2]:
1986 oldfn = fn
1986 oldfn = fn
1987 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1987 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1988 fn.__name__ = 'compat-' + oldfn.__name__
1988 fn.__name__ = 'compat-' + oldfn.__name__
1989 l.append((mf, fn, params))
1989 l.append((mf, fn, params))
1990 self._filterpats[filter] = l
1990 self._filterpats[filter] = l
1991 return self._filterpats[filter]
1991 return self._filterpats[filter]
1992
1992
1993 def _filter(self, filterpats, filename, data):
1993 def _filter(self, filterpats, filename, data):
1994 for mf, fn, cmd in filterpats:
1994 for mf, fn, cmd in filterpats:
1995 if mf(filename):
1995 if mf(filename):
1996 self.ui.debug(
1996 self.ui.debug(
1997 b"filtering %s through %s\n"
1997 b"filtering %s through %s\n"
1998 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1998 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1999 )
1999 )
2000 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2000 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2001 break
2001 break
2002
2002
2003 return data
2003 return data
2004
2004
2005 @unfilteredpropertycache
2005 @unfilteredpropertycache
2006 def _encodefilterpats(self):
2006 def _encodefilterpats(self):
2007 return self._loadfilter(b'encode')
2007 return self._loadfilter(b'encode')
2008
2008
2009 @unfilteredpropertycache
2009 @unfilteredpropertycache
2010 def _decodefilterpats(self):
2010 def _decodefilterpats(self):
2011 return self._loadfilter(b'decode')
2011 return self._loadfilter(b'decode')
2012
2012
2013 def adddatafilter(self, name, filter):
2013 def adddatafilter(self, name, filter):
2014 self._datafilters[name] = filter
2014 self._datafilters[name] = filter
2015
2015
2016 def wread(self, filename):
2016 def wread(self, filename):
2017 if self.wvfs.islink(filename):
2017 if self.wvfs.islink(filename):
2018 data = self.wvfs.readlink(filename)
2018 data = self.wvfs.readlink(filename)
2019 else:
2019 else:
2020 data = self.wvfs.read(filename)
2020 data = self.wvfs.read(filename)
2021 return self._filter(self._encodefilterpats, filename, data)
2021 return self._filter(self._encodefilterpats, filename, data)
2022
2022
2023 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2023 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2024 """write ``data`` into ``filename`` in the working directory
2024 """write ``data`` into ``filename`` in the working directory
2025
2025
2026 This returns length of written (maybe decoded) data.
2026 This returns length of written (maybe decoded) data.
2027 """
2027 """
2028 data = self._filter(self._decodefilterpats, filename, data)
2028 data = self._filter(self._decodefilterpats, filename, data)
2029 if b'l' in flags:
2029 if b'l' in flags:
2030 self.wvfs.symlink(data, filename)
2030 self.wvfs.symlink(data, filename)
2031 else:
2031 else:
2032 self.wvfs.write(
2032 self.wvfs.write(
2033 filename, data, backgroundclose=backgroundclose, **kwargs
2033 filename, data, backgroundclose=backgroundclose, **kwargs
2034 )
2034 )
2035 if b'x' in flags:
2035 if b'x' in flags:
2036 self.wvfs.setflags(filename, False, True)
2036 self.wvfs.setflags(filename, False, True)
2037 else:
2037 else:
2038 self.wvfs.setflags(filename, False, False)
2038 self.wvfs.setflags(filename, False, False)
2039 return len(data)
2039 return len(data)
2040
2040
2041 def wwritedata(self, filename, data):
2041 def wwritedata(self, filename, data):
2042 return self._filter(self._decodefilterpats, filename, data)
2042 return self._filter(self._decodefilterpats, filename, data)
2043
2043
2044 def currenttransaction(self):
2044 def currenttransaction(self):
2045 """return the current transaction or None if non exists"""
2045 """return the current transaction or None if non exists"""
2046 if self._transref:
2046 if self._transref:
2047 tr = self._transref()
2047 tr = self._transref()
2048 else:
2048 else:
2049 tr = None
2049 tr = None
2050
2050
2051 if tr and tr.running():
2051 if tr and tr.running():
2052 return tr
2052 return tr
2053 return None
2053 return None
2054
2054
2055 def transaction(self, desc, report=None):
2055 def transaction(self, desc, report=None):
2056 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2056 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2057 b'devel', b'check-locks'
2057 b'devel', b'check-locks'
2058 ):
2058 ):
2059 if self._currentlock(self._lockref) is None:
2059 if self._currentlock(self._lockref) is None:
2060 raise error.ProgrammingError(b'transaction requires locking')
2060 raise error.ProgrammingError(b'transaction requires locking')
2061 tr = self.currenttransaction()
2061 tr = self.currenttransaction()
2062 if tr is not None:
2062 if tr is not None:
2063 return tr.nest(name=desc)
2063 return tr.nest(name=desc)
2064
2064
2065 # abort here if the journal already exists
2065 # abort here if the journal already exists
2066 if self.svfs.exists(b"journal"):
2066 if self.svfs.exists(b"journal"):
2067 raise error.RepoError(
2067 raise error.RepoError(
2068 _(b"abandoned transaction found"),
2068 _(b"abandoned transaction found"),
2069 hint=_(b"run 'hg recover' to clean up transaction"),
2069 hint=_(b"run 'hg recover' to clean up transaction"),
2070 )
2070 )
2071
2071
2072 idbase = b"%.40f#%f" % (random.random(), time.time())
2072 idbase = b"%.40f#%f" % (random.random(), time.time())
2073 ha = hex(hashutil.sha1(idbase).digest())
2073 ha = hex(hashutil.sha1(idbase).digest())
2074 txnid = b'TXN:' + ha
2074 txnid = b'TXN:' + ha
2075 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2075 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2076
2076
2077 self._writejournal(desc)
2077 self._writejournal(desc)
2078 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2078 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2079 if report:
2079 if report:
2080 rp = report
2080 rp = report
2081 else:
2081 else:
2082 rp = self.ui.warn
2082 rp = self.ui.warn
2083 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2083 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2084 # we must avoid cyclic reference between repo and transaction.
2084 # we must avoid cyclic reference between repo and transaction.
2085 reporef = weakref.ref(self)
2085 reporef = weakref.ref(self)
2086 # Code to track tag movement
2086 # Code to track tag movement
2087 #
2087 #
2088 # Since tags are all handled as file content, it is actually quite hard
2088 # Since tags are all handled as file content, it is actually quite hard
2089 # to track these movement from a code perspective. So we fallback to a
2089 # to track these movement from a code perspective. So we fallback to a
2090 # tracking at the repository level. One could envision to track changes
2090 # tracking at the repository level. One could envision to track changes
2091 # to the '.hgtags' file through changegroup apply but that fails to
2091 # to the '.hgtags' file through changegroup apply but that fails to
2092 # cope with case where transaction expose new heads without changegroup
2092 # cope with case where transaction expose new heads without changegroup
2093 # being involved (eg: phase movement).
2093 # being involved (eg: phase movement).
2094 #
2094 #
2095 # For now, We gate the feature behind a flag since this likely comes
2095 # For now, We gate the feature behind a flag since this likely comes
2096 # with performance impacts. The current code run more often than needed
2096 # with performance impacts. The current code run more often than needed
2097 # and do not use caches as much as it could. The current focus is on
2097 # and do not use caches as much as it could. The current focus is on
2098 # the behavior of the feature so we disable it by default. The flag
2098 # the behavior of the feature so we disable it by default. The flag
2099 # will be removed when we are happy with the performance impact.
2099 # will be removed when we are happy with the performance impact.
2100 #
2100 #
2101 # Once this feature is no longer experimental move the following
2101 # Once this feature is no longer experimental move the following
2102 # documentation to the appropriate help section:
2102 # documentation to the appropriate help section:
2103 #
2103 #
2104 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2104 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2105 # tags (new or changed or deleted tags). In addition the details of
2105 # tags (new or changed or deleted tags). In addition the details of
2106 # these changes are made available in a file at:
2106 # these changes are made available in a file at:
2107 # ``REPOROOT/.hg/changes/tags.changes``.
2107 # ``REPOROOT/.hg/changes/tags.changes``.
2108 # Make sure you check for HG_TAG_MOVED before reading that file as it
2108 # Make sure you check for HG_TAG_MOVED before reading that file as it
2109 # might exist from a previous transaction even if no tag were touched
2109 # might exist from a previous transaction even if no tag were touched
2110 # in this one. Changes are recorded in a line base format::
2110 # in this one. Changes are recorded in a line base format::
2111 #
2111 #
2112 # <action> <hex-node> <tag-name>\n
2112 # <action> <hex-node> <tag-name>\n
2113 #
2113 #
2114 # Actions are defined as follow:
2114 # Actions are defined as follow:
2115 # "-R": tag is removed,
2115 # "-R": tag is removed,
2116 # "+A": tag is added,
2116 # "+A": tag is added,
2117 # "-M": tag is moved (old value),
2117 # "-M": tag is moved (old value),
2118 # "+M": tag is moved (new value),
2118 # "+M": tag is moved (new value),
2119 tracktags = lambda x: None
2119 tracktags = lambda x: None
2120 # experimental config: experimental.hook-track-tags
2120 # experimental config: experimental.hook-track-tags
2121 shouldtracktags = self.ui.configbool(
2121 shouldtracktags = self.ui.configbool(
2122 b'experimental', b'hook-track-tags'
2122 b'experimental', b'hook-track-tags'
2123 )
2123 )
2124 if desc != b'strip' and shouldtracktags:
2124 if desc != b'strip' and shouldtracktags:
2125 oldheads = self.changelog.headrevs()
2125 oldheads = self.changelog.headrevs()
2126
2126
2127 def tracktags(tr2):
2127 def tracktags(tr2):
2128 repo = reporef()
2128 repo = reporef()
2129 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2129 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2130 newheads = repo.changelog.headrevs()
2130 newheads = repo.changelog.headrevs()
2131 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2131 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2132 # notes: we compare lists here.
2132 # notes: we compare lists here.
2133 # As we do it only once buiding set would not be cheaper
2133 # As we do it only once buiding set would not be cheaper
2134 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2134 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2135 if changes:
2135 if changes:
2136 tr2.hookargs[b'tag_moved'] = b'1'
2136 tr2.hookargs[b'tag_moved'] = b'1'
2137 with repo.vfs(
2137 with repo.vfs(
2138 b'changes/tags.changes', b'w', atomictemp=True
2138 b'changes/tags.changes', b'w', atomictemp=True
2139 ) as changesfile:
2139 ) as changesfile:
2140 # note: we do not register the file to the transaction
2140 # note: we do not register the file to the transaction
2141 # because we needs it to still exist on the transaction
2141 # because we needs it to still exist on the transaction
2142 # is close (for txnclose hooks)
2142 # is close (for txnclose hooks)
2143 tagsmod.writediff(changesfile, changes)
2143 tagsmod.writediff(changesfile, changes)
2144
2144
2145 def validate(tr2):
2145 def validate(tr2):
2146 """will run pre-closing hooks"""
2146 """will run pre-closing hooks"""
2147 # XXX the transaction API is a bit lacking here so we take a hacky
2147 # XXX the transaction API is a bit lacking here so we take a hacky
2148 # path for now
2148 # path for now
2149 #
2149 #
2150 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2150 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2151 # dict is copied before these run. In addition we needs the data
2151 # dict is copied before these run. In addition we needs the data
2152 # available to in memory hooks too.
2152 # available to in memory hooks too.
2153 #
2153 #
2154 # Moreover, we also need to make sure this runs before txnclose
2154 # Moreover, we also need to make sure this runs before txnclose
2155 # hooks and there is no "pending" mechanism that would execute
2155 # hooks and there is no "pending" mechanism that would execute
2156 # logic only if hooks are about to run.
2156 # logic only if hooks are about to run.
2157 #
2157 #
2158 # Fixing this limitation of the transaction is also needed to track
2158 # Fixing this limitation of the transaction is also needed to track
2159 # other families of changes (bookmarks, phases, obsolescence).
2159 # other families of changes (bookmarks, phases, obsolescence).
2160 #
2160 #
2161 # This will have to be fixed before we remove the experimental
2161 # This will have to be fixed before we remove the experimental
2162 # gating.
2162 # gating.
2163 tracktags(tr2)
2163 tracktags(tr2)
2164 repo = reporef()
2164 repo = reporef()
2165
2165
2166 singleheadopt = (b'experimental', b'single-head-per-branch')
2166 singleheadopt = (b'experimental', b'single-head-per-branch')
2167 singlehead = repo.ui.configbool(*singleheadopt)
2167 singlehead = repo.ui.configbool(*singleheadopt)
2168 if singlehead:
2168 if singlehead:
2169 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2169 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2170 accountclosed = singleheadsub.get(
2170 accountclosed = singleheadsub.get(
2171 b"account-closed-heads", False
2171 b"account-closed-heads", False
2172 )
2172 )
2173 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2173 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2174 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2174 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2175 for name, (old, new) in sorted(
2175 for name, (old, new) in sorted(
2176 tr.changes[b'bookmarks'].items()
2176 tr.changes[b'bookmarks'].items()
2177 ):
2177 ):
2178 args = tr.hookargs.copy()
2178 args = tr.hookargs.copy()
2179 args.update(bookmarks.preparehookargs(name, old, new))
2179 args.update(bookmarks.preparehookargs(name, old, new))
2180 repo.hook(
2180 repo.hook(
2181 b'pretxnclose-bookmark',
2181 b'pretxnclose-bookmark',
2182 throw=True,
2182 throw=True,
2183 **pycompat.strkwargs(args)
2183 **pycompat.strkwargs(args)
2184 )
2184 )
2185 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2185 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2186 cl = repo.unfiltered().changelog
2186 cl = repo.unfiltered().changelog
2187 for revs, (old, new) in tr.changes[b'phases']:
2187 for revs, (old, new) in tr.changes[b'phases']:
2188 for rev in revs:
2188 for rev in revs:
2189 args = tr.hookargs.copy()
2189 args = tr.hookargs.copy()
2190 node = hex(cl.node(rev))
2190 node = hex(cl.node(rev))
2191 args.update(phases.preparehookargs(node, old, new))
2191 args.update(phases.preparehookargs(node, old, new))
2192 repo.hook(
2192 repo.hook(
2193 b'pretxnclose-phase',
2193 b'pretxnclose-phase',
2194 throw=True,
2194 throw=True,
2195 **pycompat.strkwargs(args)
2195 **pycompat.strkwargs(args)
2196 )
2196 )
2197
2197
2198 repo.hook(
2198 repo.hook(
2199 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2199 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2200 )
2200 )
2201
2201
2202 def releasefn(tr, success):
2202 def releasefn(tr, success):
2203 repo = reporef()
2203 repo = reporef()
2204 if repo is None:
2204 if repo is None:
2205 # If the repo has been GC'd (and this release function is being
2205 # If the repo has been GC'd (and this release function is being
2206 # called from transaction.__del__), there's not much we can do,
2206 # called from transaction.__del__), there's not much we can do,
2207 # so just leave the unfinished transaction there and let the
2207 # so just leave the unfinished transaction there and let the
2208 # user run `hg recover`.
2208 # user run `hg recover`.
2209 return
2209 return
2210 if success:
2210 if success:
2211 # this should be explicitly invoked here, because
2211 # this should be explicitly invoked here, because
2212 # in-memory changes aren't written out at closing
2212 # in-memory changes aren't written out at closing
2213 # transaction, if tr.addfilegenerator (via
2213 # transaction, if tr.addfilegenerator (via
2214 # dirstate.write or so) isn't invoked while
2214 # dirstate.write or so) isn't invoked while
2215 # transaction running
2215 # transaction running
2216 repo.dirstate.write(None)
2216 repo.dirstate.write(None)
2217 else:
2217 else:
2218 # discard all changes (including ones already written
2218 # discard all changes (including ones already written
2219 # out) in this transaction
2219 # out) in this transaction
2220 narrowspec.restorebackup(self, b'journal.narrowspec')
2220 narrowspec.restorebackup(self, b'journal.narrowspec')
2221 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2221 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2222 repo.dirstate.restorebackup(None, b'journal.dirstate')
2222 repo.dirstate.restorebackup(None, b'journal.dirstate')
2223
2223
2224 repo.invalidate(clearfilecache=True)
2224 repo.invalidate(clearfilecache=True)
2225
2225
2226 tr = transaction.transaction(
2226 tr = transaction.transaction(
2227 rp,
2227 rp,
2228 self.svfs,
2228 self.svfs,
2229 vfsmap,
2229 vfsmap,
2230 b"journal",
2230 b"journal",
2231 b"undo",
2231 b"undo",
2232 aftertrans(renames),
2232 aftertrans(renames),
2233 self.store.createmode,
2233 self.store.createmode,
2234 validator=validate,
2234 validator=validate,
2235 releasefn=releasefn,
2235 releasefn=releasefn,
2236 checkambigfiles=_cachedfiles,
2236 checkambigfiles=_cachedfiles,
2237 name=desc,
2237 name=desc,
2238 )
2238 )
2239 tr.changes[b'origrepolen'] = len(self)
2239 tr.changes[b'origrepolen'] = len(self)
2240 tr.changes[b'obsmarkers'] = set()
2240 tr.changes[b'obsmarkers'] = set()
2241 tr.changes[b'phases'] = []
2241 tr.changes[b'phases'] = []
2242 tr.changes[b'bookmarks'] = {}
2242 tr.changes[b'bookmarks'] = {}
2243
2243
2244 tr.hookargs[b'txnid'] = txnid
2244 tr.hookargs[b'txnid'] = txnid
2245 tr.hookargs[b'txnname'] = desc
2245 tr.hookargs[b'txnname'] = desc
2246 tr.hookargs[b'changes'] = tr.changes
2246 # note: writing the fncache only during finalize mean that the file is
2247 # note: writing the fncache only during finalize mean that the file is
2247 # outdated when running hooks. As fncache is used for streaming clone,
2248 # outdated when running hooks. As fncache is used for streaming clone,
2248 # this is not expected to break anything that happen during the hooks.
2249 # this is not expected to break anything that happen during the hooks.
2249 tr.addfinalize(b'flush-fncache', self.store.write)
2250 tr.addfinalize(b'flush-fncache', self.store.write)
2250
2251
2251 def txnclosehook(tr2):
2252 def txnclosehook(tr2):
2252 """To be run if transaction is successful, will schedule a hook run
2253 """To be run if transaction is successful, will schedule a hook run
2253 """
2254 """
2254 # Don't reference tr2 in hook() so we don't hold a reference.
2255 # Don't reference tr2 in hook() so we don't hold a reference.
2255 # This reduces memory consumption when there are multiple
2256 # This reduces memory consumption when there are multiple
2256 # transactions per lock. This can likely go away if issue5045
2257 # transactions per lock. This can likely go away if issue5045
2257 # fixes the function accumulation.
2258 # fixes the function accumulation.
2258 hookargs = tr2.hookargs
2259 hookargs = tr2.hookargs
2259
2260
2260 def hookfunc(unused_success):
2261 def hookfunc(unused_success):
2261 repo = reporef()
2262 repo = reporef()
2262 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2263 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2263 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2264 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2264 for name, (old, new) in bmchanges:
2265 for name, (old, new) in bmchanges:
2265 args = tr.hookargs.copy()
2266 args = tr.hookargs.copy()
2266 args.update(bookmarks.preparehookargs(name, old, new))
2267 args.update(bookmarks.preparehookargs(name, old, new))
2267 repo.hook(
2268 repo.hook(
2268 b'txnclose-bookmark',
2269 b'txnclose-bookmark',
2269 throw=False,
2270 throw=False,
2270 **pycompat.strkwargs(args)
2271 **pycompat.strkwargs(args)
2271 )
2272 )
2272
2273
2273 if hook.hashook(repo.ui, b'txnclose-phase'):
2274 if hook.hashook(repo.ui, b'txnclose-phase'):
2274 cl = repo.unfiltered().changelog
2275 cl = repo.unfiltered().changelog
2275 phasemv = sorted(
2276 phasemv = sorted(
2276 tr.changes[b'phases'], key=lambda r: r[0][0]
2277 tr.changes[b'phases'], key=lambda r: r[0][0]
2277 )
2278 )
2278 for revs, (old, new) in phasemv:
2279 for revs, (old, new) in phasemv:
2279 for rev in revs:
2280 for rev in revs:
2280 args = tr.hookargs.copy()
2281 args = tr.hookargs.copy()
2281 node = hex(cl.node(rev))
2282 node = hex(cl.node(rev))
2282 args.update(phases.preparehookargs(node, old, new))
2283 args.update(phases.preparehookargs(node, old, new))
2283 repo.hook(
2284 repo.hook(
2284 b'txnclose-phase',
2285 b'txnclose-phase',
2285 throw=False,
2286 throw=False,
2286 **pycompat.strkwargs(args)
2287 **pycompat.strkwargs(args)
2287 )
2288 )
2288
2289
2289 repo.hook(
2290 repo.hook(
2290 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2291 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2291 )
2292 )
2292
2293
2293 reporef()._afterlock(hookfunc)
2294 reporef()._afterlock(hookfunc)
2294
2295
2295 tr.addfinalize(b'txnclose-hook', txnclosehook)
2296 tr.addfinalize(b'txnclose-hook', txnclosehook)
2296 # Include a leading "-" to make it happen before the transaction summary
2297 # Include a leading "-" to make it happen before the transaction summary
2297 # reports registered via scmutil.registersummarycallback() whose names
2298 # reports registered via scmutil.registersummarycallback() whose names
2298 # are 00-txnreport etc. That way, the caches will be warm when the
2299 # are 00-txnreport etc. That way, the caches will be warm when the
2299 # callbacks run.
2300 # callbacks run.
2300 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2301 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2301
2302
2302 def txnaborthook(tr2):
2303 def txnaborthook(tr2):
2303 """To be run if transaction is aborted
2304 """To be run if transaction is aborted
2304 """
2305 """
2305 reporef().hook(
2306 reporef().hook(
2306 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2307 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2307 )
2308 )
2308
2309
2309 tr.addabort(b'txnabort-hook', txnaborthook)
2310 tr.addabort(b'txnabort-hook', txnaborthook)
2310 # avoid eager cache invalidation. in-memory data should be identical
2311 # avoid eager cache invalidation. in-memory data should be identical
2311 # to stored data if transaction has no error.
2312 # to stored data if transaction has no error.
2312 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2313 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2313 self._transref = weakref.ref(tr)
2314 self._transref = weakref.ref(tr)
2314 scmutil.registersummarycallback(self, tr, desc)
2315 scmutil.registersummarycallback(self, tr, desc)
2315 return tr
2316 return tr
2316
2317
2317 def _journalfiles(self):
2318 def _journalfiles(self):
2318 return (
2319 return (
2319 (self.svfs, b'journal'),
2320 (self.svfs, b'journal'),
2320 (self.svfs, b'journal.narrowspec'),
2321 (self.svfs, b'journal.narrowspec'),
2321 (self.vfs, b'journal.narrowspec.dirstate'),
2322 (self.vfs, b'journal.narrowspec.dirstate'),
2322 (self.vfs, b'journal.dirstate'),
2323 (self.vfs, b'journal.dirstate'),
2323 (self.vfs, b'journal.branch'),
2324 (self.vfs, b'journal.branch'),
2324 (self.vfs, b'journal.desc'),
2325 (self.vfs, b'journal.desc'),
2325 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2326 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2326 (self.svfs, b'journal.phaseroots'),
2327 (self.svfs, b'journal.phaseroots'),
2327 )
2328 )
2328
2329
2329 def undofiles(self):
2330 def undofiles(self):
2330 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2331 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2331
2332
2332 @unfilteredmethod
2333 @unfilteredmethod
2333 def _writejournal(self, desc):
2334 def _writejournal(self, desc):
2334 self.dirstate.savebackup(None, b'journal.dirstate')
2335 self.dirstate.savebackup(None, b'journal.dirstate')
2335 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2336 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2336 narrowspec.savebackup(self, b'journal.narrowspec')
2337 narrowspec.savebackup(self, b'journal.narrowspec')
2337 self.vfs.write(
2338 self.vfs.write(
2338 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2339 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2339 )
2340 )
2340 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2341 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2341 bookmarksvfs = bookmarks.bookmarksvfs(self)
2342 bookmarksvfs = bookmarks.bookmarksvfs(self)
2342 bookmarksvfs.write(
2343 bookmarksvfs.write(
2343 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2344 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2344 )
2345 )
2345 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2346 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2346
2347
2347 def recover(self):
2348 def recover(self):
2348 with self.lock():
2349 with self.lock():
2349 if self.svfs.exists(b"journal"):
2350 if self.svfs.exists(b"journal"):
2350 self.ui.status(_(b"rolling back interrupted transaction\n"))
2351 self.ui.status(_(b"rolling back interrupted transaction\n"))
2351 vfsmap = {
2352 vfsmap = {
2352 b'': self.svfs,
2353 b'': self.svfs,
2353 b'plain': self.vfs,
2354 b'plain': self.vfs,
2354 }
2355 }
2355 transaction.rollback(
2356 transaction.rollback(
2356 self.svfs,
2357 self.svfs,
2357 vfsmap,
2358 vfsmap,
2358 b"journal",
2359 b"journal",
2359 self.ui.warn,
2360 self.ui.warn,
2360 checkambigfiles=_cachedfiles,
2361 checkambigfiles=_cachedfiles,
2361 )
2362 )
2362 self.invalidate()
2363 self.invalidate()
2363 return True
2364 return True
2364 else:
2365 else:
2365 self.ui.warn(_(b"no interrupted transaction available\n"))
2366 self.ui.warn(_(b"no interrupted transaction available\n"))
2366 return False
2367 return False
2367
2368
2368 def rollback(self, dryrun=False, force=False):
2369 def rollback(self, dryrun=False, force=False):
2369 wlock = lock = dsguard = None
2370 wlock = lock = dsguard = None
2370 try:
2371 try:
2371 wlock = self.wlock()
2372 wlock = self.wlock()
2372 lock = self.lock()
2373 lock = self.lock()
2373 if self.svfs.exists(b"undo"):
2374 if self.svfs.exists(b"undo"):
2374 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2375 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2375
2376
2376 return self._rollback(dryrun, force, dsguard)
2377 return self._rollback(dryrun, force, dsguard)
2377 else:
2378 else:
2378 self.ui.warn(_(b"no rollback information available\n"))
2379 self.ui.warn(_(b"no rollback information available\n"))
2379 return 1
2380 return 1
2380 finally:
2381 finally:
2381 release(dsguard, lock, wlock)
2382 release(dsguard, lock, wlock)
2382
2383
2383 @unfilteredmethod # Until we get smarter cache management
2384 @unfilteredmethod # Until we get smarter cache management
2384 def _rollback(self, dryrun, force, dsguard):
2385 def _rollback(self, dryrun, force, dsguard):
2385 ui = self.ui
2386 ui = self.ui
2386 try:
2387 try:
2387 args = self.vfs.read(b'undo.desc').splitlines()
2388 args = self.vfs.read(b'undo.desc').splitlines()
2388 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2389 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2389 if len(args) >= 3:
2390 if len(args) >= 3:
2390 detail = args[2]
2391 detail = args[2]
2391 oldtip = oldlen - 1
2392 oldtip = oldlen - 1
2392
2393
2393 if detail and ui.verbose:
2394 if detail and ui.verbose:
2394 msg = _(
2395 msg = _(
2395 b'repository tip rolled back to revision %d'
2396 b'repository tip rolled back to revision %d'
2396 b' (undo %s: %s)\n'
2397 b' (undo %s: %s)\n'
2397 ) % (oldtip, desc, detail)
2398 ) % (oldtip, desc, detail)
2398 else:
2399 else:
2399 msg = _(
2400 msg = _(
2400 b'repository tip rolled back to revision %d (undo %s)\n'
2401 b'repository tip rolled back to revision %d (undo %s)\n'
2401 ) % (oldtip, desc)
2402 ) % (oldtip, desc)
2402 except IOError:
2403 except IOError:
2403 msg = _(b'rolling back unknown transaction\n')
2404 msg = _(b'rolling back unknown transaction\n')
2404 desc = None
2405 desc = None
2405
2406
2406 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2407 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2407 raise error.Abort(
2408 raise error.Abort(
2408 _(
2409 _(
2409 b'rollback of last commit while not checked out '
2410 b'rollback of last commit while not checked out '
2410 b'may lose data'
2411 b'may lose data'
2411 ),
2412 ),
2412 hint=_(b'use -f to force'),
2413 hint=_(b'use -f to force'),
2413 )
2414 )
2414
2415
2415 ui.status(msg)
2416 ui.status(msg)
2416 if dryrun:
2417 if dryrun:
2417 return 0
2418 return 0
2418
2419
2419 parents = self.dirstate.parents()
2420 parents = self.dirstate.parents()
2420 self.destroying()
2421 self.destroying()
2421 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2422 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2422 transaction.rollback(
2423 transaction.rollback(
2423 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2424 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2424 )
2425 )
2425 bookmarksvfs = bookmarks.bookmarksvfs(self)
2426 bookmarksvfs = bookmarks.bookmarksvfs(self)
2426 if bookmarksvfs.exists(b'undo.bookmarks'):
2427 if bookmarksvfs.exists(b'undo.bookmarks'):
2427 bookmarksvfs.rename(
2428 bookmarksvfs.rename(
2428 b'undo.bookmarks', b'bookmarks', checkambig=True
2429 b'undo.bookmarks', b'bookmarks', checkambig=True
2429 )
2430 )
2430 if self.svfs.exists(b'undo.phaseroots'):
2431 if self.svfs.exists(b'undo.phaseroots'):
2431 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2432 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2432 self.invalidate()
2433 self.invalidate()
2433
2434
2434 has_node = self.changelog.index.has_node
2435 has_node = self.changelog.index.has_node
2435 parentgone = any(not has_node(p) for p in parents)
2436 parentgone = any(not has_node(p) for p in parents)
2436 if parentgone:
2437 if parentgone:
2437 # prevent dirstateguard from overwriting already restored one
2438 # prevent dirstateguard from overwriting already restored one
2438 dsguard.close()
2439 dsguard.close()
2439
2440
2440 narrowspec.restorebackup(self, b'undo.narrowspec')
2441 narrowspec.restorebackup(self, b'undo.narrowspec')
2441 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2442 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2442 self.dirstate.restorebackup(None, b'undo.dirstate')
2443 self.dirstate.restorebackup(None, b'undo.dirstate')
2443 try:
2444 try:
2444 branch = self.vfs.read(b'undo.branch')
2445 branch = self.vfs.read(b'undo.branch')
2445 self.dirstate.setbranch(encoding.tolocal(branch))
2446 self.dirstate.setbranch(encoding.tolocal(branch))
2446 except IOError:
2447 except IOError:
2447 ui.warn(
2448 ui.warn(
2448 _(
2449 _(
2449 b'named branch could not be reset: '
2450 b'named branch could not be reset: '
2450 b'current branch is still \'%s\'\n'
2451 b'current branch is still \'%s\'\n'
2451 )
2452 )
2452 % self.dirstate.branch()
2453 % self.dirstate.branch()
2453 )
2454 )
2454
2455
2455 parents = tuple([p.rev() for p in self[None].parents()])
2456 parents = tuple([p.rev() for p in self[None].parents()])
2456 if len(parents) > 1:
2457 if len(parents) > 1:
2457 ui.status(
2458 ui.status(
2458 _(
2459 _(
2459 b'working directory now based on '
2460 b'working directory now based on '
2460 b'revisions %d and %d\n'
2461 b'revisions %d and %d\n'
2461 )
2462 )
2462 % parents
2463 % parents
2463 )
2464 )
2464 else:
2465 else:
2465 ui.status(
2466 ui.status(
2466 _(b'working directory now based on revision %d\n') % parents
2467 _(b'working directory now based on revision %d\n') % parents
2467 )
2468 )
2468 mergemod.mergestate.clean(self, self[b'.'].node())
2469 mergemod.mergestate.clean(self, self[b'.'].node())
2469
2470
2470 # TODO: if we know which new heads may result from this rollback, pass
2471 # TODO: if we know which new heads may result from this rollback, pass
2471 # them to destroy(), which will prevent the branchhead cache from being
2472 # them to destroy(), which will prevent the branchhead cache from being
2472 # invalidated.
2473 # invalidated.
2473 self.destroyed()
2474 self.destroyed()
2474 return 0
2475 return 0
2475
2476
2476 def _buildcacheupdater(self, newtransaction):
2477 def _buildcacheupdater(self, newtransaction):
2477 """called during transaction to build the callback updating cache
2478 """called during transaction to build the callback updating cache
2478
2479
2479 Lives on the repository to help extension who might want to augment
2480 Lives on the repository to help extension who might want to augment
2480 this logic. For this purpose, the created transaction is passed to the
2481 this logic. For this purpose, the created transaction is passed to the
2481 method.
2482 method.
2482 """
2483 """
2483 # we must avoid cyclic reference between repo and transaction.
2484 # we must avoid cyclic reference between repo and transaction.
2484 reporef = weakref.ref(self)
2485 reporef = weakref.ref(self)
2485
2486
2486 def updater(tr):
2487 def updater(tr):
2487 repo = reporef()
2488 repo = reporef()
2488 repo.updatecaches(tr)
2489 repo.updatecaches(tr)
2489
2490
2490 return updater
2491 return updater
2491
2492
2492 @unfilteredmethod
2493 @unfilteredmethod
2493 def updatecaches(self, tr=None, full=False):
2494 def updatecaches(self, tr=None, full=False):
2494 """warm appropriate caches
2495 """warm appropriate caches
2495
2496
2496 If this function is called after a transaction closed. The transaction
2497 If this function is called after a transaction closed. The transaction
2497 will be available in the 'tr' argument. This can be used to selectively
2498 will be available in the 'tr' argument. This can be used to selectively
2498 update caches relevant to the changes in that transaction.
2499 update caches relevant to the changes in that transaction.
2499
2500
2500 If 'full' is set, make sure all caches the function knows about have
2501 If 'full' is set, make sure all caches the function knows about have
2501 up-to-date data. Even the ones usually loaded more lazily.
2502 up-to-date data. Even the ones usually loaded more lazily.
2502 """
2503 """
2503 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2504 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2504 # During strip, many caches are invalid but
2505 # During strip, many caches are invalid but
2505 # later call to `destroyed` will refresh them.
2506 # later call to `destroyed` will refresh them.
2506 return
2507 return
2507
2508
2508 if tr is None or tr.changes[b'origrepolen'] < len(self):
2509 if tr is None or tr.changes[b'origrepolen'] < len(self):
2509 # accessing the 'ser ved' branchmap should refresh all the others,
2510 # accessing the 'ser ved' branchmap should refresh all the others,
2510 self.ui.debug(b'updating the branch cache\n')
2511 self.ui.debug(b'updating the branch cache\n')
2511 self.filtered(b'served').branchmap()
2512 self.filtered(b'served').branchmap()
2512 self.filtered(b'served.hidden').branchmap()
2513 self.filtered(b'served.hidden').branchmap()
2513
2514
2514 if full:
2515 if full:
2515 unfi = self.unfiltered()
2516 unfi = self.unfiltered()
2516
2517
2517 self.changelog.update_caches(transaction=tr)
2518 self.changelog.update_caches(transaction=tr)
2518 self.manifestlog.update_caches(transaction=tr)
2519 self.manifestlog.update_caches(transaction=tr)
2519
2520
2520 rbc = unfi.revbranchcache()
2521 rbc = unfi.revbranchcache()
2521 for r in unfi.changelog:
2522 for r in unfi.changelog:
2522 rbc.branchinfo(r)
2523 rbc.branchinfo(r)
2523 rbc.write()
2524 rbc.write()
2524
2525
2525 # ensure the working copy parents are in the manifestfulltextcache
2526 # ensure the working copy parents are in the manifestfulltextcache
2526 for ctx in self[b'.'].parents():
2527 for ctx in self[b'.'].parents():
2527 ctx.manifest() # accessing the manifest is enough
2528 ctx.manifest() # accessing the manifest is enough
2528
2529
2529 # accessing fnode cache warms the cache
2530 # accessing fnode cache warms the cache
2530 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2531 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2531 # accessing tags warm the cache
2532 # accessing tags warm the cache
2532 self.tags()
2533 self.tags()
2533 self.filtered(b'served').tags()
2534 self.filtered(b'served').tags()
2534
2535
2535 # The `full` arg is documented as updating even the lazily-loaded
2536 # The `full` arg is documented as updating even the lazily-loaded
2536 # caches immediately, so we're forcing a write to cause these caches
2537 # caches immediately, so we're forcing a write to cause these caches
2537 # to be warmed up even if they haven't explicitly been requested
2538 # to be warmed up even if they haven't explicitly been requested
2538 # yet (if they've never been used by hg, they won't ever have been
2539 # yet (if they've never been used by hg, they won't ever have been
2539 # written, even if they're a subset of another kind of cache that
2540 # written, even if they're a subset of another kind of cache that
2540 # *has* been used).
2541 # *has* been used).
2541 for filt in repoview.filtertable.keys():
2542 for filt in repoview.filtertable.keys():
2542 filtered = self.filtered(filt)
2543 filtered = self.filtered(filt)
2543 filtered.branchmap().write(filtered)
2544 filtered.branchmap().write(filtered)
2544
2545
2545 def invalidatecaches(self):
2546 def invalidatecaches(self):
2546
2547
2547 if '_tagscache' in vars(self):
2548 if '_tagscache' in vars(self):
2548 # can't use delattr on proxy
2549 # can't use delattr on proxy
2549 del self.__dict__['_tagscache']
2550 del self.__dict__['_tagscache']
2550
2551
2551 self._branchcaches.clear()
2552 self._branchcaches.clear()
2552 self.invalidatevolatilesets()
2553 self.invalidatevolatilesets()
2553 self._sparsesignaturecache.clear()
2554 self._sparsesignaturecache.clear()
2554
2555
2555 def invalidatevolatilesets(self):
2556 def invalidatevolatilesets(self):
2556 self.filteredrevcache.clear()
2557 self.filteredrevcache.clear()
2557 obsolete.clearobscaches(self)
2558 obsolete.clearobscaches(self)
2558 self._quick_access_changeid_invalidate()
2559 self._quick_access_changeid_invalidate()
2559
2560
2560 def invalidatedirstate(self):
2561 def invalidatedirstate(self):
2561 '''Invalidates the dirstate, causing the next call to dirstate
2562 '''Invalidates the dirstate, causing the next call to dirstate
2562 to check if it was modified since the last time it was read,
2563 to check if it was modified since the last time it was read,
2563 rereading it if it has.
2564 rereading it if it has.
2564
2565
2565 This is different to dirstate.invalidate() that it doesn't always
2566 This is different to dirstate.invalidate() that it doesn't always
2566 rereads the dirstate. Use dirstate.invalidate() if you want to
2567 rereads the dirstate. Use dirstate.invalidate() if you want to
2567 explicitly read the dirstate again (i.e. restoring it to a previous
2568 explicitly read the dirstate again (i.e. restoring it to a previous
2568 known good state).'''
2569 known good state).'''
2569 if hasunfilteredcache(self, 'dirstate'):
2570 if hasunfilteredcache(self, 'dirstate'):
2570 for k in self.dirstate._filecache:
2571 for k in self.dirstate._filecache:
2571 try:
2572 try:
2572 delattr(self.dirstate, k)
2573 delattr(self.dirstate, k)
2573 except AttributeError:
2574 except AttributeError:
2574 pass
2575 pass
2575 delattr(self.unfiltered(), 'dirstate')
2576 delattr(self.unfiltered(), 'dirstate')
2576
2577
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                # drop the cache entry itself, not just the cached attribute
                del self._filecache[k]
            try:
                # removing the attribute forces the next access to reload
                # the value through the file-cache machinery
                delattr(unfiltered, k)
            except AttributeError:
                # attribute was never materialized on this instance
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
2612
2613
2613 def invalidateall(self):
2614 def invalidateall(self):
2614 '''Fully invalidates both store and non-store parts, causing the
2615 '''Fully invalidates both store and non-store parts, causing the
2615 subsequent operation to reread any outside changes.'''
2616 subsequent operation to reread any outside changes.'''
2616 # extension should hook this to invalidate its caches
2617 # extension should hook this to invalidate its caches
2617 self.invalidate()
2618 self.invalidate()
2618 self.invalidatedirstate()
2619 self.invalidatedirstate()
2619
2620
2620 @unfilteredmethod
2621 @unfilteredmethod
2621 def _refreshfilecachestats(self, tr):
2622 def _refreshfilecachestats(self, tr):
2622 """Reload stats of cached files so that they are flagged as valid"""
2623 """Reload stats of cached files so that they are flagged as valid"""
2623 for k, ce in self._filecache.items():
2624 for k, ce in self._filecache.items():
2624 k = pycompat.sysstr(k)
2625 k = pycompat.sysstr(k)
2625 if k == 'dirstate' or k not in self.__dict__:
2626 if k == 'dirstate' or k not in self.__dict__:
2626 continue
2627 continue
2627 ce.refresh()
2628 ce.refresh()
2628
2629
2629 def _lock(
2630 def _lock(
2630 self,
2631 self,
2631 vfs,
2632 vfs,
2632 lockname,
2633 lockname,
2633 wait,
2634 wait,
2634 releasefn,
2635 releasefn,
2635 acquirefn,
2636 acquirefn,
2636 desc,
2637 desc,
2637 inheritchecker=None,
2638 inheritchecker=None,
2638 parentenvvar=None,
2639 parentenvvar=None,
2639 ):
2640 ):
2640 parentlock = None
2641 parentlock = None
2641 # the contents of parentenvvar are used by the underlying lock to
2642 # the contents of parentenvvar are used by the underlying lock to
2642 # determine whether it can be inherited
2643 # determine whether it can be inherited
2643 if parentenvvar is not None:
2644 if parentenvvar is not None:
2644 parentlock = encoding.environ.get(parentenvvar)
2645 parentlock = encoding.environ.get(parentenvvar)
2645
2646
2646 timeout = 0
2647 timeout = 0
2647 warntimeout = 0
2648 warntimeout = 0
2648 if wait:
2649 if wait:
2649 timeout = self.ui.configint(b"ui", b"timeout")
2650 timeout = self.ui.configint(b"ui", b"timeout")
2650 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2651 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2651 # internal config: ui.signal-safe-lock
2652 # internal config: ui.signal-safe-lock
2652 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2653 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2653
2654
2654 l = lockmod.trylock(
2655 l = lockmod.trylock(
2655 self.ui,
2656 self.ui,
2656 vfs,
2657 vfs,
2657 lockname,
2658 lockname,
2658 timeout,
2659 timeout,
2659 warntimeout,
2660 warntimeout,
2660 releasefn=releasefn,
2661 releasefn=releasefn,
2661 acquirefn=acquirefn,
2662 acquirefn=acquirefn,
2662 desc=desc,
2663 desc=desc,
2663 inheritchecker=inheritchecker,
2664 inheritchecker=inheritchecker,
2664 parentlock=parentlock,
2665 parentlock=parentlock,
2665 signalsafe=signalsafe,
2666 signalsafe=signalsafe,
2666 )
2667 )
2667 return l
2668 return l
2668
2669
2669 def _afterlock(self, callback):
2670 def _afterlock(self, callback):
2670 """add a callback to be run when the repository is fully unlocked
2671 """add a callback to be run when the repository is fully unlocked
2671
2672
2672 The callback will be executed when the outermost lock is released
2673 The callback will be executed when the outermost lock is released
2673 (with wlock being higher level than 'lock')."""
2674 (with wlock being higher level than 'lock')."""
2674 for ref in (self._wlockref, self._lockref):
2675 for ref in (self._wlockref, self._lockref):
2675 l = ref and ref()
2676 l = ref and ref()
2676 if l and l.held:
2677 if l and l.held:
2677 l.postrelease.append(callback)
2678 l.postrelease.append(callback)
2678 break
2679 break
2679 else: # no lock have been found.
2680 else: # no lock have been found.
2680 callback(True)
2681 callback(True)
2681
2682
2682 def lock(self, wait=True):
2683 def lock(self, wait=True):
2683 '''Lock the repository store (.hg/store) and return a weak reference
2684 '''Lock the repository store (.hg/store) and return a weak reference
2684 to the lock. Use this before modifying the store (e.g. committing or
2685 to the lock. Use this before modifying the store (e.g. committing or
2685 stripping). If you are opening a transaction, get a lock as well.)
2686 stripping). If you are opening a transaction, get a lock as well.)
2686
2687
2687 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2688 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2688 'wlock' first to avoid a dead-lock hazard.'''
2689 'wlock' first to avoid a dead-lock hazard.'''
2689 l = self._currentlock(self._lockref)
2690 l = self._currentlock(self._lockref)
2690 if l is not None:
2691 if l is not None:
2691 l.lock()
2692 l.lock()
2692 return l
2693 return l
2693
2694
2694 l = self._lock(
2695 l = self._lock(
2695 vfs=self.svfs,
2696 vfs=self.svfs,
2696 lockname=b"lock",
2697 lockname=b"lock",
2697 wait=wait,
2698 wait=wait,
2698 releasefn=None,
2699 releasefn=None,
2699 acquirefn=self.invalidate,
2700 acquirefn=self.invalidate,
2700 desc=_(b'repository %s') % self.origroot,
2701 desc=_(b'repository %s') % self.origroot,
2701 )
2702 )
2702 self._lockref = weakref.ref(l)
2703 self._lockref = weakref.ref(l)
2703 return l
2704 return l
2704
2705
2705 def _wlockchecktransaction(self):
2706 def _wlockchecktransaction(self):
2706 if self.currenttransaction() is not None:
2707 if self.currenttransaction() is not None:
2707 raise error.LockInheritanceContractViolation(
2708 raise error.LockInheritanceContractViolation(
2708 b'wlock cannot be inherited in the middle of a transaction'
2709 b'wlock cannot be inherited in the middle of a transaction'
2709 )
2710 )
2710
2711
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the lock we already hold rather than acquiring anew
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            # developer warning: wlock must come before lock (see docstring)
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            # On release: discard pending dirstate changes if a parent
            # change is in flight, otherwise flush them to disk; then
            # re-stat the cached dirstate file so its cache entry stays
            # flagged as valid.
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l
2753
2754
2754 def _currentlock(self, lockref):
2755 def _currentlock(self, lockref):
2755 """Returns the lock if it's held, or None if it's not."""
2756 """Returns the lock if it's held, or None if it's not."""
2756 if lockref is None:
2757 if lockref is None:
2757 return None
2758 return None
2758 l = lockref()
2759 l = lockref()
2759 if l is None or not l.held:
2760 if l is None or not l.held:
2760 return None
2761 return None
2761 return l
2762 return l
2762
2763
2763 def currentwlock(self):
2764 def currentwlock(self):
2764 """Returns the wlock if it's held, or None if it's not."""
2765 """Returns the wlock if it's held, or None if it's not."""
2765 return self._currentlock(self._wlockref)
2766 return self._currentlock(self._wlockref)
2766
2767
    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; if it is a real context.filectx
            whose node already matches a parent, the existing filelog
            entry is reused
        manifest1, manifest2: manifests of the commit's first and second
            parents
        linkrev: revision number the new filelog entry will link to
        tr: the active transaction (typically a weak proxy)
        changelist: list that the file name is appended to when the file
            content (or only its flags, on merge) actually changed
        includecopymeta: when true, copy/copyrev metadata is stored in
            the filelog entry itself

        Returns the file node to record in the manifest.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                # the exact revision already exists in the filelog; only
                # a flags change (relative to either parent) needs to be
                # reported in changelist
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            # file is new relative to p1: record p2's version (if any) as
            # the sole parent
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
2884
2885
2885 def checkcommitpatterns(self, wctx, match, status, fail):
2886 def checkcommitpatterns(self, wctx, match, status, fail):
2886 """check for commit arguments that aren't committable"""
2887 """check for commit arguments that aren't committable"""
2887 if match.isexact() or match.prefix():
2888 if match.isexact() or match.prefix():
2888 matched = set(status.modified + status.added + status.removed)
2889 matched = set(status.modified + status.added + status.removed)
2889
2890
2890 for f in match.files():
2891 for f in match.files():
2891 f = self.dirstate.normalize(f)
2892 f = self.dirstate.normalize(f)
2892 if f == b'.' or f in matched or f in wctx.substate:
2893 if f == b'.' or f in matched or f in wctx.substate:
2893 continue
2894 continue
2894 if f in status.deleted:
2895 if f in status.deleted:
2895 fail(f, _(b'file not found!'))
2896 fail(f, _(b'file not found!'))
2896 # Is it a directory that exists or used to exist?
2897 # Is it a directory that exists or used to exist?
2897 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2898 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2898 d = f + b'/'
2899 d = f + b'/'
2899 for mf in matched:
2900 for mf in matched:
2900 if mf.startswith(d):
2901 if mf.startswith(d):
2901 break
2902 break
2902 else:
2903 else:
2903 fail(f, _(b"no match under directory!"))
2904 fail(f, _(b"no match under directory!"))
2904 elif f not in self.dirstate:
2905 elif f not in self.dirstate:
2905 fail(f, _(b"file not tracked!"))
2906 fail(f, _(b"file not tracked!"))
2906
2907
    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new revision, or None when there was
        nothing to commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            # abort on any uncommittable file reported below
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            # an "empty" commit is still allowed on branch change, branch
            # close, merges, or by explicit configuration
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            # hooks receive an empty string, not nullid, for a missing p2
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                # point the user at the saved message before propagating
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        # run the commit hook only once all locks have been released
        self._afterlock(commithook)
        return ret
3044
3045
3045 @unfilteredmethod
3046 @unfilteredmethod
3046 def commitctx(self, ctx, error=False, origctx=None):
3047 def commitctx(self, ctx, error=False, origctx=None):
3047 """Add a new revision to current repository.
3048 """Add a new revision to current repository.
3048 Revision information is passed via the context argument.
3049 Revision information is passed via the context argument.
3049
3050
3050 ctx.files() should list all files involved in this commit, i.e.
3051 ctx.files() should list all files involved in this commit, i.e.
3051 modified/added/removed files. On merge, it may be wider than the
3052 modified/added/removed files. On merge, it may be wider than the
3052 ctx.files() to be committed, since any file nodes derived directly
3053 ctx.files() to be committed, since any file nodes derived directly
3053 from p1 or p2 are excluded from the committed ctx.files().
3054 from p1 or p2 are excluded from the committed ctx.files().
3054
3055
3055 origctx is for convert to work around the problem that bug
3056 origctx is for convert to work around the problem that bug
3056 fixes to the files list in changesets change hashes. For
3057 fixes to the files list in changesets change hashes. For
3057 convert to be the identity, it can pass an origctx and this
3058 convert to be the identity, it can pass an origctx and this
3058 function will use the same files list when it makes sense to
3059 function will use the same files list when it makes sense to
3059 do so.
3060 do so.
3060 """
3061 """
3061
3062
3062 p1, p2 = ctx.p1(), ctx.p2()
3063 p1, p2 = ctx.p1(), ctx.p2()
3063 user = ctx.user()
3064 user = ctx.user()
3064
3065
3065 if self.filecopiesmode == b'changeset-sidedata':
3066 if self.filecopiesmode == b'changeset-sidedata':
3066 writechangesetcopy = True
3067 writechangesetcopy = True
3067 writefilecopymeta = True
3068 writefilecopymeta = True
3068 writecopiesto = None
3069 writecopiesto = None
3069 else:
3070 else:
3070 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3071 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3071 writefilecopymeta = writecopiesto != b'changeset-only'
3072 writefilecopymeta = writecopiesto != b'changeset-only'
3072 writechangesetcopy = writecopiesto in (
3073 writechangesetcopy = writecopiesto in (
3073 b'changeset-only',
3074 b'changeset-only',
3074 b'compatibility',
3075 b'compatibility',
3075 )
3076 )
3076 p1copies, p2copies = None, None
3077 p1copies, p2copies = None, None
3077 if writechangesetcopy:
3078 if writechangesetcopy:
3078 p1copies = ctx.p1copies()
3079 p1copies = ctx.p1copies()
3079 p2copies = ctx.p2copies()
3080 p2copies = ctx.p2copies()
3080 filesadded, filesremoved = None, None
3081 filesadded, filesremoved = None, None
3081 with self.lock(), self.transaction(b"commit") as tr:
3082 with self.lock(), self.transaction(b"commit") as tr:
3082 trp = weakref.proxy(tr)
3083 trp = weakref.proxy(tr)
3083
3084
3084 if ctx.manifestnode():
3085 if ctx.manifestnode():
3085 # reuse an existing manifest revision
3086 # reuse an existing manifest revision
3086 self.ui.debug(b'reusing known manifest\n')
3087 self.ui.debug(b'reusing known manifest\n')
3087 mn = ctx.manifestnode()
3088 mn = ctx.manifestnode()
3088 files = ctx.files()
3089 files = ctx.files()
3089 if writechangesetcopy:
3090 if writechangesetcopy:
3090 filesadded = ctx.filesadded()
3091 filesadded = ctx.filesadded()
3091 filesremoved = ctx.filesremoved()
3092 filesremoved = ctx.filesremoved()
3092 elif ctx.files():
3093 elif ctx.files():
3093 m1ctx = p1.manifestctx()
3094 m1ctx = p1.manifestctx()
3094 m2ctx = p2.manifestctx()
3095 m2ctx = p2.manifestctx()
3095 mctx = m1ctx.copy()
3096 mctx = m1ctx.copy()
3096
3097
3097 m = mctx.read()
3098 m = mctx.read()
3098 m1 = m1ctx.read()
3099 m1 = m1ctx.read()
3099 m2 = m2ctx.read()
3100 m2 = m2ctx.read()
3100
3101
3101 # check in files
3102 # check in files
3102 added = []
3103 added = []
3103 changed = []
3104 changed = []
3104 removed = list(ctx.removed())
3105 removed = list(ctx.removed())
3105 linkrev = len(self)
3106 linkrev = len(self)
3106 self.ui.note(_(b"committing files:\n"))
3107 self.ui.note(_(b"committing files:\n"))
3107 uipathfn = scmutil.getuipathfn(self)
3108 uipathfn = scmutil.getuipathfn(self)
3108 for f in sorted(ctx.modified() + ctx.added()):
3109 for f in sorted(ctx.modified() + ctx.added()):
3109 self.ui.note(uipathfn(f) + b"\n")
3110 self.ui.note(uipathfn(f) + b"\n")
3110 try:
3111 try:
3111 fctx = ctx[f]
3112 fctx = ctx[f]
3112 if fctx is None:
3113 if fctx is None:
3113 removed.append(f)
3114 removed.append(f)
3114 else:
3115 else:
3115 added.append(f)
3116 added.append(f)
3116 m[f] = self._filecommit(
3117 m[f] = self._filecommit(
3117 fctx,
3118 fctx,
3118 m1,
3119 m1,
3119 m2,
3120 m2,
3120 linkrev,
3121 linkrev,
3121 trp,
3122 trp,
3122 changed,
3123 changed,
3123 writefilecopymeta,
3124 writefilecopymeta,
3124 )
3125 )
3125 m.setflag(f, fctx.flags())
3126 m.setflag(f, fctx.flags())
3126 except OSError:
3127 except OSError:
3127 self.ui.warn(
3128 self.ui.warn(
3128 _(b"trouble committing %s!\n") % uipathfn(f)
3129 _(b"trouble committing %s!\n") % uipathfn(f)
3129 )
3130 )
3130 raise
3131 raise
3131 except IOError as inst:
3132 except IOError as inst:
3132 errcode = getattr(inst, 'errno', errno.ENOENT)
3133 errcode = getattr(inst, 'errno', errno.ENOENT)
3133 if error or errcode and errcode != errno.ENOENT:
3134 if error or errcode and errcode != errno.ENOENT:
3134 self.ui.warn(
3135 self.ui.warn(
3135 _(b"trouble committing %s!\n") % uipathfn(f)
3136 _(b"trouble committing %s!\n") % uipathfn(f)
3136 )
3137 )
3137 raise
3138 raise
3138
3139
3139 # update manifest
3140 # update manifest
3140 removed = [f for f in removed if f in m1 or f in m2]
3141 removed = [f for f in removed if f in m1 or f in m2]
3141 drop = sorted([f for f in removed if f in m])
3142 drop = sorted([f for f in removed if f in m])
3142 for f in drop:
3143 for f in drop:
3143 del m[f]
3144 del m[f]
3144 if p2.rev() != nullrev:
3145 if p2.rev() != nullrev:
3145
3146
3146 @util.cachefunc
3147 @util.cachefunc
3147 def mas():
3148 def mas():
3148 p1n = p1.node()
3149 p1n = p1.node()
3149 p2n = p2.node()
3150 p2n = p2.node()
3150 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3151 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3151 if not cahs:
3152 if not cahs:
3152 cahs = [nullrev]
3153 cahs = [nullrev]
3153 return [self[r].manifest() for r in cahs]
3154 return [self[r].manifest() for r in cahs]
3154
3155
3155 def deletionfromparent(f):
3156 def deletionfromparent(f):
3156 # When a file is removed relative to p1 in a merge, this
3157 # When a file is removed relative to p1 in a merge, this
3157 # function determines whether the absence is due to a
3158 # function determines whether the absence is due to a
3158 # deletion from a parent, or whether the merge commit
3159 # deletion from a parent, or whether the merge commit
3159 # itself deletes the file. We decide this by doing a
3160 # itself deletes the file. We decide this by doing a
3160 # simplified three way merge of the manifest entry for
3161 # simplified three way merge of the manifest entry for
3161 # the file. There are two ways we decide the merge
3162 # the file. There are two ways we decide the merge
3162 # itself didn't delete a file:
3163 # itself didn't delete a file:
3163 # - neither parent (nor the merge) contain the file
3164 # - neither parent (nor the merge) contain the file
3164 # - exactly one parent contains the file, and that
3165 # - exactly one parent contains the file, and that
3165 # parent has the same filelog entry as the merge
3166 # parent has the same filelog entry as the merge
3166 # ancestor (or all of them if there two). In other
3167 # ancestor (or all of them if there two). In other
3167 # words, that parent left the file unchanged while the
3168 # words, that parent left the file unchanged while the
3168 # other one deleted it.
3169 # other one deleted it.
3169 # One way to think about this is that deleting a file is
3170 # One way to think about this is that deleting a file is
3170 # similar to emptying it, so the list of changed files
3171 # similar to emptying it, so the list of changed files
3171 # should be similar either way. The computation
3172 # should be similar either way. The computation
3172 # described above is not done directly in _filecommit
3173 # described above is not done directly in _filecommit
3173 # when creating the list of changed files, however
3174 # when creating the list of changed files, however
3174 # it does something very similar by comparing filelog
3175 # it does something very similar by comparing filelog
3175 # nodes.
3176 # nodes.
3176 if f in m1:
3177 if f in m1:
3177 return f not in m2 and all(
3178 return f not in m2 and all(
3178 f in ma and ma.find(f) == m1.find(f)
3179 f in ma and ma.find(f) == m1.find(f)
3179 for ma in mas()
3180 for ma in mas()
3180 )
3181 )
3181 elif f in m2:
3182 elif f in m2:
3182 return all(
3183 return all(
3183 f in ma and ma.find(f) == m2.find(f)
3184 f in ma and ma.find(f) == m2.find(f)
3184 for ma in mas()
3185 for ma in mas()
3185 )
3186 )
3186 else:
3187 else:
3187 return True
3188 return True
3188
3189
3189 removed = [f for f in removed if not deletionfromparent(f)]
3190 removed = [f for f in removed if not deletionfromparent(f)]
3190
3191
3191 files = changed + removed
3192 files = changed + removed
3192 md = None
3193 md = None
3193 if not files:
3194 if not files:
3194 # if no "files" actually changed in terms of the changelog,
3195 # if no "files" actually changed in terms of the changelog,
3195 # try hard to detect unmodified manifest entry so that the
3196 # try hard to detect unmodified manifest entry so that the
3196 # exact same commit can be reproduced later on convert.
3197 # exact same commit can be reproduced later on convert.
3197 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3198 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3198 if not files and md:
3199 if not files and md:
3199 self.ui.debug(
3200 self.ui.debug(
3200 b'not reusing manifest (no file change in '
3201 b'not reusing manifest (no file change in '
3201 b'changelog, but manifest differs)\n'
3202 b'changelog, but manifest differs)\n'
3202 )
3203 )
3203 if files or md:
3204 if files or md:
3204 self.ui.note(_(b"committing manifest\n"))
3205 self.ui.note(_(b"committing manifest\n"))
3205 # we're using narrowmatch here since it's already applied at
3206 # we're using narrowmatch here since it's already applied at
3206 # other stages (such as dirstate.walk), so we're already
3207 # other stages (such as dirstate.walk), so we're already
3207 # ignoring things outside of narrowspec in most cases. The
3208 # ignoring things outside of narrowspec in most cases. The
3208 # one case where we might have files outside the narrowspec
3209 # one case where we might have files outside the narrowspec
3209 # at this point is merges, and we already error out in the
3210 # at this point is merges, and we already error out in the
3210 # case where the merge has files outside of the narrowspec,
3211 # case where the merge has files outside of the narrowspec,
3211 # so this is safe.
3212 # so this is safe.
3212 mn = mctx.write(
3213 mn = mctx.write(
3213 trp,
3214 trp,
3214 linkrev,
3215 linkrev,
3215 p1.manifestnode(),
3216 p1.manifestnode(),
3216 p2.manifestnode(),
3217 p2.manifestnode(),
3217 added,
3218 added,
3218 drop,
3219 drop,
3219 match=self.narrowmatch(),
3220 match=self.narrowmatch(),
3220 )
3221 )
3221
3222
3222 if writechangesetcopy:
3223 if writechangesetcopy:
3223 filesadded = [
3224 filesadded = [
3224 f for f in changed if not (f in m1 or f in m2)
3225 f for f in changed if not (f in m1 or f in m2)
3225 ]
3226 ]
3226 filesremoved = removed
3227 filesremoved = removed
3227 else:
3228 else:
3228 self.ui.debug(
3229 self.ui.debug(
3229 b'reusing manifest from p1 (listed files '
3230 b'reusing manifest from p1 (listed files '
3230 b'actually unchanged)\n'
3231 b'actually unchanged)\n'
3231 )
3232 )
3232 mn = p1.manifestnode()
3233 mn = p1.manifestnode()
3233 else:
3234 else:
3234 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3235 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3235 mn = p1.manifestnode()
3236 mn = p1.manifestnode()
3236 files = []
3237 files = []
3237
3238
3238 if writecopiesto == b'changeset-only':
3239 if writecopiesto == b'changeset-only':
3239 # If writing only to changeset extras, use None to indicate that
3240 # If writing only to changeset extras, use None to indicate that
3240 # no entry should be written. If writing to both, write an empty
3241 # no entry should be written. If writing to both, write an empty
3241 # entry to prevent the reader from falling back to reading
3242 # entry to prevent the reader from falling back to reading
3242 # filelogs.
3243 # filelogs.
3243 p1copies = p1copies or None
3244 p1copies = p1copies or None
3244 p2copies = p2copies or None
3245 p2copies = p2copies or None
3245 filesadded = filesadded or None
3246 filesadded = filesadded or None
3246 filesremoved = filesremoved or None
3247 filesremoved = filesremoved or None
3247
3248
3248 if origctx and origctx.manifestnode() == mn:
3249 if origctx and origctx.manifestnode() == mn:
3249 files = origctx.files()
3250 files = origctx.files()
3250
3251
3251 # update changelog
3252 # update changelog
3252 self.ui.note(_(b"committing changelog\n"))
3253 self.ui.note(_(b"committing changelog\n"))
3253 self.changelog.delayupdate(tr)
3254 self.changelog.delayupdate(tr)
3254 n = self.changelog.add(
3255 n = self.changelog.add(
3255 mn,
3256 mn,
3256 files,
3257 files,
3257 ctx.description(),
3258 ctx.description(),
3258 trp,
3259 trp,
3259 p1.node(),
3260 p1.node(),
3260 p2.node(),
3261 p2.node(),
3261 user,
3262 user,
3262 ctx.date(),
3263 ctx.date(),
3263 ctx.extra().copy(),
3264 ctx.extra().copy(),
3264 p1copies,
3265 p1copies,
3265 p2copies,
3266 p2copies,
3266 filesadded,
3267 filesadded,
3267 filesremoved,
3268 filesremoved,
3268 )
3269 )
3269 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3270 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3270 self.hook(
3271 self.hook(
3271 b'pretxncommit',
3272 b'pretxncommit',
3272 throw=True,
3273 throw=True,
3273 node=hex(n),
3274 node=hex(n),
3274 parent1=xp1,
3275 parent1=xp1,
3275 parent2=xp2,
3276 parent2=xp2,
3276 )
3277 )
3277 # set the new commit is proper phase
3278 # set the new commit is proper phase
3278 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3279 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3279 if targetphase:
3280 if targetphase:
3280 # retract boundary do not alter parent changeset.
3281 # retract boundary do not alter parent changeset.
3281 # if a parent have higher the resulting phase will
3282 # if a parent have higher the resulting phase will
3282 # be compliant anyway
3283 # be compliant anyway
3283 #
3284 #
3284 # if minimal phase was 0 we don't need to retract anything
3285 # if minimal phase was 0 we don't need to retract anything
3285 phases.registernew(self, tr, targetphase, [n])
3286 phases.registernew(self, tr, targetphase, [n])
3286 return n
3287 return n
3287
3288
3288 @unfilteredmethod
3289 @unfilteredmethod
3289 def destroying(self):
3290 def destroying(self):
3290 '''Inform the repository that nodes are about to be destroyed.
3291 '''Inform the repository that nodes are about to be destroyed.
3291 Intended for use by strip and rollback, so there's a common
3292 Intended for use by strip and rollback, so there's a common
3292 place for anything that has to be done before destroying history.
3293 place for anything that has to be done before destroying history.
3293
3294
3294 This is mostly useful for saving state that is in memory and waiting
3295 This is mostly useful for saving state that is in memory and waiting
3295 to be flushed when the current lock is released. Because a call to
3296 to be flushed when the current lock is released. Because a call to
3296 destroyed is imminent, the repo will be invalidated causing those
3297 destroyed is imminent, the repo will be invalidated causing those
3297 changes to stay in memory (waiting for the next unlock), or vanish
3298 changes to stay in memory (waiting for the next unlock), or vanish
3298 completely.
3299 completely.
3299 '''
3300 '''
3300 # When using the same lock to commit and strip, the phasecache is left
3301 # When using the same lock to commit and strip, the phasecache is left
3301 # dirty after committing. Then when we strip, the repo is invalidated,
3302 # dirty after committing. Then when we strip, the repo is invalidated,
3302 # causing those changes to disappear.
3303 # causing those changes to disappear.
3303 if '_phasecache' in vars(self):
3304 if '_phasecache' in vars(self):
3304 self._phasecache.write()
3305 self._phasecache.write()
3305
3306
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
3337
3338
3338 def status(
3339 def status(
3339 self,
3340 self,
3340 node1=b'.',
3341 node1=b'.',
3341 node2=None,
3342 node2=None,
3342 match=None,
3343 match=None,
3343 ignored=False,
3344 ignored=False,
3344 clean=False,
3345 clean=False,
3345 unknown=False,
3346 unknown=False,
3346 listsubrepos=False,
3347 listsubrepos=False,
3347 ):
3348 ):
3348 '''a convenience method that calls node1.status(node2)'''
3349 '''a convenience method that calls node1.status(node2)'''
3349 return self[node1].status(
3350 return self[node1].status(
3350 node2, match, ignored, clean, unknown, listsubrepos
3351 node2, match, ignored, clean, unknown, listsubrepos
3351 )
3352 )
3352
3353
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
3374
3375
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        # returns the live list; callers may mutate it (see clearpostdsstatus)
        return self._postdsstatus
3378
3379
    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        # slice deletion empties the list in place, so any existing
        # references to the list observe the cleared state
        del self._postdsstatus[:]
3382
3383
3383 def heads(self, start=None):
3384 def heads(self, start=None):
3384 if start is None:
3385 if start is None:
3385 cl = self.changelog
3386 cl = self.changelog
3386 headrevs = reversed(cl.headrevs())
3387 headrevs = reversed(cl.headrevs())
3387 return [cl.node(rev) for rev in headrevs]
3388 return [cl.node(rev) for rev in headrevs]
3388
3389
3389 heads = self.changelog.heads(start)
3390 heads = self.changelog.heads(start)
3390 # sort the output in rev descending order
3391 # sort the output in rev descending order
3391 return sorted(heads, key=self.changelog.rev, reverse=True)
3392 return sorted(heads, key=self.changelog.rev, reverse=True)
3392
3393
3393 def branchheads(self, branch=None, start=None, closed=False):
3394 def branchheads(self, branch=None, start=None, closed=False):
3394 '''return a (possibly filtered) list of heads for the given branch
3395 '''return a (possibly filtered) list of heads for the given branch
3395
3396
3396 Heads are returned in topological order, from newest to oldest.
3397 Heads are returned in topological order, from newest to oldest.
3397 If branch is None, use the dirstate branch.
3398 If branch is None, use the dirstate branch.
3398 If start is not None, return only heads reachable from start.
3399 If start is not None, return only heads reachable from start.
3399 If closed is True, return heads that are marked as closed as well.
3400 If closed is True, return heads that are marked as closed as well.
3400 '''
3401 '''
3401 if branch is None:
3402 if branch is None:
3402 branch = self[None].branch()
3403 branch = self[None].branch()
3403 branches = self.branchmap()
3404 branches = self.branchmap()
3404 if not branches.hasbranch(branch):
3405 if not branches.hasbranch(branch):
3405 return []
3406 return []
3406 # the cache returns heads ordered lowest to highest
3407 # the cache returns heads ordered lowest to highest
3407 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3408 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3408 if start is not None:
3409 if start is not None:
3409 # filter out the heads that cannot be reached from startrev
3410 # filter out the heads that cannot be reached from startrev
3410 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3411 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3411 bheads = [h for h in bheads if h in fbheads]
3412 bheads = [h for h in bheads if h in fbheads]
3412 return bheads
3413 return bheads
3413
3414
    def branches(self, nodes):
        # For each requested node (default: the changelog tip), walk first
        # parents until hitting a merge (second parent set) or a root
        # (first parent is null), and record the tuple
        # (start-of-walk, stop-node, parent1, parent2).
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n  # remember where this walk started
            while True:
                p = self.changelog.parents(n)
                # stop at a merge or at a root changeset
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
3427
3428
    def between(self, pairs):
        # For each (top, bottom) pair, walk first parents from ``top``
        # toward ``bottom`` and collect the nodes found at exponentially
        # growing distances (1, 2, 4, ...) from ``top``. Returns one such
        # list per input pair.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # next distance (in steps) at which to sample a node

            # stop when we reach ``bottom`` or fall off a root
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2  # double the sampling interval
                n = p
                i += 1

            r.append(l)

        return r
3446
3447
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # intentionally a no-op in the base implementation
3452
3453
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # fresh hooks container, cached per repo by the property cache
        return util.hooks()
3459
3460
    def pushkey(self, namespace, key, old, new):
        # Run the ``prepushkey`` hook first; a HookAbort vetoes the push
        # (reported to the user, returns False) instead of propagating.
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # forward transaction-level hook arguments (e.g. txn id)
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            # post-update notification hook, deferred until the lock is
            # released via _afterlock
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
3492
3493
3493 def listkeys(self, namespace):
3494 def listkeys(self, namespace):
3494 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3495 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3495 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3496 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3496 values = pushkey.list(self, namespace)
3497 values = pushkey.list(self, namespace)
3497 self.hook(b'listkeys', namespace=namespace, values=values)
3498 self.hook(b'listkeys', namespace=namespace, values=values)
3498 return values
3499 return values
3499
3500
3500 def debugwireargs(self, one, two, three=None, four=None, five=None):
3501 def debugwireargs(self, one, two, three=None, four=None, five=None):
3501 '''used to test argument passing over the wire'''
3502 '''used to test argument passing over the wire'''
3502 return b"%s %s %s %s %s" % (
3503 return b"%s %s %s %s %s" % (
3503 one,
3504 one,
3504 two,
3505 two,
3505 pycompat.bytestr(three),
3506 pycompat.bytestr(three),
3506 pycompat.bytestr(four),
3507 pycompat.bytestr(four),
3507 pycompat.bytestr(five),
3508 pycompat.bytestr(five),
3508 )
3509 )
3509
3510
    def savecommitmessage(self, text):
        # Persist the commit message to .hg/last-message.txt so it can be
        # recovered later (e.g. after an aborted commit).
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        # report the saved file's path relative to the cwd for the user
        return self.pathto(fp.name[len(self.root) + 1 :])
3517
3518
3518
3519
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the queued ``(vfs, src, dest)`` renames.

    The entries are captured as plain tuples so the returned closure holds
    no references back into the transaction object.
    """
    pending = [tuple(entry) for entry in files]

    def renameall():
        for vfs, src, dest in pending:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return renameall
3535
3536
3536
3537
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    base, name = os.path.split(fn)
    # only journal files may be renamed to undo files
    assert name.startswith(b'journal')
    undo = b'undo' + name[len(b'journal') :]
    return os.path.join(base, undo)
3541
3542
3542
3543
def instance(ui, path, create, intents=None, createopts=None):
    # Factory entry point: open (and, when ``create`` is true, first
    # create) the local repository at ``path``.
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)
3549
3550
3550
3551
def islocal(path):
    # this repository type is always local (on-disk)
    return True
3553
3554
3554
3555
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    # copy so the caller's mapping is never mutated
    opts = {} if not createopts else dict(createopts)

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3568
3569
3569
3570
3570 def newreporequirements(ui, createopts):
3571 def newreporequirements(ui, createopts):
3571 """Determine the set of requirements for a new local repository.
3572 """Determine the set of requirements for a new local repository.
3572
3573
3573 Extensions can wrap this function to specify custom requirements for
3574 Extensions can wrap this function to specify custom requirements for
3574 new repositories.
3575 new repositories.
3575 """
3576 """
3576 # If the repo is being created from a shared repository, we copy
3577 # If the repo is being created from a shared repository, we copy
3577 # its requirements.
3578 # its requirements.
3578 if b'sharedrepo' in createopts:
3579 if b'sharedrepo' in createopts:
3579 requirements = set(createopts[b'sharedrepo'].requirements)
3580 requirements = set(createopts[b'sharedrepo'].requirements)
3580 if createopts.get(b'sharedrelative'):
3581 if createopts.get(b'sharedrelative'):
3581 requirements.add(b'relshared')
3582 requirements.add(b'relshared')
3582 else:
3583 else:
3583 requirements.add(b'shared')
3584 requirements.add(b'shared')
3584
3585
3585 return requirements
3586 return requirements
3586
3587
3587 if b'backend' not in createopts:
3588 if b'backend' not in createopts:
3588 raise error.ProgrammingError(
3589 raise error.ProgrammingError(
3589 b'backend key not present in createopts; '
3590 b'backend key not present in createopts; '
3590 b'was defaultcreateopts() called?'
3591 b'was defaultcreateopts() called?'
3591 )
3592 )
3592
3593
3593 if createopts[b'backend'] != b'revlogv1':
3594 if createopts[b'backend'] != b'revlogv1':
3594 raise error.Abort(
3595 raise error.Abort(
3595 _(
3596 _(
3596 b'unable to determine repository requirements for '
3597 b'unable to determine repository requirements for '
3597 b'storage backend: %s'
3598 b'storage backend: %s'
3598 )
3599 )
3599 % createopts[b'backend']
3600 % createopts[b'backend']
3600 )
3601 )
3601
3602
3602 requirements = {b'revlogv1'}
3603 requirements = {b'revlogv1'}
3603 if ui.configbool(b'format', b'usestore'):
3604 if ui.configbool(b'format', b'usestore'):
3604 requirements.add(b'store')
3605 requirements.add(b'store')
3605 if ui.configbool(b'format', b'usefncache'):
3606 if ui.configbool(b'format', b'usefncache'):
3606 requirements.add(b'fncache')
3607 requirements.add(b'fncache')
3607 if ui.configbool(b'format', b'dotencode'):
3608 if ui.configbool(b'format', b'dotencode'):
3608 requirements.add(b'dotencode')
3609 requirements.add(b'dotencode')
3609
3610
3610 compengines = ui.configlist(b'format', b'revlog-compression')
3611 compengines = ui.configlist(b'format', b'revlog-compression')
3611 for compengine in compengines:
3612 for compengine in compengines:
3612 if compengine in util.compengines:
3613 if compengine in util.compengines:
3613 break
3614 break
3614 else:
3615 else:
3615 raise error.Abort(
3616 raise error.Abort(
3616 _(
3617 _(
3617 b'compression engines %s defined by '
3618 b'compression engines %s defined by '
3618 b'format.revlog-compression not available'
3619 b'format.revlog-compression not available'
3619 )
3620 )
3620 % b', '.join(b'"%s"' % e for e in compengines),
3621 % b', '.join(b'"%s"' % e for e in compengines),
3621 hint=_(
3622 hint=_(
3622 b'run "hg debuginstall" to list available '
3623 b'run "hg debuginstall" to list available '
3623 b'compression engines'
3624 b'compression engines'
3624 ),
3625 ),
3625 )
3626 )
3626
3627
3627 # zlib is the historical default and doesn't need an explicit requirement.
3628 # zlib is the historical default and doesn't need an explicit requirement.
3628 if compengine == b'zstd':
3629 if compengine == b'zstd':
3629 requirements.add(b'revlog-compression-zstd')
3630 requirements.add(b'revlog-compression-zstd')
3630 elif compengine != b'zlib':
3631 elif compengine != b'zlib':
3631 requirements.add(b'exp-compression-%s' % compengine)
3632 requirements.add(b'exp-compression-%s' % compengine)
3632
3633
3633 if scmutil.gdinitconfig(ui):
3634 if scmutil.gdinitconfig(ui):
3634 requirements.add(b'generaldelta')
3635 requirements.add(b'generaldelta')
3635 if ui.configbool(b'format', b'sparse-revlog'):
3636 if ui.configbool(b'format', b'sparse-revlog'):
3636 requirements.add(SPARSEREVLOG_REQUIREMENT)
3637 requirements.add(SPARSEREVLOG_REQUIREMENT)
3637
3638
3638 # experimental config: format.exp-use-side-data
3639 # experimental config: format.exp-use-side-data
3639 if ui.configbool(b'format', b'exp-use-side-data'):
3640 if ui.configbool(b'format', b'exp-use-side-data'):
3640 requirements.add(SIDEDATA_REQUIREMENT)
3641 requirements.add(SIDEDATA_REQUIREMENT)
3641 # experimental config: format.exp-use-copies-side-data-changeset
3642 # experimental config: format.exp-use-copies-side-data-changeset
3642 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3643 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3643 requirements.add(SIDEDATA_REQUIREMENT)
3644 requirements.add(SIDEDATA_REQUIREMENT)
3644 requirements.add(COPIESSDC_REQUIREMENT)
3645 requirements.add(COPIESSDC_REQUIREMENT)
3645 if ui.configbool(b'experimental', b'treemanifest'):
3646 if ui.configbool(b'experimental', b'treemanifest'):
3646 requirements.add(b'treemanifest')
3647 requirements.add(b'treemanifest')
3647
3648
3648 revlogv2 = ui.config(b'experimental', b'revlogv2')
3649 revlogv2 = ui.config(b'experimental', b'revlogv2')
3649 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3650 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3650 requirements.remove(b'revlogv1')
3651 requirements.remove(b'revlogv1')
3651 # generaldelta is implied by revlogv2.
3652 # generaldelta is implied by revlogv2.
3652 requirements.discard(b'generaldelta')
3653 requirements.discard(b'generaldelta')
3653 requirements.add(REVLOGV2_REQUIREMENT)
3654 requirements.add(REVLOGV2_REQUIREMENT)
3654 # experimental config: format.internal-phase
3655 # experimental config: format.internal-phase
3655 if ui.configbool(b'format', b'internal-phase'):
3656 if ui.configbool(b'format', b'internal-phase'):
3656 requirements.add(b'internal-phase')
3657 requirements.add(b'internal-phase')
3657
3658
3658 if createopts.get(b'narrowfiles'):
3659 if createopts.get(b'narrowfiles'):
3659 requirements.add(repository.NARROW_REQUIREMENT)
3660 requirements.add(repository.NARROW_REQUIREMENT)
3660
3661
3661 if createopts.get(b'lfs'):
3662 if createopts.get(b'lfs'):
3662 requirements.add(b'lfs')
3663 requirements.add(b'lfs')
3663
3664
3664 if ui.configbool(b'format', b'bookmarks-in-store'):
3665 if ui.configbool(b'format', b'bookmarks-in-store'):
3665 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3666 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3666
3667
3667 if ui.configbool(b'format', b'use-persistent-nodemap'):
3668 if ui.configbool(b'format', b'use-persistent-nodemap'):
3668 requirements.add(NODEMAP_REQUIREMENT)
3669 requirements.add(NODEMAP_REQUIREMENT)
3669
3670
3670 return requirements
3671 return requirements
3671
3672
3672
3673
3673 def filterknowncreateopts(ui, createopts):
3674 def filterknowncreateopts(ui, createopts):
3674 """Filters a dict of repo creation options against options that are known.
3675 """Filters a dict of repo creation options against options that are known.
3675
3676
3676 Receives a dict of repo creation options and returns a dict of those
3677 Receives a dict of repo creation options and returns a dict of those
3677 options that we don't know how to handle.
3678 options that we don't know how to handle.
3678
3679
3679 This function is called as part of repository creation. If the
3680 This function is called as part of repository creation. If the
3680 returned dict contains any items, repository creation will not
3681 returned dict contains any items, repository creation will not
3681 be allowed, as it means there was a request to create a repository
3682 be allowed, as it means there was a request to create a repository
3682 with options not recognized by loaded code.
3683 with options not recognized by loaded code.
3683
3684
3684 Extensions can wrap this function to filter out creation options
3685 Extensions can wrap this function to filter out creation options
3685 they know how to handle.
3686 they know how to handle.
3686 """
3687 """
3687 known = {
3688 known = {
3688 b'backend',
3689 b'backend',
3689 b'lfs',
3690 b'lfs',
3690 b'narrowfiles',
3691 b'narrowfiles',
3691 b'sharedrepo',
3692 b'sharedrepo',
3692 b'sharedrelative',
3693 b'sharedrelative',
3693 b'shareditems',
3694 b'shareditems',
3694 b'shallowfilestore',
3695 b'shallowfilestore',
3695 }
3696 }
3696
3697
3697 return {k: v for k, v in createopts.items() if k not in known}
3698 return {k: v for k, v in createopts.items() if k not in known}
3698
3699
3699
3700
3700 def createrepository(ui, path, createopts=None):
3701 def createrepository(ui, path, createopts=None):
3701 """Create a new repository in a vfs.
3702 """Create a new repository in a vfs.
3702
3703
3703 ``path`` path to the new repo's working directory.
3704 ``path`` path to the new repo's working directory.
3704 ``createopts`` options for the new repository.
3705 ``createopts`` options for the new repository.
3705
3706
3706 The following keys for ``createopts`` are recognized:
3707 The following keys for ``createopts`` are recognized:
3707
3708
3708 backend
3709 backend
3709 The storage backend to use.
3710 The storage backend to use.
3710 lfs
3711 lfs
3711 Repository will be created with ``lfs`` requirement. The lfs extension
3712 Repository will be created with ``lfs`` requirement. The lfs extension
3712 will automatically be loaded when the repository is accessed.
3713 will automatically be loaded when the repository is accessed.
3713 narrowfiles
3714 narrowfiles
3714 Set up repository to support narrow file storage.
3715 Set up repository to support narrow file storage.
3715 sharedrepo
3716 sharedrepo
3716 Repository object from which storage should be shared.
3717 Repository object from which storage should be shared.
3717 sharedrelative
3718 sharedrelative
3718 Boolean indicating if the path to the shared repo should be
3719 Boolean indicating if the path to the shared repo should be
3719 stored as relative. By default, the pointer to the "parent" repo
3720 stored as relative. By default, the pointer to the "parent" repo
3720 is stored as an absolute path.
3721 is stored as an absolute path.
3721 shareditems
3722 shareditems
3722 Set of items to share to the new repository (in addition to storage).
3723 Set of items to share to the new repository (in addition to storage).
3723 shallowfilestore
3724 shallowfilestore
3724 Indicates that storage for files should be shallow (not all ancestor
3725 Indicates that storage for files should be shallow (not all ancestor
3725 revisions are known).
3726 revisions are known).
3726 """
3727 """
3727 createopts = defaultcreateopts(ui, createopts=createopts)
3728 createopts = defaultcreateopts(ui, createopts=createopts)
3728
3729
3729 unknownopts = filterknowncreateopts(ui, createopts)
3730 unknownopts = filterknowncreateopts(ui, createopts)
3730
3731
3731 if not isinstance(unknownopts, dict):
3732 if not isinstance(unknownopts, dict):
3732 raise error.ProgrammingError(
3733 raise error.ProgrammingError(
3733 b'filterknowncreateopts() did not return a dict'
3734 b'filterknowncreateopts() did not return a dict'
3734 )
3735 )
3735
3736
3736 if unknownopts:
3737 if unknownopts:
3737 raise error.Abort(
3738 raise error.Abort(
3738 _(
3739 _(
3739 b'unable to create repository because of unknown '
3740 b'unable to create repository because of unknown '
3740 b'creation option: %s'
3741 b'creation option: %s'
3741 )
3742 )
3742 % b', '.join(sorted(unknownopts)),
3743 % b', '.join(sorted(unknownopts)),
3743 hint=_(b'is a required extension not loaded?'),
3744 hint=_(b'is a required extension not loaded?'),
3744 )
3745 )
3745
3746
3746 requirements = newreporequirements(ui, createopts=createopts)
3747 requirements = newreporequirements(ui, createopts=createopts)
3747
3748
3748 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3749 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3749
3750
3750 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3751 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3751 if hgvfs.exists():
3752 if hgvfs.exists():
3752 raise error.RepoError(_(b'repository %s already exists') % path)
3753 raise error.RepoError(_(b'repository %s already exists') % path)
3753
3754
3754 if b'sharedrepo' in createopts:
3755 if b'sharedrepo' in createopts:
3755 sharedpath = createopts[b'sharedrepo'].sharedpath
3756 sharedpath = createopts[b'sharedrepo'].sharedpath
3756
3757
3757 if createopts.get(b'sharedrelative'):
3758 if createopts.get(b'sharedrelative'):
3758 try:
3759 try:
3759 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3760 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3760 except (IOError, ValueError) as e:
3761 except (IOError, ValueError) as e:
3761 # ValueError is raised on Windows if the drive letters differ
3762 # ValueError is raised on Windows if the drive letters differ
3762 # on each path.
3763 # on each path.
3763 raise error.Abort(
3764 raise error.Abort(
3764 _(b'cannot calculate relative path'),
3765 _(b'cannot calculate relative path'),
3765 hint=stringutil.forcebytestr(e),
3766 hint=stringutil.forcebytestr(e),
3766 )
3767 )
3767
3768
3768 if not wdirvfs.exists():
3769 if not wdirvfs.exists():
3769 wdirvfs.makedirs()
3770 wdirvfs.makedirs()
3770
3771
3771 hgvfs.makedir(notindexed=True)
3772 hgvfs.makedir(notindexed=True)
3772 if b'sharedrepo' not in createopts:
3773 if b'sharedrepo' not in createopts:
3773 hgvfs.mkdir(b'cache')
3774 hgvfs.mkdir(b'cache')
3774 hgvfs.mkdir(b'wcache')
3775 hgvfs.mkdir(b'wcache')
3775
3776
3776 if b'store' in requirements and b'sharedrepo' not in createopts:
3777 if b'store' in requirements and b'sharedrepo' not in createopts:
3777 hgvfs.mkdir(b'store')
3778 hgvfs.mkdir(b'store')
3778
3779
3779 # We create an invalid changelog outside the store so very old
3780 # We create an invalid changelog outside the store so very old
3780 # Mercurial versions (which didn't know about the requirements
3781 # Mercurial versions (which didn't know about the requirements
3781 # file) encounter an error on reading the changelog. This
3782 # file) encounter an error on reading the changelog. This
3782 # effectively locks out old clients and prevents them from
3783 # effectively locks out old clients and prevents them from
3783 # mucking with a repo in an unknown format.
3784 # mucking with a repo in an unknown format.
3784 #
3785 #
3785 # The revlog header has version 2, which won't be recognized by
3786 # The revlog header has version 2, which won't be recognized by
3786 # such old clients.
3787 # such old clients.
3787 hgvfs.append(
3788 hgvfs.append(
3788 b'00changelog.i',
3789 b'00changelog.i',
3789 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3790 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3790 b'layout',
3791 b'layout',
3791 )
3792 )
3792
3793
3793 scmutil.writerequires(hgvfs, requirements)
3794 scmutil.writerequires(hgvfs, requirements)
3794
3795
3795 # Write out file telling readers where to find the shared store.
3796 # Write out file telling readers where to find the shared store.
3796 if b'sharedrepo' in createopts:
3797 if b'sharedrepo' in createopts:
3797 hgvfs.write(b'sharedpath', sharedpath)
3798 hgvfs.write(b'sharedpath', sharedpath)
3798
3799
3799 if createopts.get(b'shareditems'):
3800 if createopts.get(b'shareditems'):
3800 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3801 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3801 hgvfs.write(b'shared', shared)
3802 hgvfs.write(b'shared', shared)
3802
3803
3803
3804
3804 def poisonrepository(repo):
3805 def poisonrepository(repo):
3805 """Poison a repository instance so it can no longer be used."""
3806 """Poison a repository instance so it can no longer be used."""
3806 # Perform any cleanup on the instance.
3807 # Perform any cleanup on the instance.
3807 repo.close()
3808 repo.close()
3808
3809
3809 # Our strategy is to replace the type of the object with one that
3810 # Our strategy is to replace the type of the object with one that
3810 # has all attribute lookups result in error.
3811 # has all attribute lookups result in error.
3811 #
3812 #
3812 # But we have to allow the close() method because some constructors
3813 # But we have to allow the close() method because some constructors
3813 # of repos call close() on repo references.
3814 # of repos call close() on repo references.
3814 class poisonedrepository(object):
3815 class poisonedrepository(object):
3815 def __getattribute__(self, item):
3816 def __getattribute__(self, item):
3816 if item == 'close':
3817 if item == 'close':
3817 return object.__getattribute__(self, item)
3818 return object.__getattribute__(self, item)
3818
3819
3819 raise error.ProgrammingError(
3820 raise error.ProgrammingError(
3820 b'repo instances should not be used after unshare'
3821 b'repo instances should not be used after unshare'
3821 )
3822 )
3822
3823
3823 def close(self):
3824 def close(self):
3824 pass
3825 pass
3825
3826
3826 # We may have a repoview, which intercepts __setattr__. So be sure
3827 # We may have a repoview, which intercepts __setattr__. So be sure
3827 # we operate at the lowest level possible.
3828 # we operate at the lowest level possible.
3828 object.__setattr__(repo, '__class__', poisonedrepository)
3829 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,1392 +1,1392 b''
1 commit hooks can see env vars
1 commit hooks can see env vars
2 (and post-transaction one are run unlocked)
2 (and post-transaction one are run unlocked)
3
3
4
4
5 $ cat > $TESTTMP/txnabort.checkargs.py <<EOF
5 $ cat > $TESTTMP/txnabort.checkargs.py <<EOF
6 > from mercurial import pycompat
6 > from mercurial import pycompat
7 > def showargs(ui, repo, hooktype, **kwargs):
7 > def showargs(ui, repo, hooktype, **kwargs):
8 > kwargs = pycompat.byteskwargs(kwargs)
8 > kwargs = pycompat.byteskwargs(kwargs)
9 > ui.write(b'%s Python hook: %s\n' % (hooktype,
9 > ui.write(b'%s Python hook: %s\n' % (hooktype,
10 > b','.join(sorted(kwargs))))
10 > b','.join(sorted(kwargs))))
11 > EOF
11 > EOF
12
12
13 $ hg init a
13 $ hg init a
14 $ cd a
14 $ cd a
15 $ cat > .hg/hgrc <<EOF
15 $ cat > .hg/hgrc <<EOF
16 > [hooks]
16 > [hooks]
17 > commit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit"
17 > commit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit"
18 > commit.b = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit.b"
18 > commit.b = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit.b"
19 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= printenv.py --line precommit"
19 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= printenv.py --line precommit"
20 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxncommit"
20 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxncommit"
21 > pretxncommit.tip = hg -q tip
21 > pretxncommit.tip = hg -q tip
22 > pre-identify = sh -c "printenv.py --line pre-identify 1"
22 > pre-identify = sh -c "printenv.py --line pre-identify 1"
23 > pre-cat = sh -c "printenv.py --line pre-cat"
23 > pre-cat = sh -c "printenv.py --line pre-cat"
24 > post-cat = sh -c "printenv.py --line post-cat"
24 > post-cat = sh -c "printenv.py --line post-cat"
25 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnopen"
25 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnopen"
26 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnclose"
26 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnclose"
27 > txnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnclose"
27 > txnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnclose"
28 > txnabort.0 = python:$TESTTMP/txnabort.checkargs.py:showargs
28 > txnabort.0 = python:$TESTTMP/txnabort.checkargs.py:showargs
29 > txnabort.1 = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnabort"
29 > txnabort.1 = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnabort"
30 > txnclose.checklock = sh -c "hg debuglock > /dev/null"
30 > txnclose.checklock = sh -c "hg debuglock > /dev/null"
31 > EOF
31 > EOF
32 $ echo a > a
32 $ echo a > a
33 $ hg add a
33 $ hg add a
34 $ hg commit -m a
34 $ hg commit -m a
35 precommit hook: HG_HOOKNAME=precommit
35 precommit hook: HG_HOOKNAME=precommit
36 HG_HOOKTYPE=precommit
36 HG_HOOKTYPE=precommit
37 HG_PARENT1=0000000000000000000000000000000000000000
37 HG_PARENT1=0000000000000000000000000000000000000000
38
38
39 pretxnopen hook: HG_HOOKNAME=pretxnopen
39 pretxnopen hook: HG_HOOKNAME=pretxnopen
40 HG_HOOKTYPE=pretxnopen
40 HG_HOOKTYPE=pretxnopen
41 HG_TXNID=TXN:$ID$
41 HG_TXNID=TXN:$ID$
42 HG_TXNNAME=commit
42 HG_TXNNAME=commit
43
43
44 pretxncommit hook: HG_HOOKNAME=pretxncommit
44 pretxncommit hook: HG_HOOKNAME=pretxncommit
45 HG_HOOKTYPE=pretxncommit
45 HG_HOOKTYPE=pretxncommit
46 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
46 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
47 HG_PARENT1=0000000000000000000000000000000000000000
47 HG_PARENT1=0000000000000000000000000000000000000000
48 HG_PENDING=$TESTTMP/a
48 HG_PENDING=$TESTTMP/a
49
49
50 0:cb9a9f314b8b
50 0:cb9a9f314b8b
51 pretxnclose hook: HG_HOOKNAME=pretxnclose
51 pretxnclose hook: HG_HOOKNAME=pretxnclose
52 HG_HOOKTYPE=pretxnclose
52 HG_HOOKTYPE=pretxnclose
53 HG_PENDING=$TESTTMP/a
53 HG_PENDING=$TESTTMP/a
54 HG_PHASES_MOVED=1
54 HG_PHASES_MOVED=1
55 HG_TXNID=TXN:$ID$
55 HG_TXNID=TXN:$ID$
56 HG_TXNNAME=commit
56 HG_TXNNAME=commit
57
57
58 txnclose hook: HG_HOOKNAME=txnclose
58 txnclose hook: HG_HOOKNAME=txnclose
59 HG_HOOKTYPE=txnclose
59 HG_HOOKTYPE=txnclose
60 HG_PHASES_MOVED=1
60 HG_PHASES_MOVED=1
61 HG_TXNID=TXN:$ID$
61 HG_TXNID=TXN:$ID$
62 HG_TXNNAME=commit
62 HG_TXNNAME=commit
63
63
64 commit hook: HG_HOOKNAME=commit
64 commit hook: HG_HOOKNAME=commit
65 HG_HOOKTYPE=commit
65 HG_HOOKTYPE=commit
66 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
66 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
67 HG_PARENT1=0000000000000000000000000000000000000000
67 HG_PARENT1=0000000000000000000000000000000000000000
68
68
69 commit.b hook: HG_HOOKNAME=commit.b
69 commit.b hook: HG_HOOKNAME=commit.b
70 HG_HOOKTYPE=commit
70 HG_HOOKTYPE=commit
71 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
71 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
72 HG_PARENT1=0000000000000000000000000000000000000000
72 HG_PARENT1=0000000000000000000000000000000000000000
73
73
74
74
75 $ hg clone . ../b
75 $ hg clone . ../b
76 updating to branch default
76 updating to branch default
77 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
77 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 $ cd ../b
78 $ cd ../b
79
79
80 changegroup hooks can see env vars
80 changegroup hooks can see env vars
81
81
82 $ cat > .hg/hgrc <<EOF
82 $ cat > .hg/hgrc <<EOF
83 > [hooks]
83 > [hooks]
84 > prechangegroup = sh -c "printenv.py --line prechangegroup"
84 > prechangegroup = sh -c "printenv.py --line prechangegroup"
85 > changegroup = sh -c "printenv.py --line changegroup"
85 > changegroup = sh -c "printenv.py --line changegroup"
86 > incoming = sh -c "printenv.py --line incoming"
86 > incoming = sh -c "printenv.py --line incoming"
87 > EOF
87 > EOF
88
88
89 pretxncommit and commit hooks can see both parents of merge
89 pretxncommit and commit hooks can see both parents of merge
90
90
91 $ cd ../a
91 $ cd ../a
92 $ echo b >> a
92 $ echo b >> a
93 $ hg commit -m a1 -d "1 0"
93 $ hg commit -m a1 -d "1 0"
94 precommit hook: HG_HOOKNAME=precommit
94 precommit hook: HG_HOOKNAME=precommit
95 HG_HOOKTYPE=precommit
95 HG_HOOKTYPE=precommit
96 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
96 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
97
97
98 pretxnopen hook: HG_HOOKNAME=pretxnopen
98 pretxnopen hook: HG_HOOKNAME=pretxnopen
99 HG_HOOKTYPE=pretxnopen
99 HG_HOOKTYPE=pretxnopen
100 HG_TXNID=TXN:$ID$
100 HG_TXNID=TXN:$ID$
101 HG_TXNNAME=commit
101 HG_TXNNAME=commit
102
102
103 pretxncommit hook: HG_HOOKNAME=pretxncommit
103 pretxncommit hook: HG_HOOKNAME=pretxncommit
104 HG_HOOKTYPE=pretxncommit
104 HG_HOOKTYPE=pretxncommit
105 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
105 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
106 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
106 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
107 HG_PENDING=$TESTTMP/a
107 HG_PENDING=$TESTTMP/a
108
108
109 1:ab228980c14d
109 1:ab228980c14d
110 pretxnclose hook: HG_HOOKNAME=pretxnclose
110 pretxnclose hook: HG_HOOKNAME=pretxnclose
111 HG_HOOKTYPE=pretxnclose
111 HG_HOOKTYPE=pretxnclose
112 HG_PENDING=$TESTTMP/a
112 HG_PENDING=$TESTTMP/a
113 HG_TXNID=TXN:$ID$
113 HG_TXNID=TXN:$ID$
114 HG_TXNNAME=commit
114 HG_TXNNAME=commit
115
115
116 txnclose hook: HG_HOOKNAME=txnclose
116 txnclose hook: HG_HOOKNAME=txnclose
117 HG_HOOKTYPE=txnclose
117 HG_HOOKTYPE=txnclose
118 HG_TXNID=TXN:$ID$
118 HG_TXNID=TXN:$ID$
119 HG_TXNNAME=commit
119 HG_TXNNAME=commit
120
120
121 commit hook: HG_HOOKNAME=commit
121 commit hook: HG_HOOKNAME=commit
122 HG_HOOKTYPE=commit
122 HG_HOOKTYPE=commit
123 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
123 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
124 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
124 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
125
125
126 commit.b hook: HG_HOOKNAME=commit.b
126 commit.b hook: HG_HOOKNAME=commit.b
127 HG_HOOKTYPE=commit
127 HG_HOOKTYPE=commit
128 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
128 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
129 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
129 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
130
130
131 $ hg update -C 0
131 $ hg update -C 0
132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 $ echo b > b
133 $ echo b > b
134 $ hg add b
134 $ hg add b
135 $ hg commit -m b -d '1 0'
135 $ hg commit -m b -d '1 0'
136 precommit hook: HG_HOOKNAME=precommit
136 precommit hook: HG_HOOKNAME=precommit
137 HG_HOOKTYPE=precommit
137 HG_HOOKTYPE=precommit
138 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
138 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
139
139
140 pretxnopen hook: HG_HOOKNAME=pretxnopen
140 pretxnopen hook: HG_HOOKNAME=pretxnopen
141 HG_HOOKTYPE=pretxnopen
141 HG_HOOKTYPE=pretxnopen
142 HG_TXNID=TXN:$ID$
142 HG_TXNID=TXN:$ID$
143 HG_TXNNAME=commit
143 HG_TXNNAME=commit
144
144
145 pretxncommit hook: HG_HOOKNAME=pretxncommit
145 pretxncommit hook: HG_HOOKNAME=pretxncommit
146 HG_HOOKTYPE=pretxncommit
146 HG_HOOKTYPE=pretxncommit
147 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
147 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
148 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
148 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
149 HG_PENDING=$TESTTMP/a
149 HG_PENDING=$TESTTMP/a
150
150
151 2:ee9deb46ab31
151 2:ee9deb46ab31
152 pretxnclose hook: HG_HOOKNAME=pretxnclose
152 pretxnclose hook: HG_HOOKNAME=pretxnclose
153 HG_HOOKTYPE=pretxnclose
153 HG_HOOKTYPE=pretxnclose
154 HG_PENDING=$TESTTMP/a
154 HG_PENDING=$TESTTMP/a
155 HG_TXNID=TXN:$ID$
155 HG_TXNID=TXN:$ID$
156 HG_TXNNAME=commit
156 HG_TXNNAME=commit
157
157
158 created new head
158 created new head
159 txnclose hook: HG_HOOKNAME=txnclose
159 txnclose hook: HG_HOOKNAME=txnclose
160 HG_HOOKTYPE=txnclose
160 HG_HOOKTYPE=txnclose
161 HG_TXNID=TXN:$ID$
161 HG_TXNID=TXN:$ID$
162 HG_TXNNAME=commit
162 HG_TXNNAME=commit
163
163
164 commit hook: HG_HOOKNAME=commit
164 commit hook: HG_HOOKNAME=commit
165 HG_HOOKTYPE=commit
165 HG_HOOKTYPE=commit
166 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
166 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
167 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
167 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
168
168
169 commit.b hook: HG_HOOKNAME=commit.b
169 commit.b hook: HG_HOOKNAME=commit.b
170 HG_HOOKTYPE=commit
170 HG_HOOKTYPE=commit
171 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
171 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
172 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
172 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
173
173
174 $ hg merge 1
174 $ hg merge 1
175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
176 (branch merge, don't forget to commit)
176 (branch merge, don't forget to commit)
177 $ hg commit -m merge -d '2 0'
177 $ hg commit -m merge -d '2 0'
178 precommit hook: HG_HOOKNAME=precommit
178 precommit hook: HG_HOOKNAME=precommit
179 HG_HOOKTYPE=precommit
179 HG_HOOKTYPE=precommit
180 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
180 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
181 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
181 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
182
182
183 pretxnopen hook: HG_HOOKNAME=pretxnopen
183 pretxnopen hook: HG_HOOKNAME=pretxnopen
184 HG_HOOKTYPE=pretxnopen
184 HG_HOOKTYPE=pretxnopen
185 HG_TXNID=TXN:$ID$
185 HG_TXNID=TXN:$ID$
186 HG_TXNNAME=commit
186 HG_TXNNAME=commit
187
187
188 pretxncommit hook: HG_HOOKNAME=pretxncommit
188 pretxncommit hook: HG_HOOKNAME=pretxncommit
189 HG_HOOKTYPE=pretxncommit
189 HG_HOOKTYPE=pretxncommit
190 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
190 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
191 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
191 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
192 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
192 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
193 HG_PENDING=$TESTTMP/a
193 HG_PENDING=$TESTTMP/a
194
194
195 3:07f3376c1e65
195 3:07f3376c1e65
196 pretxnclose hook: HG_HOOKNAME=pretxnclose
196 pretxnclose hook: HG_HOOKNAME=pretxnclose
197 HG_HOOKTYPE=pretxnclose
197 HG_HOOKTYPE=pretxnclose
198 HG_PENDING=$TESTTMP/a
198 HG_PENDING=$TESTTMP/a
199 HG_TXNID=TXN:$ID$
199 HG_TXNID=TXN:$ID$
200 HG_TXNNAME=commit
200 HG_TXNNAME=commit
201
201
202 txnclose hook: HG_HOOKNAME=txnclose
202 txnclose hook: HG_HOOKNAME=txnclose
203 HG_HOOKTYPE=txnclose
203 HG_HOOKTYPE=txnclose
204 HG_TXNID=TXN:$ID$
204 HG_TXNID=TXN:$ID$
205 HG_TXNNAME=commit
205 HG_TXNNAME=commit
206
206
207 commit hook: HG_HOOKNAME=commit
207 commit hook: HG_HOOKNAME=commit
208 HG_HOOKTYPE=commit
208 HG_HOOKTYPE=commit
209 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
209 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
210 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
210 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
211 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
211 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
212
212
213 commit.b hook: HG_HOOKNAME=commit.b
213 commit.b hook: HG_HOOKNAME=commit.b
214 HG_HOOKTYPE=commit
214 HG_HOOKTYPE=commit
215 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
215 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
216 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
216 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
217 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
217 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
218
218
219
219
220 test generic hooks
220 test generic hooks
221
221
222 $ hg id
222 $ hg id
223 pre-identify hook: HG_ARGS=id
223 pre-identify hook: HG_ARGS=id
224 HG_HOOKNAME=pre-identify
224 HG_HOOKNAME=pre-identify
225 HG_HOOKTYPE=pre-identify
225 HG_HOOKTYPE=pre-identify
226 HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None, 'template': ''}
226 HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None, 'template': ''}
227 HG_PATS=[]
227 HG_PATS=[]
228
228
229 abort: pre-identify hook exited with status 1
229 abort: pre-identify hook exited with status 1
230 [255]
230 [255]
231 $ hg cat b
231 $ hg cat b
232 pre-cat hook: HG_ARGS=cat b
232 pre-cat hook: HG_ARGS=cat b
233 HG_HOOKNAME=pre-cat
233 HG_HOOKNAME=pre-cat
234 HG_HOOKTYPE=pre-cat
234 HG_HOOKTYPE=pre-cat
235 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
235 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
236 HG_PATS=['b']
236 HG_PATS=['b']
237
237
238 b
238 b
239 post-cat hook: HG_ARGS=cat b
239 post-cat hook: HG_ARGS=cat b
240 HG_HOOKNAME=post-cat
240 HG_HOOKNAME=post-cat
241 HG_HOOKTYPE=post-cat
241 HG_HOOKTYPE=post-cat
242 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
242 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
243 HG_PATS=['b']
243 HG_PATS=['b']
244 HG_RESULT=0
244 HG_RESULT=0
245
245
246
246
247 $ cd ../b
247 $ cd ../b
248 $ hg pull ../a
248 $ hg pull ../a
249 pulling from ../a
249 pulling from ../a
250 searching for changes
250 searching for changes
251 prechangegroup hook: HG_HOOKNAME=prechangegroup
251 prechangegroup hook: HG_HOOKNAME=prechangegroup
252 HG_HOOKTYPE=prechangegroup
252 HG_HOOKTYPE=prechangegroup
253 HG_SOURCE=pull
253 HG_SOURCE=pull
254 HG_TXNID=TXN:$ID$
254 HG_TXNID=TXN:$ID$
255 HG_TXNNAME=pull
255 HG_TXNNAME=pull
256 file:/*/$TESTTMP/a (glob)
256 file:/*/$TESTTMP/a (glob)
257 HG_URL=file:$TESTTMP/a
257 HG_URL=file:$TESTTMP/a
258
258
259 adding changesets
259 adding changesets
260 adding manifests
260 adding manifests
261 adding file changes
261 adding file changes
262 added 3 changesets with 2 changes to 2 files
262 added 3 changesets with 2 changes to 2 files
263 new changesets ab228980c14d:07f3376c1e65
263 new changesets ab228980c14d:07f3376c1e65
264 changegroup hook: HG_HOOKNAME=changegroup
264 changegroup hook: HG_HOOKNAME=changegroup
265 HG_HOOKTYPE=changegroup
265 HG_HOOKTYPE=changegroup
266 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
266 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
267 HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2
267 HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2
268 HG_SOURCE=pull
268 HG_SOURCE=pull
269 HG_TXNID=TXN:$ID$
269 HG_TXNID=TXN:$ID$
270 HG_TXNNAME=pull
270 HG_TXNNAME=pull
271 file:/*/$TESTTMP/a (glob)
271 file:/*/$TESTTMP/a (glob)
272 HG_URL=file:$TESTTMP/a
272 HG_URL=file:$TESTTMP/a
273
273
274 incoming hook: HG_HOOKNAME=incoming
274 incoming hook: HG_HOOKNAME=incoming
275 HG_HOOKTYPE=incoming
275 HG_HOOKTYPE=incoming
276 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
276 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
277 HG_SOURCE=pull
277 HG_SOURCE=pull
278 HG_TXNID=TXN:$ID$
278 HG_TXNID=TXN:$ID$
279 HG_TXNNAME=pull
279 HG_TXNNAME=pull
280 file:/*/$TESTTMP/a (glob)
280 file:/*/$TESTTMP/a (glob)
281 HG_URL=file:$TESTTMP/a
281 HG_URL=file:$TESTTMP/a
282
282
283 incoming hook: HG_HOOKNAME=incoming
283 incoming hook: HG_HOOKNAME=incoming
284 HG_HOOKTYPE=incoming
284 HG_HOOKTYPE=incoming
285 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
285 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
286 HG_SOURCE=pull
286 HG_SOURCE=pull
287 HG_TXNID=TXN:$ID$
287 HG_TXNID=TXN:$ID$
288 HG_TXNNAME=pull
288 HG_TXNNAME=pull
289 file:/*/$TESTTMP/a (glob)
289 file:/*/$TESTTMP/a (glob)
290 HG_URL=file:$TESTTMP/a
290 HG_URL=file:$TESTTMP/a
291
291
292 incoming hook: HG_HOOKNAME=incoming
292 incoming hook: HG_HOOKNAME=incoming
293 HG_HOOKTYPE=incoming
293 HG_HOOKTYPE=incoming
294 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
294 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
295 HG_SOURCE=pull
295 HG_SOURCE=pull
296 HG_TXNID=TXN:$ID$
296 HG_TXNID=TXN:$ID$
297 HG_TXNNAME=pull
297 HG_TXNNAME=pull
298 file:/*/$TESTTMP/a (glob)
298 file:/*/$TESTTMP/a (glob)
299 HG_URL=file:$TESTTMP/a
299 HG_URL=file:$TESTTMP/a
300
300
301 (run 'hg update' to get a working copy)
301 (run 'hg update' to get a working copy)
302
302
303 tag hooks can see env vars
303 tag hooks can see env vars
304
304
305 $ cd ../a
305 $ cd ../a
306 $ cat >> .hg/hgrc <<EOF
306 $ cat >> .hg/hgrc <<EOF
307 > pretag = sh -c "printenv.py --line pretag"
307 > pretag = sh -c "printenv.py --line pretag"
308 > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py --line tag"
308 > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py --line tag"
309 > EOF
309 > EOF
310 $ hg tag -d '3 0' a
310 $ hg tag -d '3 0' a
311 pretag hook: HG_HOOKNAME=pretag
311 pretag hook: HG_HOOKNAME=pretag
312 HG_HOOKTYPE=pretag
312 HG_HOOKTYPE=pretag
313 HG_LOCAL=0
313 HG_LOCAL=0
314 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
314 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
315 HG_TAG=a
315 HG_TAG=a
316
316
317 precommit hook: HG_HOOKNAME=precommit
317 precommit hook: HG_HOOKNAME=precommit
318 HG_HOOKTYPE=precommit
318 HG_HOOKTYPE=precommit
319 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
319 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
320
320
321 pretxnopen hook: HG_HOOKNAME=pretxnopen
321 pretxnopen hook: HG_HOOKNAME=pretxnopen
322 HG_HOOKTYPE=pretxnopen
322 HG_HOOKTYPE=pretxnopen
323 HG_TXNID=TXN:$ID$
323 HG_TXNID=TXN:$ID$
324 HG_TXNNAME=commit
324 HG_TXNNAME=commit
325
325
326 pretxncommit hook: HG_HOOKNAME=pretxncommit
326 pretxncommit hook: HG_HOOKNAME=pretxncommit
327 HG_HOOKTYPE=pretxncommit
327 HG_HOOKTYPE=pretxncommit
328 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
328 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
329 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
329 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
330 HG_PENDING=$TESTTMP/a
330 HG_PENDING=$TESTTMP/a
331
331
332 4:539e4b31b6dc
332 4:539e4b31b6dc
333 pretxnclose hook: HG_HOOKNAME=pretxnclose
333 pretxnclose hook: HG_HOOKNAME=pretxnclose
334 HG_HOOKTYPE=pretxnclose
334 HG_HOOKTYPE=pretxnclose
335 HG_PENDING=$TESTTMP/a
335 HG_PENDING=$TESTTMP/a
336 HG_TXNID=TXN:$ID$
336 HG_TXNID=TXN:$ID$
337 HG_TXNNAME=commit
337 HG_TXNNAME=commit
338
338
339 tag hook: HG_HOOKNAME=tag
339 tag hook: HG_HOOKNAME=tag
340 HG_HOOKTYPE=tag
340 HG_HOOKTYPE=tag
341 HG_LOCAL=0
341 HG_LOCAL=0
342 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
342 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
343 HG_TAG=a
343 HG_TAG=a
344
344
345 txnclose hook: HG_HOOKNAME=txnclose
345 txnclose hook: HG_HOOKNAME=txnclose
346 HG_HOOKTYPE=txnclose
346 HG_HOOKTYPE=txnclose
347 HG_TXNID=TXN:$ID$
347 HG_TXNID=TXN:$ID$
348 HG_TXNNAME=commit
348 HG_TXNNAME=commit
349
349
350 commit hook: HG_HOOKNAME=commit
350 commit hook: HG_HOOKNAME=commit
351 HG_HOOKTYPE=commit
351 HG_HOOKTYPE=commit
352 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
352 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
353 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
353 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
354
354
355 commit.b hook: HG_HOOKNAME=commit.b
355 commit.b hook: HG_HOOKNAME=commit.b
356 HG_HOOKTYPE=commit
356 HG_HOOKTYPE=commit
357 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
357 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
358 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
358 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
359
359
360 $ hg tag -l la
360 $ hg tag -l la
361 pretag hook: HG_HOOKNAME=pretag
361 pretag hook: HG_HOOKNAME=pretag
362 HG_HOOKTYPE=pretag
362 HG_HOOKTYPE=pretag
363 HG_LOCAL=1
363 HG_LOCAL=1
364 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
364 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
365 HG_TAG=la
365 HG_TAG=la
366
366
367 tag hook: HG_HOOKNAME=tag
367 tag hook: HG_HOOKNAME=tag
368 HG_HOOKTYPE=tag
368 HG_HOOKTYPE=tag
369 HG_LOCAL=1
369 HG_LOCAL=1
370 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
370 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
371 HG_TAG=la
371 HG_TAG=la
372
372
373
373
374 pretag hook can forbid tagging
374 pretag hook can forbid tagging
375
375
376 $ cat >> .hg/hgrc <<EOF
376 $ cat >> .hg/hgrc <<EOF
377 > pretag.forbid = sh -c "printenv.py --line pretag.forbid 1"
377 > pretag.forbid = sh -c "printenv.py --line pretag.forbid 1"
378 > EOF
378 > EOF
379 $ hg tag -d '4 0' fa
379 $ hg tag -d '4 0' fa
380 pretag hook: HG_HOOKNAME=pretag
380 pretag hook: HG_HOOKNAME=pretag
381 HG_HOOKTYPE=pretag
381 HG_HOOKTYPE=pretag
382 HG_LOCAL=0
382 HG_LOCAL=0
383 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
383 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
384 HG_TAG=fa
384 HG_TAG=fa
385
385
386 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
386 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
387 HG_HOOKTYPE=pretag
387 HG_HOOKTYPE=pretag
388 HG_LOCAL=0
388 HG_LOCAL=0
389 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
389 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
390 HG_TAG=fa
390 HG_TAG=fa
391
391
392 abort: pretag.forbid hook exited with status 1
392 abort: pretag.forbid hook exited with status 1
393 [255]
393 [255]
394 $ hg tag -l fla
394 $ hg tag -l fla
395 pretag hook: HG_HOOKNAME=pretag
395 pretag hook: HG_HOOKNAME=pretag
396 HG_HOOKTYPE=pretag
396 HG_HOOKTYPE=pretag
397 HG_LOCAL=1
397 HG_LOCAL=1
398 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
398 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
399 HG_TAG=fla
399 HG_TAG=fla
400
400
401 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
401 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
402 HG_HOOKTYPE=pretag
402 HG_HOOKTYPE=pretag
403 HG_LOCAL=1
403 HG_LOCAL=1
404 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
404 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
405 HG_TAG=fla
405 HG_TAG=fla
406
406
407 abort: pretag.forbid hook exited with status 1
407 abort: pretag.forbid hook exited with status 1
408 [255]
408 [255]
409
409
410 pretxncommit hook can see changeset, can roll back txn, changeset no
410 pretxncommit hook can see changeset, can roll back txn, changeset no
411 more there after
411 more there after
412
412
413 $ cat >> .hg/hgrc <<EOF
413 $ cat >> .hg/hgrc <<EOF
414 > pretxncommit.forbid0 = sh -c "hg tip -q"
414 > pretxncommit.forbid0 = sh -c "hg tip -q"
415 > pretxncommit.forbid1 = sh -c "printenv.py --line pretxncommit.forbid 1"
415 > pretxncommit.forbid1 = sh -c "printenv.py --line pretxncommit.forbid 1"
416 > EOF
416 > EOF
417 $ echo z > z
417 $ echo z > z
418 $ hg add z
418 $ hg add z
419 $ hg -q tip
419 $ hg -q tip
420 4:539e4b31b6dc
420 4:539e4b31b6dc
421 $ hg commit -m 'fail' -d '4 0'
421 $ hg commit -m 'fail' -d '4 0'
422 precommit hook: HG_HOOKNAME=precommit
422 precommit hook: HG_HOOKNAME=precommit
423 HG_HOOKTYPE=precommit
423 HG_HOOKTYPE=precommit
424 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
424 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
425
425
426 pretxnopen hook: HG_HOOKNAME=pretxnopen
426 pretxnopen hook: HG_HOOKNAME=pretxnopen
427 HG_HOOKTYPE=pretxnopen
427 HG_HOOKTYPE=pretxnopen
428 HG_TXNID=TXN:$ID$
428 HG_TXNID=TXN:$ID$
429 HG_TXNNAME=commit
429 HG_TXNNAME=commit
430
430
431 pretxncommit hook: HG_HOOKNAME=pretxncommit
431 pretxncommit hook: HG_HOOKNAME=pretxncommit
432 HG_HOOKTYPE=pretxncommit
432 HG_HOOKTYPE=pretxncommit
433 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
433 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
434 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
434 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
435 HG_PENDING=$TESTTMP/a
435 HG_PENDING=$TESTTMP/a
436
436
437 5:6f611f8018c1
437 5:6f611f8018c1
438 5:6f611f8018c1
438 5:6f611f8018c1
439 pretxncommit.forbid hook: HG_HOOKNAME=pretxncommit.forbid1
439 pretxncommit.forbid hook: HG_HOOKNAME=pretxncommit.forbid1
440 HG_HOOKTYPE=pretxncommit
440 HG_HOOKTYPE=pretxncommit
441 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
441 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
442 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
442 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
443 HG_PENDING=$TESTTMP/a
443 HG_PENDING=$TESTTMP/a
444
444
445 transaction abort!
445 transaction abort!
446 txnabort Python hook: txnid,txnname
446 txnabort Python hook: changes,txnid,txnname
447 txnabort hook: HG_HOOKNAME=txnabort.1
447 txnabort hook: HG_HOOKNAME=txnabort.1
448 HG_HOOKTYPE=txnabort
448 HG_HOOKTYPE=txnabort
449 HG_TXNID=TXN:$ID$
449 HG_TXNID=TXN:$ID$
450 HG_TXNNAME=commit
450 HG_TXNNAME=commit
451
451
452 rollback completed
452 rollback completed
453 abort: pretxncommit.forbid1 hook exited with status 1
453 abort: pretxncommit.forbid1 hook exited with status 1
454 [255]
454 [255]
455 $ hg -q tip
455 $ hg -q tip
456 4:539e4b31b6dc
456 4:539e4b31b6dc
457
457
458 (Check that no 'changelog.i.a' file were left behind)
458 (Check that no 'changelog.i.a' file were left behind)
459
459
460 $ ls -1 .hg/store/
460 $ ls -1 .hg/store/
461 00changelog.i
461 00changelog.i
462 00manifest.i
462 00manifest.i
463 data
463 data
464 fncache (repofncache !)
464 fncache (repofncache !)
465 journal.phaseroots
465 journal.phaseroots
466 phaseroots
466 phaseroots
467 undo
467 undo
468 undo.backup.fncache (repofncache !)
468 undo.backup.fncache (repofncache !)
469 undo.backupfiles
469 undo.backupfiles
470 undo.phaseroots
470 undo.phaseroots
471
471
472
472
473 precommit hook can prevent commit
473 precommit hook can prevent commit
474
474
475 $ cat >> .hg/hgrc <<EOF
475 $ cat >> .hg/hgrc <<EOF
476 > precommit.forbid = sh -c "printenv.py --line precommit.forbid 1"
476 > precommit.forbid = sh -c "printenv.py --line precommit.forbid 1"
477 > EOF
477 > EOF
478 $ hg commit -m 'fail' -d '4 0'
478 $ hg commit -m 'fail' -d '4 0'
479 precommit hook: HG_HOOKNAME=precommit
479 precommit hook: HG_HOOKNAME=precommit
480 HG_HOOKTYPE=precommit
480 HG_HOOKTYPE=precommit
481 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
481 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
482
482
483 precommit.forbid hook: HG_HOOKNAME=precommit.forbid
483 precommit.forbid hook: HG_HOOKNAME=precommit.forbid
484 HG_HOOKTYPE=precommit
484 HG_HOOKTYPE=precommit
485 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
485 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
486
486
487 abort: precommit.forbid hook exited with status 1
487 abort: precommit.forbid hook exited with status 1
488 [255]
488 [255]
489 $ hg -q tip
489 $ hg -q tip
490 4:539e4b31b6dc
490 4:539e4b31b6dc
491
491
492 preupdate hook can prevent update
492 preupdate hook can prevent update
493
493
494 $ cat >> .hg/hgrc <<EOF
494 $ cat >> .hg/hgrc <<EOF
495 > preupdate = sh -c "printenv.py --line preupdate"
495 > preupdate = sh -c "printenv.py --line preupdate"
496 > EOF
496 > EOF
497 $ hg update 1
497 $ hg update 1
498 preupdate hook: HG_HOOKNAME=preupdate
498 preupdate hook: HG_HOOKNAME=preupdate
499 HG_HOOKTYPE=preupdate
499 HG_HOOKTYPE=preupdate
500 HG_PARENT1=ab228980c14d
500 HG_PARENT1=ab228980c14d
501
501
502 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
502 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
503
503
504 update hook
504 update hook
505
505
506 $ cat >> .hg/hgrc <<EOF
506 $ cat >> .hg/hgrc <<EOF
507 > update = sh -c "printenv.py --line update"
507 > update = sh -c "printenv.py --line update"
508 > EOF
508 > EOF
509 $ hg update
509 $ hg update
510 preupdate hook: HG_HOOKNAME=preupdate
510 preupdate hook: HG_HOOKNAME=preupdate
511 HG_HOOKTYPE=preupdate
511 HG_HOOKTYPE=preupdate
512 HG_PARENT1=539e4b31b6dc
512 HG_PARENT1=539e4b31b6dc
513
513
514 update hook: HG_ERROR=0
514 update hook: HG_ERROR=0
515 HG_HOOKNAME=update
515 HG_HOOKNAME=update
516 HG_HOOKTYPE=update
516 HG_HOOKTYPE=update
517 HG_PARENT1=539e4b31b6dc
517 HG_PARENT1=539e4b31b6dc
518
518
519 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
519 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
520
520
521 pushkey hook
521 pushkey hook
522
522
523 $ cat >> .hg/hgrc <<EOF
523 $ cat >> .hg/hgrc <<EOF
524 > pushkey = sh -c "printenv.py --line pushkey"
524 > pushkey = sh -c "printenv.py --line pushkey"
525 > EOF
525 > EOF
526 $ cd ../b
526 $ cd ../b
527 $ hg bookmark -r null foo
527 $ hg bookmark -r null foo
528 $ hg push -B foo ../a
528 $ hg push -B foo ../a
529 pushing to ../a
529 pushing to ../a
530 searching for changes
530 searching for changes
531 no changes found
531 no changes found
532 pretxnopen hook: HG_HOOKNAME=pretxnopen
532 pretxnopen hook: HG_HOOKNAME=pretxnopen
533 HG_HOOKTYPE=pretxnopen
533 HG_HOOKTYPE=pretxnopen
534 HG_TXNID=TXN:$ID$
534 HG_TXNID=TXN:$ID$
535 HG_TXNNAME=push
535 HG_TXNNAME=push
536
536
537 pretxnclose hook: HG_BOOKMARK_MOVED=1
537 pretxnclose hook: HG_BOOKMARK_MOVED=1
538 HG_BUNDLE2=1
538 HG_BUNDLE2=1
539 HG_HOOKNAME=pretxnclose
539 HG_HOOKNAME=pretxnclose
540 HG_HOOKTYPE=pretxnclose
540 HG_HOOKTYPE=pretxnclose
541 HG_PENDING=$TESTTMP/a
541 HG_PENDING=$TESTTMP/a
542 HG_SOURCE=push
542 HG_SOURCE=push
543 HG_TXNID=TXN:$ID$
543 HG_TXNID=TXN:$ID$
544 HG_TXNNAME=push
544 HG_TXNNAME=push
545 HG_URL=file:$TESTTMP/a
545 HG_URL=file:$TESTTMP/a
546
546
547 pushkey hook: HG_BUNDLE2=1
547 pushkey hook: HG_BUNDLE2=1
548 HG_HOOKNAME=pushkey
548 HG_HOOKNAME=pushkey
549 HG_HOOKTYPE=pushkey
549 HG_HOOKTYPE=pushkey
550 HG_KEY=foo
550 HG_KEY=foo
551 HG_NAMESPACE=bookmarks
551 HG_NAMESPACE=bookmarks
552 HG_NEW=0000000000000000000000000000000000000000
552 HG_NEW=0000000000000000000000000000000000000000
553 HG_PUSHKEYCOMPAT=1
553 HG_PUSHKEYCOMPAT=1
554 HG_SOURCE=push
554 HG_SOURCE=push
555 HG_TXNID=TXN:$ID$
555 HG_TXNID=TXN:$ID$
556 HG_TXNNAME=push
556 HG_TXNNAME=push
557 HG_URL=file:$TESTTMP/a
557 HG_URL=file:$TESTTMP/a
558
558
559 txnclose hook: HG_BOOKMARK_MOVED=1
559 txnclose hook: HG_BOOKMARK_MOVED=1
560 HG_BUNDLE2=1
560 HG_BUNDLE2=1
561 HG_HOOKNAME=txnclose
561 HG_HOOKNAME=txnclose
562 HG_HOOKTYPE=txnclose
562 HG_HOOKTYPE=txnclose
563 HG_SOURCE=push
563 HG_SOURCE=push
564 HG_TXNID=TXN:$ID$
564 HG_TXNID=TXN:$ID$
565 HG_TXNNAME=push
565 HG_TXNNAME=push
566 HG_URL=file:$TESTTMP/a
566 HG_URL=file:$TESTTMP/a
567
567
568 exporting bookmark foo
568 exporting bookmark foo
569 [1]
569 [1]
570 $ cd ../a
570 $ cd ../a
571
571
572 listkeys hook
572 listkeys hook
573
573
574 $ cat >> .hg/hgrc <<EOF
574 $ cat >> .hg/hgrc <<EOF
575 > listkeys = sh -c "printenv.py --line listkeys"
575 > listkeys = sh -c "printenv.py --line listkeys"
576 > EOF
576 > EOF
577 $ hg bookmark -r null bar
577 $ hg bookmark -r null bar
578 pretxnopen hook: HG_HOOKNAME=pretxnopen
578 pretxnopen hook: HG_HOOKNAME=pretxnopen
579 HG_HOOKTYPE=pretxnopen
579 HG_HOOKTYPE=pretxnopen
580 HG_TXNID=TXN:$ID$
580 HG_TXNID=TXN:$ID$
581 HG_TXNNAME=bookmark
581 HG_TXNNAME=bookmark
582
582
583 pretxnclose hook: HG_BOOKMARK_MOVED=1
583 pretxnclose hook: HG_BOOKMARK_MOVED=1
584 HG_HOOKNAME=pretxnclose
584 HG_HOOKNAME=pretxnclose
585 HG_HOOKTYPE=pretxnclose
585 HG_HOOKTYPE=pretxnclose
586 HG_PENDING=$TESTTMP/a
586 HG_PENDING=$TESTTMP/a
587 HG_TXNID=TXN:$ID$
587 HG_TXNID=TXN:$ID$
588 HG_TXNNAME=bookmark
588 HG_TXNNAME=bookmark
589
589
590 txnclose hook: HG_BOOKMARK_MOVED=1
590 txnclose hook: HG_BOOKMARK_MOVED=1
591 HG_HOOKNAME=txnclose
591 HG_HOOKNAME=txnclose
592 HG_HOOKTYPE=txnclose
592 HG_HOOKTYPE=txnclose
593 HG_TXNID=TXN:$ID$
593 HG_TXNID=TXN:$ID$
594 HG_TXNNAME=bookmark
594 HG_TXNNAME=bookmark
595
595
596 $ cd ../b
596 $ cd ../b
597 $ hg pull -B bar ../a
597 $ hg pull -B bar ../a
598 pulling from ../a
598 pulling from ../a
599 listkeys hook: HG_HOOKNAME=listkeys
599 listkeys hook: HG_HOOKNAME=listkeys
600 HG_HOOKTYPE=listkeys
600 HG_HOOKTYPE=listkeys
601 HG_NAMESPACE=bookmarks
601 HG_NAMESPACE=bookmarks
602 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
602 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
603
603
604 no changes found
604 no changes found
605 adding remote bookmark bar
605 adding remote bookmark bar
606 $ cd ../a
606 $ cd ../a
607
607
608 test that prepushkey can prevent incoming keys
608 test that prepushkey can prevent incoming keys
609
609
610 $ cat >> .hg/hgrc <<EOF
610 $ cat >> .hg/hgrc <<EOF
611 > prepushkey = sh -c "printenv.py --line prepushkey.forbid 1"
611 > prepushkey = sh -c "printenv.py --line prepushkey.forbid 1"
612 > EOF
612 > EOF
613 $ cd ../b
613 $ cd ../b
614 $ hg bookmark -r null baz
614 $ hg bookmark -r null baz
615 $ hg push -B baz ../a
615 $ hg push -B baz ../a
616 pushing to ../a
616 pushing to ../a
617 searching for changes
617 searching for changes
618 listkeys hook: HG_HOOKNAME=listkeys
618 listkeys hook: HG_HOOKNAME=listkeys
619 HG_HOOKTYPE=listkeys
619 HG_HOOKTYPE=listkeys
620 HG_NAMESPACE=phases
620 HG_NAMESPACE=phases
621 HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
621 HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
622
622
623 listkeys hook: HG_HOOKNAME=listkeys
623 listkeys hook: HG_HOOKNAME=listkeys
624 HG_HOOKTYPE=listkeys
624 HG_HOOKTYPE=listkeys
625 HG_NAMESPACE=bookmarks
625 HG_NAMESPACE=bookmarks
626 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
626 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
627
627
628 no changes found
628 no changes found
629 pretxnopen hook: HG_HOOKNAME=pretxnopen
629 pretxnopen hook: HG_HOOKNAME=pretxnopen
630 HG_HOOKTYPE=pretxnopen
630 HG_HOOKTYPE=pretxnopen
631 HG_TXNID=TXN:$ID$
631 HG_TXNID=TXN:$ID$
632 HG_TXNNAME=push
632 HG_TXNNAME=push
633
633
634 prepushkey.forbid hook: HG_BUNDLE2=1
634 prepushkey.forbid hook: HG_BUNDLE2=1
635 HG_HOOKNAME=prepushkey
635 HG_HOOKNAME=prepushkey
636 HG_HOOKTYPE=prepushkey
636 HG_HOOKTYPE=prepushkey
637 HG_KEY=baz
637 HG_KEY=baz
638 HG_NAMESPACE=bookmarks
638 HG_NAMESPACE=bookmarks
639 HG_NEW=0000000000000000000000000000000000000000
639 HG_NEW=0000000000000000000000000000000000000000
640 HG_PUSHKEYCOMPAT=1
640 HG_PUSHKEYCOMPAT=1
641 HG_SOURCE=push
641 HG_SOURCE=push
642 HG_TXNID=TXN:$ID$
642 HG_TXNID=TXN:$ID$
643 HG_TXNNAME=push
643 HG_TXNNAME=push
644 HG_URL=file:$TESTTMP/a
644 HG_URL=file:$TESTTMP/a
645
645
646 abort: prepushkey hook exited with status 1
646 abort: prepushkey hook exited with status 1
647 [255]
647 [255]
648 $ cd ../a
648 $ cd ../a
649
649
650 test that prelistkeys can prevent listing keys
650 test that prelistkeys can prevent listing keys
651
651
652 $ cat >> .hg/hgrc <<EOF
652 $ cat >> .hg/hgrc <<EOF
653 > prelistkeys = sh -c "printenv.py --line prelistkeys.forbid 1"
653 > prelistkeys = sh -c "printenv.py --line prelistkeys.forbid 1"
654 > EOF
654 > EOF
655 $ hg bookmark -r null quux
655 $ hg bookmark -r null quux
656 pretxnopen hook: HG_HOOKNAME=pretxnopen
656 pretxnopen hook: HG_HOOKNAME=pretxnopen
657 HG_HOOKTYPE=pretxnopen
657 HG_HOOKTYPE=pretxnopen
658 HG_TXNID=TXN:$ID$
658 HG_TXNID=TXN:$ID$
659 HG_TXNNAME=bookmark
659 HG_TXNNAME=bookmark
660
660
661 pretxnclose hook: HG_BOOKMARK_MOVED=1
661 pretxnclose hook: HG_BOOKMARK_MOVED=1
662 HG_HOOKNAME=pretxnclose
662 HG_HOOKNAME=pretxnclose
663 HG_HOOKTYPE=pretxnclose
663 HG_HOOKTYPE=pretxnclose
664 HG_PENDING=$TESTTMP/a
664 HG_PENDING=$TESTTMP/a
665 HG_TXNID=TXN:$ID$
665 HG_TXNID=TXN:$ID$
666 HG_TXNNAME=bookmark
666 HG_TXNNAME=bookmark
667
667
668 txnclose hook: HG_BOOKMARK_MOVED=1
668 txnclose hook: HG_BOOKMARK_MOVED=1
669 HG_HOOKNAME=txnclose
669 HG_HOOKNAME=txnclose
670 HG_HOOKTYPE=txnclose
670 HG_HOOKTYPE=txnclose
671 HG_TXNID=TXN:$ID$
671 HG_TXNID=TXN:$ID$
672 HG_TXNNAME=bookmark
672 HG_TXNNAME=bookmark
673
673
674 $ cd ../b
674 $ cd ../b
675 $ hg pull -B quux ../a
675 $ hg pull -B quux ../a
676 pulling from ../a
676 pulling from ../a
677 prelistkeys.forbid hook: HG_HOOKNAME=prelistkeys
677 prelistkeys.forbid hook: HG_HOOKNAME=prelistkeys
678 HG_HOOKTYPE=prelistkeys
678 HG_HOOKTYPE=prelistkeys
679 HG_NAMESPACE=bookmarks
679 HG_NAMESPACE=bookmarks
680
680
681 abort: prelistkeys hook exited with status 1
681 abort: prelistkeys hook exited with status 1
682 [255]
682 [255]
683 $ cd ../a
683 $ cd ../a
684 $ rm .hg/hgrc
684 $ rm .hg/hgrc
685
685
686 prechangegroup hook can prevent incoming changes
686 prechangegroup hook can prevent incoming changes
687
687
688 $ cd ../b
688 $ cd ../b
689 $ hg -q tip
689 $ hg -q tip
690 3:07f3376c1e65
690 3:07f3376c1e65
691 $ cat > .hg/hgrc <<EOF
691 $ cat > .hg/hgrc <<EOF
692 > [hooks]
692 > [hooks]
693 > prechangegroup.forbid = sh -c "printenv.py --line prechangegroup.forbid 1"
693 > prechangegroup.forbid = sh -c "printenv.py --line prechangegroup.forbid 1"
694 > EOF
694 > EOF
695 $ hg pull ../a
695 $ hg pull ../a
696 pulling from ../a
696 pulling from ../a
697 searching for changes
697 searching for changes
698 prechangegroup.forbid hook: HG_HOOKNAME=prechangegroup.forbid
698 prechangegroup.forbid hook: HG_HOOKNAME=prechangegroup.forbid
699 HG_HOOKTYPE=prechangegroup
699 HG_HOOKTYPE=prechangegroup
700 HG_SOURCE=pull
700 HG_SOURCE=pull
701 HG_TXNID=TXN:$ID$
701 HG_TXNID=TXN:$ID$
702 HG_TXNNAME=pull
702 HG_TXNNAME=pull
703 file:/*/$TESTTMP/a (glob)
703 file:/*/$TESTTMP/a (glob)
704 HG_URL=file:$TESTTMP/a
704 HG_URL=file:$TESTTMP/a
705
705
706 abort: prechangegroup.forbid hook exited with status 1
706 abort: prechangegroup.forbid hook exited with status 1
707 [255]
707 [255]
708
708
709 pretxnchangegroup hook can see incoming changes, can roll back txn,
709 pretxnchangegroup hook can see incoming changes, can roll back txn,
710 incoming changes no longer there after
710 incoming changes no longer there after
711
711
712 $ cat > .hg/hgrc <<EOF
712 $ cat > .hg/hgrc <<EOF
713 > [hooks]
713 > [hooks]
714 > pretxnchangegroup.forbid0 = hg tip -q
714 > pretxnchangegroup.forbid0 = hg tip -q
715 > pretxnchangegroup.forbid1 = sh -c "printenv.py --line pretxnchangegroup.forbid 1"
715 > pretxnchangegroup.forbid1 = sh -c "printenv.py --line pretxnchangegroup.forbid 1"
716 > EOF
716 > EOF
717 $ hg pull ../a
717 $ hg pull ../a
718 pulling from ../a
718 pulling from ../a
719 searching for changes
719 searching for changes
720 adding changesets
720 adding changesets
721 adding manifests
721 adding manifests
722 adding file changes
722 adding file changes
723 4:539e4b31b6dc
723 4:539e4b31b6dc
724 pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1
724 pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1
725 HG_HOOKTYPE=pretxnchangegroup
725 HG_HOOKTYPE=pretxnchangegroup
726 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
726 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
727 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
727 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
728 HG_PENDING=$TESTTMP/b
728 HG_PENDING=$TESTTMP/b
729 HG_SOURCE=pull
729 HG_SOURCE=pull
730 HG_TXNID=TXN:$ID$
730 HG_TXNID=TXN:$ID$
731 HG_TXNNAME=pull
731 HG_TXNNAME=pull
732 file:/*/$TESTTMP/a (glob)
732 file:/*/$TESTTMP/a (glob)
733 HG_URL=file:$TESTTMP/a
733 HG_URL=file:$TESTTMP/a
734
734
735 transaction abort!
735 transaction abort!
736 rollback completed
736 rollback completed
737 abort: pretxnchangegroup.forbid1 hook exited with status 1
737 abort: pretxnchangegroup.forbid1 hook exited with status 1
738 [255]
738 [255]
739 $ hg -q tip
739 $ hg -q tip
740 3:07f3376c1e65
740 3:07f3376c1e65
741
741
742 outgoing hooks can see env vars
742 outgoing hooks can see env vars
743
743
744 $ rm .hg/hgrc
744 $ rm .hg/hgrc
745 $ cat > ../a/.hg/hgrc <<EOF
745 $ cat > ../a/.hg/hgrc <<EOF
746 > [hooks]
746 > [hooks]
747 > preoutgoing = sh -c "printenv.py --line preoutgoing"
747 > preoutgoing = sh -c "printenv.py --line preoutgoing"
748 > outgoing = sh -c "printenv.py --line outgoing"
748 > outgoing = sh -c "printenv.py --line outgoing"
749 > EOF
749 > EOF
750 $ hg pull ../a
750 $ hg pull ../a
751 pulling from ../a
751 pulling from ../a
752 searching for changes
752 searching for changes
753 preoutgoing hook: HG_HOOKNAME=preoutgoing
753 preoutgoing hook: HG_HOOKNAME=preoutgoing
754 HG_HOOKTYPE=preoutgoing
754 HG_HOOKTYPE=preoutgoing
755 HG_SOURCE=pull
755 HG_SOURCE=pull
756
756
757 outgoing hook: HG_HOOKNAME=outgoing
757 outgoing hook: HG_HOOKNAME=outgoing
758 HG_HOOKTYPE=outgoing
758 HG_HOOKTYPE=outgoing
759 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
759 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
760 HG_SOURCE=pull
760 HG_SOURCE=pull
761
761
762 adding changesets
762 adding changesets
763 adding manifests
763 adding manifests
764 adding file changes
764 adding file changes
765 adding remote bookmark quux
765 adding remote bookmark quux
766 added 1 changesets with 1 changes to 1 files
766 added 1 changesets with 1 changes to 1 files
767 new changesets 539e4b31b6dc
767 new changesets 539e4b31b6dc
768 (run 'hg update' to get a working copy)
768 (run 'hg update' to get a working copy)
769 $ hg rollback
769 $ hg rollback
770 repository tip rolled back to revision 3 (undo pull)
770 repository tip rolled back to revision 3 (undo pull)
771
771
772 preoutgoing hook can prevent outgoing changes
772 preoutgoing hook can prevent outgoing changes
773
773
774 $ cat >> ../a/.hg/hgrc <<EOF
774 $ cat >> ../a/.hg/hgrc <<EOF
775 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
775 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
776 > EOF
776 > EOF
777 $ hg pull ../a
777 $ hg pull ../a
778 pulling from ../a
778 pulling from ../a
779 searching for changes
779 searching for changes
780 preoutgoing hook: HG_HOOKNAME=preoutgoing
780 preoutgoing hook: HG_HOOKNAME=preoutgoing
781 HG_HOOKTYPE=preoutgoing
781 HG_HOOKTYPE=preoutgoing
782 HG_SOURCE=pull
782 HG_SOURCE=pull
783
783
784 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
784 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
785 HG_HOOKTYPE=preoutgoing
785 HG_HOOKTYPE=preoutgoing
786 HG_SOURCE=pull
786 HG_SOURCE=pull
787
787
788 abort: preoutgoing.forbid hook exited with status 1
788 abort: preoutgoing.forbid hook exited with status 1
789 [255]
789 [255]
790
790
791 outgoing hooks work for local clones
791 outgoing hooks work for local clones
792
792
793 $ cd ..
793 $ cd ..
794 $ cat > a/.hg/hgrc <<EOF
794 $ cat > a/.hg/hgrc <<EOF
795 > [hooks]
795 > [hooks]
796 > preoutgoing = sh -c "printenv.py --line preoutgoing"
796 > preoutgoing = sh -c "printenv.py --line preoutgoing"
797 > outgoing = sh -c "printenv.py --line outgoing"
797 > outgoing = sh -c "printenv.py --line outgoing"
798 > EOF
798 > EOF
799 $ hg clone a c
799 $ hg clone a c
800 preoutgoing hook: HG_HOOKNAME=preoutgoing
800 preoutgoing hook: HG_HOOKNAME=preoutgoing
801 HG_HOOKTYPE=preoutgoing
801 HG_HOOKTYPE=preoutgoing
802 HG_SOURCE=clone
802 HG_SOURCE=clone
803
803
804 outgoing hook: HG_HOOKNAME=outgoing
804 outgoing hook: HG_HOOKNAME=outgoing
805 HG_HOOKTYPE=outgoing
805 HG_HOOKTYPE=outgoing
806 HG_NODE=0000000000000000000000000000000000000000
806 HG_NODE=0000000000000000000000000000000000000000
807 HG_SOURCE=clone
807 HG_SOURCE=clone
808
808
809 updating to branch default
809 updating to branch default
810 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
810 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
811 $ rm -rf c
811 $ rm -rf c
812
812
813 preoutgoing hook can prevent outgoing changes for local clones
813 preoutgoing hook can prevent outgoing changes for local clones
814
814
815 $ cat >> a/.hg/hgrc <<EOF
815 $ cat >> a/.hg/hgrc <<EOF
816 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
816 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
817 > EOF
817 > EOF
818 $ hg clone a zzz
818 $ hg clone a zzz
819 preoutgoing hook: HG_HOOKNAME=preoutgoing
819 preoutgoing hook: HG_HOOKNAME=preoutgoing
820 HG_HOOKTYPE=preoutgoing
820 HG_HOOKTYPE=preoutgoing
821 HG_SOURCE=clone
821 HG_SOURCE=clone
822
822
823 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
823 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
824 HG_HOOKTYPE=preoutgoing
824 HG_HOOKTYPE=preoutgoing
825 HG_SOURCE=clone
825 HG_SOURCE=clone
826
826
827 abort: preoutgoing.forbid hook exited with status 1
827 abort: preoutgoing.forbid hook exited with status 1
828 [255]
828 [255]
829
829
830 $ cd "$TESTTMP/b"
830 $ cd "$TESTTMP/b"
831
831
832 $ cat > hooktests.py <<EOF
832 $ cat > hooktests.py <<EOF
833 > from __future__ import print_function
833 > from __future__ import print_function
834 > from mercurial import (
834 > from mercurial import (
835 > error,
835 > error,
836 > pycompat,
836 > pycompat,
837 > )
837 > )
838 >
838 >
839 > uncallable = 0
839 > uncallable = 0
840 >
840 >
841 > def printargs(ui, args):
841 > def printargs(ui, args):
842 > a = list(pycompat.byteskwargs(args).items())
842 > a = list(pycompat.byteskwargs(args).items())
843 > a.sort()
843 > a.sort()
844 > ui.write(b'hook args:\n')
844 > ui.write(b'hook args:\n')
845 > for k, v in a:
845 > for k, v in a:
846 > ui.write(b' %s %s\n' % (k, v))
846 > ui.write(b' %s %s\n' % (k, v))
847 >
847 >
848 > def passhook(ui, repo, **args):
848 > def passhook(ui, repo, **args):
849 > printargs(ui, args)
849 > printargs(ui, args)
850 >
850 >
851 > def failhook(ui, repo, **args):
851 > def failhook(ui, repo, **args):
852 > printargs(ui, args)
852 > printargs(ui, args)
853 > return True
853 > return True
854 >
854 >
855 > class LocalException(Exception):
855 > class LocalException(Exception):
856 > pass
856 > pass
857 >
857 >
858 > def raisehook(**args):
858 > def raisehook(**args):
859 > raise LocalException('exception from hook')
859 > raise LocalException('exception from hook')
860 >
860 >
861 > def aborthook(**args):
861 > def aborthook(**args):
862 > raise error.Abort(b'raise abort from hook')
862 > raise error.Abort(b'raise abort from hook')
863 >
863 >
864 > def brokenhook(**args):
864 > def brokenhook(**args):
865 > return 1 + {}
865 > return 1 + {}
866 >
866 >
867 > def verbosehook(ui, **args):
867 > def verbosehook(ui, **args):
868 > ui.note(b'verbose output from hook\n')
868 > ui.note(b'verbose output from hook\n')
869 >
869 >
870 > def printtags(ui, repo, **args):
870 > def printtags(ui, repo, **args):
871 > ui.write(b'[%s]\n' % b', '.join(sorted(repo.tags())))
871 > ui.write(b'[%s]\n' % b', '.join(sorted(repo.tags())))
872 >
872 >
873 > class container(object):
873 > class container(object):
874 > unreachable = 1
874 > unreachable = 1
875 > EOF
875 > EOF
876
876
877 $ cat > syntaxerror.py << NO_CHECK_EOF
877 $ cat > syntaxerror.py << NO_CHECK_EOF
878 > (foo
878 > (foo
879 > NO_CHECK_EOF
879 > NO_CHECK_EOF
880
880
881 test python hooks
881 test python hooks
882
882
883 #if windows
883 #if windows
884 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
884 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
885 #else
885 #else
886 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
886 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
887 #endif
887 #endif
888 $ export PYTHONPATH
888 $ export PYTHONPATH
889
889
890 $ echo '[hooks]' > ../a/.hg/hgrc
890 $ echo '[hooks]' > ../a/.hg/hgrc
891 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
891 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
892 $ hg pull ../a 2>&1 | grep 'raised an exception'
892 $ hg pull ../a 2>&1 | grep 'raised an exception'
893 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
893 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
894
894
895 $ echo '[hooks]' > ../a/.hg/hgrc
895 $ echo '[hooks]' > ../a/.hg/hgrc
896 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
896 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
897 $ hg pull ../a 2>&1 | grep 'raised an exception'
897 $ hg pull ../a 2>&1 | grep 'raised an exception'
898 error: preoutgoing.raise hook raised an exception: exception from hook
898 error: preoutgoing.raise hook raised an exception: exception from hook
899
899
900 $ echo '[hooks]' > ../a/.hg/hgrc
900 $ echo '[hooks]' > ../a/.hg/hgrc
901 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
901 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
902 $ hg pull ../a
902 $ hg pull ../a
903 pulling from ../a
903 pulling from ../a
904 searching for changes
904 searching for changes
905 error: preoutgoing.abort hook failed: raise abort from hook
905 error: preoutgoing.abort hook failed: raise abort from hook
906 abort: raise abort from hook
906 abort: raise abort from hook
907 [255]
907 [255]
908
908
909 $ echo '[hooks]' > ../a/.hg/hgrc
909 $ echo '[hooks]' > ../a/.hg/hgrc
910 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
910 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
911 $ hg pull ../a
911 $ hg pull ../a
912 pulling from ../a
912 pulling from ../a
913 searching for changes
913 searching for changes
914 hook args:
914 hook args:
915 hooktype preoutgoing
915 hooktype preoutgoing
916 source pull
916 source pull
917 abort: preoutgoing.fail hook failed
917 abort: preoutgoing.fail hook failed
918 [255]
918 [255]
919
919
920 $ echo '[hooks]' > ../a/.hg/hgrc
920 $ echo '[hooks]' > ../a/.hg/hgrc
921 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
921 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
922 $ hg pull ../a
922 $ hg pull ../a
923 pulling from ../a
923 pulling from ../a
924 searching for changes
924 searching for changes
925 abort: preoutgoing.uncallable hook is invalid: "hooktests.uncallable" is not callable
925 abort: preoutgoing.uncallable hook is invalid: "hooktests.uncallable" is not callable
926 [255]
926 [255]
927
927
928 $ echo '[hooks]' > ../a/.hg/hgrc
928 $ echo '[hooks]' > ../a/.hg/hgrc
929 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
929 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
930 $ hg pull ../a
930 $ hg pull ../a
931 pulling from ../a
931 pulling from ../a
932 searching for changes
932 searching for changes
933 abort: preoutgoing.nohook hook is invalid: "hooktests.nohook" is not defined
933 abort: preoutgoing.nohook hook is invalid: "hooktests.nohook" is not defined
934 [255]
934 [255]
935
935
936 $ echo '[hooks]' > ../a/.hg/hgrc
936 $ echo '[hooks]' > ../a/.hg/hgrc
937 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
937 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
938 $ hg pull ../a
938 $ hg pull ../a
939 pulling from ../a
939 pulling from ../a
940 searching for changes
940 searching for changes
941 abort: preoutgoing.nomodule hook is invalid: "nomodule" not in a module
941 abort: preoutgoing.nomodule hook is invalid: "nomodule" not in a module
942 [255]
942 [255]
943
943
944 $ echo '[hooks]' > ../a/.hg/hgrc
944 $ echo '[hooks]' > ../a/.hg/hgrc
945 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
945 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
946 $ hg pull ../a
946 $ hg pull ../a
947 pulling from ../a
947 pulling from ../a
948 searching for changes
948 searching for changes
949 abort: preoutgoing.badmodule hook is invalid: import of "nomodule" failed
949 abort: preoutgoing.badmodule hook is invalid: import of "nomodule" failed
950 (run with --traceback for stack trace)
950 (run with --traceback for stack trace)
951 [255]
951 [255]
952
952
953 $ echo '[hooks]' > ../a/.hg/hgrc
953 $ echo '[hooks]' > ../a/.hg/hgrc
954 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
954 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
955 $ hg pull ../a
955 $ hg pull ../a
956 pulling from ../a
956 pulling from ../a
957 searching for changes
957 searching for changes
958 abort: preoutgoing.unreachable hook is invalid: import of "hooktests.container" failed
958 abort: preoutgoing.unreachable hook is invalid: import of "hooktests.container" failed
959 (run with --traceback for stack trace)
959 (run with --traceback for stack trace)
960 [255]
960 [255]
961
961
962 $ echo '[hooks]' > ../a/.hg/hgrc
962 $ echo '[hooks]' > ../a/.hg/hgrc
963 $ echo 'preoutgoing.syntaxerror = python:syntaxerror.syntaxerror' >> ../a/.hg/hgrc
963 $ echo 'preoutgoing.syntaxerror = python:syntaxerror.syntaxerror' >> ../a/.hg/hgrc
964 $ hg pull ../a
964 $ hg pull ../a
965 pulling from ../a
965 pulling from ../a
966 searching for changes
966 searching for changes
967 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
967 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
968 (run with --traceback for stack trace)
968 (run with --traceback for stack trace)
969 [255]
969 [255]
970
970
971 $ hg pull ../a --traceback 2>&1 | egrep 'pulling|searching|^exception|Traceback|SyntaxError|ImportError|ModuleNotFoundError|HookLoadError|abort'
971 $ hg pull ../a --traceback 2>&1 | egrep 'pulling|searching|^exception|Traceback|SyntaxError|ImportError|ModuleNotFoundError|HookLoadError|abort'
972 pulling from ../a
972 pulling from ../a
973 searching for changes
973 searching for changes
974 exception from first failed import attempt:
974 exception from first failed import attempt:
975 Traceback (most recent call last):
975 Traceback (most recent call last):
976 SyntaxError: * (glob)
976 SyntaxError: * (glob)
977 exception from second failed import attempt:
977 exception from second failed import attempt:
978 Traceback (most recent call last): (py3 !)
978 Traceback (most recent call last): (py3 !)
979 SyntaxError: * (glob) (py3 !)
979 SyntaxError: * (glob) (py3 !)
980 Traceback (most recent call last):
980 Traceback (most recent call last):
981 ImportError: No module named hgext_syntaxerror (no-py3 !)
981 ImportError: No module named hgext_syntaxerror (no-py3 !)
982 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
982 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
983 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
983 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
984 Traceback (most recent call last):
984 Traceback (most recent call last):
985 SyntaxError: * (glob) (py3 !)
985 SyntaxError: * (glob) (py3 !)
986 Traceback (most recent call last): (py3 !)
986 Traceback (most recent call last): (py3 !)
987 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
987 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
988 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
988 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
989 Traceback (most recent call last): (py3 !)
989 Traceback (most recent call last): (py3 !)
990 HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (no-py3 !)
990 HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (no-py3 !)
991 raise error.HookLoadError( (py38 !)
991 raise error.HookLoadError( (py38 !)
992 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (py3 !)
992 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (py3 !)
993 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
993 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
994
994
995 $ echo '[hooks]' > ../a/.hg/hgrc
995 $ echo '[hooks]' > ../a/.hg/hgrc
996 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
996 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
997 $ hg pull ../a
997 $ hg pull ../a
998 pulling from ../a
998 pulling from ../a
999 searching for changes
999 searching for changes
1000 hook args:
1000 hook args:
1001 hooktype preoutgoing
1001 hooktype preoutgoing
1002 source pull
1002 source pull
1003 adding changesets
1003 adding changesets
1004 adding manifests
1004 adding manifests
1005 adding file changes
1005 adding file changes
1006 adding remote bookmark quux
1006 adding remote bookmark quux
1007 added 1 changesets with 1 changes to 1 files
1007 added 1 changesets with 1 changes to 1 files
1008 new changesets 539e4b31b6dc
1008 new changesets 539e4b31b6dc
1009 (run 'hg update' to get a working copy)
1009 (run 'hg update' to get a working copy)
1010
1010
1011 post- python hooks that fail to *run* don't cause an abort
1011 post- python hooks that fail to *run* don't cause an abort
1012 $ rm ../a/.hg/hgrc
1012 $ rm ../a/.hg/hgrc
1013 $ echo '[hooks]' > .hg/hgrc
1013 $ echo '[hooks]' > .hg/hgrc
1014 $ echo 'post-pull.broken = python:hooktests.brokenhook' >> .hg/hgrc
1014 $ echo 'post-pull.broken = python:hooktests.brokenhook' >> .hg/hgrc
1015 $ hg pull ../a
1015 $ hg pull ../a
1016 pulling from ../a
1016 pulling from ../a
1017 searching for changes
1017 searching for changes
1018 no changes found
1018 no changes found
1019 error: post-pull.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
1019 error: post-pull.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
1020 (run with --traceback for stack trace)
1020 (run with --traceback for stack trace)
1021
1021
1022 but post- python hooks that fail to *load* do
1022 but post- python hooks that fail to *load* do
1023 $ echo '[hooks]' > .hg/hgrc
1023 $ echo '[hooks]' > .hg/hgrc
1024 $ echo 'post-pull.nomodule = python:nomodule' >> .hg/hgrc
1024 $ echo 'post-pull.nomodule = python:nomodule' >> .hg/hgrc
1025 $ hg pull ../a
1025 $ hg pull ../a
1026 pulling from ../a
1026 pulling from ../a
1027 searching for changes
1027 searching for changes
1028 no changes found
1028 no changes found
1029 abort: post-pull.nomodule hook is invalid: "nomodule" not in a module
1029 abort: post-pull.nomodule hook is invalid: "nomodule" not in a module
1030 [255]
1030 [255]
1031
1031
1032 $ echo '[hooks]' > .hg/hgrc
1032 $ echo '[hooks]' > .hg/hgrc
1033 $ echo 'post-pull.badmodule = python:nomodule.nowhere' >> .hg/hgrc
1033 $ echo 'post-pull.badmodule = python:nomodule.nowhere' >> .hg/hgrc
1034 $ hg pull ../a
1034 $ hg pull ../a
1035 pulling from ../a
1035 pulling from ../a
1036 searching for changes
1036 searching for changes
1037 no changes found
1037 no changes found
1038 abort: post-pull.badmodule hook is invalid: import of "nomodule" failed
1038 abort: post-pull.badmodule hook is invalid: import of "nomodule" failed
1039 (run with --traceback for stack trace)
1039 (run with --traceback for stack trace)
1040 [255]
1040 [255]
1041
1041
1042 $ echo '[hooks]' > .hg/hgrc
1042 $ echo '[hooks]' > .hg/hgrc
1043 $ echo 'post-pull.nohook = python:hooktests.nohook' >> .hg/hgrc
1043 $ echo 'post-pull.nohook = python:hooktests.nohook' >> .hg/hgrc
1044 $ hg pull ../a
1044 $ hg pull ../a
1045 pulling from ../a
1045 pulling from ../a
1046 searching for changes
1046 searching for changes
1047 no changes found
1047 no changes found
1048 abort: post-pull.nohook hook is invalid: "hooktests.nohook" is not defined
1048 abort: post-pull.nohook hook is invalid: "hooktests.nohook" is not defined
1049 [255]
1049 [255]
1050
1050
1051 make sure --traceback works
1051 make sure --traceback works
1052
1052
1053 $ echo '[hooks]' > .hg/hgrc
1053 $ echo '[hooks]' > .hg/hgrc
1054 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
1054 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
1055
1055
1056 $ echo aa > a
1056 $ echo aa > a
1057 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
1057 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
1058 Traceback (most recent call last):
1058 Traceback (most recent call last):
1059
1059
1060 $ cd ..
1060 $ cd ..
1061 $ hg init c
1061 $ hg init c
1062 $ cd c
1062 $ cd c
1063
1063
1064 $ cat > hookext.py <<EOF
1064 $ cat > hookext.py <<EOF
1065 > def autohook(ui, **args):
1065 > def autohook(ui, **args):
1066 > ui.write(b'Automatically installed hook\n')
1066 > ui.write(b'Automatically installed hook\n')
1067 >
1067 >
1068 > def reposetup(ui, repo):
1068 > def reposetup(ui, repo):
1069 > repo.ui.setconfig(b"hooks", b"commit.auto", autohook)
1069 > repo.ui.setconfig(b"hooks", b"commit.auto", autohook)
1070 > EOF
1070 > EOF
1071 $ echo '[extensions]' >> .hg/hgrc
1071 $ echo '[extensions]' >> .hg/hgrc
1072 $ echo 'hookext = hookext.py' >> .hg/hgrc
1072 $ echo 'hookext = hookext.py' >> .hg/hgrc
1073
1073
1074 $ touch foo
1074 $ touch foo
1075 $ hg add foo
1075 $ hg add foo
1076 $ hg ci -d '0 0' -m 'add foo'
1076 $ hg ci -d '0 0' -m 'add foo'
1077 Automatically installed hook
1077 Automatically installed hook
1078 $ echo >> foo
1078 $ echo >> foo
1079 $ hg ci --debug -d '0 0' -m 'change foo'
1079 $ hg ci --debug -d '0 0' -m 'change foo'
1080 committing files:
1080 committing files:
1081 foo
1081 foo
1082 committing manifest
1082 committing manifest
1083 committing changelog
1083 committing changelog
1084 updating the branch cache
1084 updating the branch cache
1085 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
1085 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
1086 calling hook commit.auto: hgext_hookext.autohook
1086 calling hook commit.auto: hgext_hookext.autohook
1087 Automatically installed hook
1087 Automatically installed hook
1088
1088
1089 $ hg showconfig hooks
1089 $ hg showconfig hooks
1090 hooks.commit.auto=<function autohook at *> (glob)
1090 hooks.commit.auto=<function autohook at *> (glob)
1091
1091
1092 test python hook configured with python:[file]:[hook] syntax
1092 test python hook configured with python:[file]:[hook] syntax
1093
1093
1094 $ cd ..
1094 $ cd ..
1095 $ mkdir d
1095 $ mkdir d
1096 $ cd d
1096 $ cd d
1097 $ hg init repo
1097 $ hg init repo
1098 $ mkdir hooks
1098 $ mkdir hooks
1099
1099
1100 $ cd hooks
1100 $ cd hooks
1101 $ cat > testhooks.py <<EOF
1101 $ cat > testhooks.py <<EOF
1102 > def testhook(ui, **args):
1102 > def testhook(ui, **args):
1103 > ui.write(b'hook works\n')
1103 > ui.write(b'hook works\n')
1104 > EOF
1104 > EOF
1105 $ echo '[hooks]' > ../repo/.hg/hgrc
1105 $ echo '[hooks]' > ../repo/.hg/hgrc
1106 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
1106 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
1107
1107
1108 $ cd ../repo
1108 $ cd ../repo
1109 $ hg commit -d '0 0'
1109 $ hg commit -d '0 0'
1110 hook works
1110 hook works
1111 nothing changed
1111 nothing changed
1112 [1]
1112 [1]
1113
1113
1114 $ echo '[hooks]' > .hg/hgrc
1114 $ echo '[hooks]' > .hg/hgrc
1115 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
1115 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
1116 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
1116 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
1117
1117
1118 $ hg up null
1118 $ hg up null
1119 loading update.ne hook failed:
1119 loading update.ne hook failed:
1120 abort: $ENOENT$: '$TESTTMP/d/repo/nonexistent.py'
1120 abort: $ENOENT$: '$TESTTMP/d/repo/nonexistent.py'
1121 [255]
1121 [255]
1122
1122
1123 $ hg id
1123 $ hg id
1124 loading pre-identify.npmd hook failed:
1124 loading pre-identify.npmd hook failed:
1125 abort: No module named repo! (no-py3 !)
1125 abort: No module named repo! (no-py3 !)
1126 abort: No module named 'repo'! (py3 !)
1126 abort: No module named 'repo'! (py3 !)
1127 [255]
1127 [255]
1128
1128
1129 $ cd ../../b
1129 $ cd ../../b
1130
1130
1131 make sure --traceback works on hook import failure
1131 make sure --traceback works on hook import failure
1132
1132
1133 $ cat > importfail.py <<EOF
1133 $ cat > importfail.py <<EOF
1134 > import somebogusmodule
1134 > import somebogusmodule
1135 > # dereference something in the module to force demandimport to load it
1135 > # dereference something in the module to force demandimport to load it
1136 > somebogusmodule.whatever
1136 > somebogusmodule.whatever
1137 > EOF
1137 > EOF
1138
1138
1139 $ echo '[hooks]' > .hg/hgrc
1139 $ echo '[hooks]' > .hg/hgrc
1140 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
1140 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
1141
1141
1142 $ echo a >> a
1142 $ echo a >> a
1143 $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
1143 $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
1144 exception from first failed import attempt:
1144 exception from first failed import attempt:
1145 Traceback (most recent call last):
1145 Traceback (most recent call last):
1146 ImportError: No module named somebogusmodule (no-py3 !)
1146 ImportError: No module named somebogusmodule (no-py3 !)
1147 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1147 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1148 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1148 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1149 exception from second failed import attempt:
1149 exception from second failed import attempt:
1150 Traceback (most recent call last): (py3 !)
1150 Traceback (most recent call last): (py3 !)
1151 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1151 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1152 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1152 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1153 Traceback (most recent call last): (py3 !)
1153 Traceback (most recent call last): (py3 !)
1154 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1154 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1155 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1155 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1156 Traceback (most recent call last): (py3 !)
1156 Traceback (most recent call last): (py3 !)
1157 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1157 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1158 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1158 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1159 Traceback (most recent call last):
1159 Traceback (most recent call last):
1160 ImportError: No module named hgext_importfail (no-py3 !)
1160 ImportError: No module named hgext_importfail (no-py3 !)
1161 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1161 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1162 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1162 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1163 Traceback (most recent call last):
1163 Traceback (most recent call last):
1164 HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (no-py3 !)
1164 HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (no-py3 !)
1165 raise error.HookLoadError( (py38 !)
1165 raise error.HookLoadError( (py38 !)
1166 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (py3 !)
1166 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (py3 !)
1167 abort: precommit.importfail hook is invalid: import of "importfail" failed
1167 abort: precommit.importfail hook is invalid: import of "importfail" failed
1168
1168
1169 Issue1827: Hooks Update & Commit not completely post operation
1169 Issue1827: Hooks Update & Commit not completely post operation
1170
1170
1171 commit and update hooks should run after command completion. The largefiles
1171 commit and update hooks should run after command completion. The largefiles
1172 use demonstrates a recursive wlock, showing the hook doesn't run until the
1172 use demonstrates a recursive wlock, showing the hook doesn't run until the
1173 final release (and dirstate flush).
1173 final release (and dirstate flush).
1174
1174
1175 $ echo '[hooks]' > .hg/hgrc
1175 $ echo '[hooks]' > .hg/hgrc
1176 $ echo 'commit = hg id' >> .hg/hgrc
1176 $ echo 'commit = hg id' >> .hg/hgrc
1177 $ echo 'update = hg id' >> .hg/hgrc
1177 $ echo 'update = hg id' >> .hg/hgrc
1178 $ echo bb > a
1178 $ echo bb > a
1179 $ hg ci -ma
1179 $ hg ci -ma
1180 223eafe2750c tip
1180 223eafe2750c tip
1181 $ hg up 0 --config extensions.largefiles=
1181 $ hg up 0 --config extensions.largefiles=
1182 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
1182 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
1183 cb9a9f314b8b
1183 cb9a9f314b8b
1184 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1184 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1185
1185
1186 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
1186 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
1187 that is passed to pre/post hooks
1187 that is passed to pre/post hooks
1188
1188
1189 $ echo '[hooks]' > .hg/hgrc
1189 $ echo '[hooks]' > .hg/hgrc
1190 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
1190 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
1191 $ hg id
1191 $ hg id
1192 cb9a9f314b8b
1192 cb9a9f314b8b
1193 $ hg id --verbose
1193 $ hg id --verbose
1194 calling hook pre-identify: hooktests.verbosehook
1194 calling hook pre-identify: hooktests.verbosehook
1195 verbose output from hook
1195 verbose output from hook
1196 cb9a9f314b8b
1196 cb9a9f314b8b
1197
1197
1198 Ensure hooks can be prioritized
1198 Ensure hooks can be prioritized
1199
1199
1200 $ echo '[hooks]' > .hg/hgrc
1200 $ echo '[hooks]' > .hg/hgrc
1201 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
1201 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
1202 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
1202 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
1203 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
1203 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
1204 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
1204 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
1205 $ hg id --verbose
1205 $ hg id --verbose
1206 calling hook pre-identify.b: hooktests.verbosehook
1206 calling hook pre-identify.b: hooktests.verbosehook
1207 verbose output from hook
1207 verbose output from hook
1208 calling hook pre-identify.a: hooktests.verbosehook
1208 calling hook pre-identify.a: hooktests.verbosehook
1209 verbose output from hook
1209 verbose output from hook
1210 calling hook pre-identify.c: hooktests.verbosehook
1210 calling hook pre-identify.c: hooktests.verbosehook
1211 verbose output from hook
1211 verbose output from hook
1212 cb9a9f314b8b
1212 cb9a9f314b8b
1213
1213
1214 new tags must be visible in pretxncommit (issue3210)
1214 new tags must be visible in pretxncommit (issue3210)
1215
1215
1216 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
1216 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
1217 $ hg tag -f foo
1217 $ hg tag -f foo
1218 [a, foo, tip]
1218 [a, foo, tip]
1219
1219
1220 post-init hooks must not crash (issue4983)
1220 post-init hooks must not crash (issue4983)
1221 This also creates the `to` repo for the next test block.
1221 This also creates the `to` repo for the next test block.
1222
1222
1223 $ cd ..
1223 $ cd ..
1224 $ cat << EOF >> hgrc-with-post-init-hook
1224 $ cat << EOF >> hgrc-with-post-init-hook
1225 > [hooks]
1225 > [hooks]
1226 > post-init = sh -c "printenv.py --line post-init"
1226 > post-init = sh -c "printenv.py --line post-init"
1227 > EOF
1227 > EOF
1228 $ HGRCPATH=hgrc-with-post-init-hook hg init to
1228 $ HGRCPATH=hgrc-with-post-init-hook hg init to
1229 post-init hook: HG_ARGS=init to
1229 post-init hook: HG_ARGS=init to
1230 HG_HOOKNAME=post-init
1230 HG_HOOKNAME=post-init
1231 HG_HOOKTYPE=post-init
1231 HG_HOOKTYPE=post-init
1232 HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''}
1232 HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''}
1233 HG_PATS=['to']
1233 HG_PATS=['to']
1234 HG_RESULT=0
1234 HG_RESULT=0
1235
1235
1236
1236
1237 new commits must be visible in pretxnchangegroup (issue3428)
1237 new commits must be visible in pretxnchangegroup (issue3428)
1238
1238
1239 $ echo '[hooks]' >> to/.hg/hgrc
1239 $ echo '[hooks]' >> to/.hg/hgrc
1240 $ echo 'prechangegroup = hg --traceback tip' >> to/.hg/hgrc
1240 $ echo 'prechangegroup = hg --traceback tip' >> to/.hg/hgrc
1241 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
1241 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
1242 $ echo a >> to/a
1242 $ echo a >> to/a
1243 $ hg --cwd to ci -Ama
1243 $ hg --cwd to ci -Ama
1244 adding a
1244 adding a
1245 $ hg clone to from
1245 $ hg clone to from
1246 updating to branch default
1246 updating to branch default
1247 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1247 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1248 $ echo aa >> from/a
1248 $ echo aa >> from/a
1249 $ hg --cwd from ci -mb
1249 $ hg --cwd from ci -mb
1250 $ hg --cwd from push
1250 $ hg --cwd from push
1251 pushing to $TESTTMP/to
1251 pushing to $TESTTMP/to
1252 searching for changes
1252 searching for changes
1253 changeset: 0:cb9a9f314b8b
1253 changeset: 0:cb9a9f314b8b
1254 tag: tip
1254 tag: tip
1255 user: test
1255 user: test
1256 date: Thu Jan 01 00:00:00 1970 +0000
1256 date: Thu Jan 01 00:00:00 1970 +0000
1257 summary: a
1257 summary: a
1258
1258
1259 adding changesets
1259 adding changesets
1260 adding manifests
1260 adding manifests
1261 adding file changes
1261 adding file changes
1262 changeset: 1:9836a07b9b9d
1262 changeset: 1:9836a07b9b9d
1263 tag: tip
1263 tag: tip
1264 user: test
1264 user: test
1265 date: Thu Jan 01 00:00:00 1970 +0000
1265 date: Thu Jan 01 00:00:00 1970 +0000
1266 summary: b
1266 summary: b
1267
1267
1268 added 1 changesets with 1 changes to 1 files
1268 added 1 changesets with 1 changes to 1 files
1269
1269
1270 pretxnclose hook failure should abort the transaction
1270 pretxnclose hook failure should abort the transaction
1271
1271
1272 $ hg init txnfailure
1272 $ hg init txnfailure
1273 $ cd txnfailure
1273 $ cd txnfailure
1274 $ touch a && hg commit -Aqm a
1274 $ touch a && hg commit -Aqm a
1275 $ cat >> .hg/hgrc <<EOF
1275 $ cat >> .hg/hgrc <<EOF
1276 > [hooks]
1276 > [hooks]
1277 > pretxnclose.error = exit 1
1277 > pretxnclose.error = exit 1
1278 > EOF
1278 > EOF
1279 $ hg strip -r 0 --config extensions.strip=
1279 $ hg strip -r 0 --config extensions.strip=
1280 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1280 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1281 saved backup bundle to * (glob)
1281 saved backup bundle to * (glob)
1282 transaction abort!
1282 transaction abort!
1283 rollback completed
1283 rollback completed
1284 strip failed, backup bundle stored in * (glob)
1284 strip failed, backup bundle stored in * (glob)
1285 abort: pretxnclose.error hook exited with status 1
1285 abort: pretxnclose.error hook exited with status 1
1286 [255]
1286 [255]
1287 $ hg recover
1287 $ hg recover
1288 no interrupted transaction available
1288 no interrupted transaction available
1289 [1]
1289 [1]
1290 $ cd ..
1290 $ cd ..
1291
1291
1292 check whether HG_PENDING makes pending changes only in related
1292 check whether HG_PENDING makes pending changes only in related
1293 repositories visible to an external hook.
1293 repositories visible to an external hook.
1294
1294
1295 (emulate a transaction running concurrently by copied
1295 (emulate a transaction running concurrently by copied
1296 .hg/store/00changelog.i.a in subsequent test)
1296 .hg/store/00changelog.i.a in subsequent test)
1297
1297
1298 $ cat > $TESTTMP/savepending.sh <<EOF
1298 $ cat > $TESTTMP/savepending.sh <<EOF
1299 > cp .hg/store/00changelog.i.a .hg/store/00changelog.i.a.saved
1299 > cp .hg/store/00changelog.i.a .hg/store/00changelog.i.a.saved
1300 > exit 1 # to avoid adding new revision for subsequent tests
1300 > exit 1 # to avoid adding new revision for subsequent tests
1301 > EOF
1301 > EOF
1302 $ cd a
1302 $ cd a
1303 $ hg tip -q
1303 $ hg tip -q
1304 4:539e4b31b6dc
1304 4:539e4b31b6dc
1305 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
1305 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
1306 transaction abort!
1306 transaction abort!
1307 rollback completed
1307 rollback completed
1308 abort: pretxnclose hook exited with status 1
1308 abort: pretxnclose hook exited with status 1
1309 [255]
1309 [255]
1310 $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
1310 $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
1311
1311
1312 (check (in)visibility of new changeset while transaction running in
1312 (check (in)visibility of new changeset while transaction running in
1313 repo)
1313 repo)
1314
1314
1315 $ cat > $TESTTMP/checkpending.sh <<EOF
1315 $ cat > $TESTTMP/checkpending.sh <<EOF
1316 > echo '@a'
1316 > echo '@a'
1317 > hg -R "$TESTTMP/a" tip -q
1317 > hg -R "$TESTTMP/a" tip -q
1318 > echo '@a/nested'
1318 > echo '@a/nested'
1319 > hg -R "$TESTTMP/a/nested" tip -q
1319 > hg -R "$TESTTMP/a/nested" tip -q
1320 > exit 1 # to avoid adding new revision for subsequent tests
1320 > exit 1 # to avoid adding new revision for subsequent tests
1321 > EOF
1321 > EOF
1322 $ hg init nested
1322 $ hg init nested
1323 $ cd nested
1323 $ cd nested
1324 $ echo a > a
1324 $ echo a > a
1325 $ hg add a
1325 $ hg add a
1326 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
1326 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
1327 @a
1327 @a
1328 4:539e4b31b6dc
1328 4:539e4b31b6dc
1329 @a/nested
1329 @a/nested
1330 0:bf5e395ced2c
1330 0:bf5e395ced2c
1331 transaction abort!
1331 transaction abort!
1332 rollback completed
1332 rollback completed
1333 abort: pretxnclose hook exited with status 1
1333 abort: pretxnclose hook exited with status 1
1334 [255]
1334 [255]
1335
1335
1336 Hook from untrusted hgrc are reported as failure
1336 Hook from untrusted hgrc are reported as failure
1337 ================================================
1337 ================================================
1338
1338
1339 $ cat << EOF > $TESTTMP/untrusted.py
1339 $ cat << EOF > $TESTTMP/untrusted.py
1340 > from mercurial import scmutil, util
1340 > from mercurial import scmutil, util
1341 > def uisetup(ui):
1341 > def uisetup(ui):
1342 > class untrustedui(ui.__class__):
1342 > class untrustedui(ui.__class__):
1343 > def _trusted(self, fp, f):
1343 > def _trusted(self, fp, f):
1344 > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
1344 > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
1345 > return False
1345 > return False
1346 > return super(untrustedui, self)._trusted(fp, f)
1346 > return super(untrustedui, self)._trusted(fp, f)
1347 > ui.__class__ = untrustedui
1347 > ui.__class__ = untrustedui
1348 > EOF
1348 > EOF
1349 $ cat << EOF >> $HGRCPATH
1349 $ cat << EOF >> $HGRCPATH
1350 > [extensions]
1350 > [extensions]
1351 > untrusted=$TESTTMP/untrusted.py
1351 > untrusted=$TESTTMP/untrusted.py
1352 > EOF
1352 > EOF
1353 $ hg init untrusted
1353 $ hg init untrusted
1354 $ cd untrusted
1354 $ cd untrusted
1355
1355
1356 Non-blocking hook
1356 Non-blocking hook
1357 -----------------
1357 -----------------
1358
1358
1359 $ cat << EOF >> .hg/hgrc
1359 $ cat << EOF >> .hg/hgrc
1360 > [hooks]
1360 > [hooks]
1361 > txnclose.testing=echo txnclose hook called
1361 > txnclose.testing=echo txnclose hook called
1362 > EOF
1362 > EOF
1363 $ touch a && hg commit -Aqm a
1363 $ touch a && hg commit -Aqm a
1364 warning: untrusted hook txnclose.testing not executed
1364 warning: untrusted hook txnclose.testing not executed
1365 $ hg log
1365 $ hg log
1366 changeset: 0:3903775176ed
1366 changeset: 0:3903775176ed
1367 tag: tip
1367 tag: tip
1368 user: test
1368 user: test
1369 date: Thu Jan 01 00:00:00 1970 +0000
1369 date: Thu Jan 01 00:00:00 1970 +0000
1370 summary: a
1370 summary: a
1371
1371
1372
1372
1373 Non-blocking hook
1373 Non-blocking hook
1374 -----------------
1374 -----------------
1375
1375
1376 $ cat << EOF >> .hg/hgrc
1376 $ cat << EOF >> .hg/hgrc
1377 > [hooks]
1377 > [hooks]
1378 > pretxnclose.testing=echo pre-txnclose hook called
1378 > pretxnclose.testing=echo pre-txnclose hook called
1379 > EOF
1379 > EOF
1380 $ touch b && hg commit -Aqm a
1380 $ touch b && hg commit -Aqm a
1381 transaction abort!
1381 transaction abort!
1382 rollback completed
1382 rollback completed
1383 abort: untrusted hook pretxnclose.testing not executed
1383 abort: untrusted hook pretxnclose.testing not executed
1384 (see 'hg help config.trusted')
1384 (see 'hg help config.trusted')
1385 [255]
1385 [255]
1386 $ hg log
1386 $ hg log
1387 changeset: 0:3903775176ed
1387 changeset: 0:3903775176ed
1388 tag: tip
1388 tag: tip
1389 user: test
1389 user: test
1390 date: Thu Jan 01 00:00:00 1970 +0000
1390 date: Thu Jan 01 00:00:00 1970 +0000
1391 summary: a
1391 summary: a
1392
1392
General Comments 0
You need to be logged in to leave comments. Login now