hooks: provide access to transaction changes for internal hooks...
Joerg Sonnenberger
r45350:09da5cf4 default
@@ -1,336 +1,340 @@
1 1 # hook.py - hook support for mercurial
2 2 #
3 3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import os
12 12 import sys
13 13
14 14 from .i18n import _
15 15 from .pycompat import getattr
16 16 from . import (
17 17 demandimport,
18 18 encoding,
19 19 error,
20 20 extensions,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import (
25 25 procutil,
26 26 resourceutil,
27 27 stringutil,
28 28 )
29 29
30 30
31 31 def pythonhook(ui, repo, htype, hname, funcname, args, throw):
32 32 '''call python hook. hook is callable object, looked up as
33 33 name in python module. if callable returns "true", hook
34 34 fails, else passes. if hook raises exception, treated as
35 35 hook failure. exception propagates if throw is "true".
36 36
37 37 reason for "true" meaning "hook failed" is so that
38 38 unmodified commands (e.g. mercurial.commands.update) can
39 39 be run as hooks without wrappers to convert return values.'''
40 40
41 41 if callable(funcname):
42 42 obj = funcname
43 43 funcname = pycompat.sysbytes(obj.__module__ + "." + obj.__name__)
44 44 else:
45 45 d = funcname.rfind(b'.')
46 46 if d == -1:
47 47 raise error.HookLoadError(
48 48 _(b'%s hook is invalid: "%s" not in a module')
49 49 % (hname, funcname)
50 50 )
51 51 modname = funcname[:d]
52 52 oldpaths = sys.path
53 53 if resourceutil.mainfrozen():
54 54 # binary installs require sys.path manipulation
55 55 modpath, modfile = os.path.split(modname)
56 56 if modpath and modfile:
57 57 sys.path = sys.path[:] + [modpath]
58 58 modname = modfile
59 59 with demandimport.deactivated():
60 60 try:
61 61 obj = __import__(pycompat.sysstr(modname))
62 62 except (ImportError, SyntaxError):
63 63 e1 = sys.exc_info()
64 64 try:
65 65 # extensions are loaded with hgext_ prefix
66 66 obj = __import__("hgext_%s" % pycompat.sysstr(modname))
67 67 except (ImportError, SyntaxError):
68 68 e2 = sys.exc_info()
69 69 if ui.tracebackflag:
70 70 ui.warn(
71 71 _(
72 72 b'exception from first failed import '
73 73 b'attempt:\n'
74 74 )
75 75 )
76 76 ui.traceback(e1)
77 77 if ui.tracebackflag:
78 78 ui.warn(
79 79 _(
80 80 b'exception from second failed import '
81 81 b'attempt:\n'
82 82 )
83 83 )
84 84 ui.traceback(e2)
85 85
86 86 if not ui.tracebackflag:
87 87 tracebackhint = _(
88 88 b'run with --traceback for stack trace'
89 89 )
90 90 else:
91 91 tracebackhint = None
92 92 raise error.HookLoadError(
93 93 _(b'%s hook is invalid: import of "%s" failed')
94 94 % (hname, modname),
95 95 hint=tracebackhint,
96 96 )
97 97 sys.path = oldpaths
98 98 try:
99 99 for p in funcname.split(b'.')[1:]:
100 100 obj = getattr(obj, p)
101 101 except AttributeError:
102 102 raise error.HookLoadError(
103 103 _(b'%s hook is invalid: "%s" is not defined')
104 104 % (hname, funcname)
105 105 )
106 106 if not callable(obj):
107 107 raise error.HookLoadError(
108 108 _(b'%s hook is invalid: "%s" is not callable')
109 109 % (hname, funcname)
110 110 )
111 111
112 112 ui.note(_(b"calling hook %s: %s\n") % (hname, funcname))
113 113 starttime = util.timer()
114 114
115 115 try:
116 116 r = obj(ui=ui, repo=repo, hooktype=htype, **pycompat.strkwargs(args))
117 117 except Exception as exc:
118 118 if isinstance(exc, error.Abort):
119 119 ui.warn(_(b'error: %s hook failed: %s\n') % (hname, exc.args[0]))
120 120 else:
121 121 ui.warn(
122 122 _(b'error: %s hook raised an exception: %s\n')
123 123 % (hname, stringutil.forcebytestr(exc))
124 124 )
125 125 if throw:
126 126 raise
127 127 if not ui.tracebackflag:
128 128 ui.warn(_(b'(run with --traceback for stack trace)\n'))
129 129 ui.traceback()
130 130 return True, True
131 131 finally:
132 132 duration = util.timer() - starttime
133 133 ui.log(
134 134 b'pythonhook',
135 135 b'pythonhook-%s: %s finished in %0.2f seconds\n',
136 136 htype,
137 137 funcname,
138 138 duration,
139 139 )
140 140 if r:
141 141 if throw:
142 142 raise error.HookAbort(_(b'%s hook failed') % hname)
143 143 ui.warn(_(b'warning: %s hook failed\n') % hname)
144 144 return r, False
145 145
146 146
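A minimal sketch of the convention described in pythonhook()'s docstring: a truthy return value marks the hook as failed. The module and hook names here are hypothetical, wired up with something like `pretxncommit.nowip = python:myhooks.nowip` in hgrc:

# myhooks.py - hypothetical hook module, not part of this change.
# pythonhook() invokes it as obj(ui=ui, repo=repo, hooktype=htype, **args),
# so hook arguments such as 'node' arrive as keyword arguments.
def nowip(ui, repo, hooktype, node=None, **kwargs):
    if node is None:
        return False  # nothing to inspect; falsy means "hook passed"
    if b'WIP' in repo[node].description():
        ui.warn(b'rejecting commit still marked WIP\n')
        return True  # truthy means "hook failed"
    return False
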
147 147 def _exthook(ui, repo, htype, name, cmd, args, throw):
148 148 starttime = util.timer()
149 149 env = {}
150 150
151 151 # make in-memory changes visible to external process
152 152 if repo is not None:
153 153 tr = repo.currenttransaction()
154 154 repo.dirstate.write(tr)
155 155 if tr and tr.writepending():
156 156 env[b'HG_PENDING'] = repo.root
157 157 env[b'HG_HOOKTYPE'] = htype
158 158 env[b'HG_HOOKNAME'] = name
159 159
160 160 for k, v in pycompat.iteritems(args):
161 # transaction changes can accumulate MBs of data, so skip it
162 # for external hooks
163 if k == b'changes':
164 continue
161 165 if callable(v):
162 166 v = v()
163 167 if isinstance(v, (dict, list)):
164 168 v = stringutil.pprint(v)
165 169 env[b'HG_' + k.upper()] = v
166 170
167 171 if ui.configbool(b'hooks', b'tonative.%s' % name, False):
168 172 oldcmd = cmd
169 173 cmd = procutil.shelltonative(cmd, env)
170 174 if cmd != oldcmd:
171 175 ui.note(_(b'converting hook "%s" to native\n') % name)
172 176
173 177 ui.note(_(b"running hook %s: %s\n") % (name, cmd))
174 178
175 179 if repo:
176 180 cwd = repo.root
177 181 else:
178 182 cwd = encoding.getcwd()
179 183 r = ui.system(cmd, environ=env, cwd=cwd, blockedtag=b'exthook-%s' % (name,))
180 184
181 185 duration = util.timer() - starttime
182 186 ui.log(
183 187 b'exthook',
184 188 b'exthook-%s: %s finished in %0.2f seconds\n',
185 189 name,
186 190 cmd,
187 191 duration,
188 192 )
189 193 if r:
190 194 desc = procutil.explainexit(r)
191 195 if throw:
192 196 raise error.HookAbort(_(b'%s hook %s') % (name, desc))
193 197 ui.warn(_(b'warning: %s hook %s\n') % (name, desc))
194 198 return r
195 199
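The inserted lines above carry the substance of this change: transaction changes remain visible to in-process hooks but are no longer exported to external commands, since serializing them into the environment can cost megabytes. A sketch of an internal hook reading that argument follows; the exact shape of the 'changes' object is not visible in this hunk, so the len() below is purely illustrative:

# txnhooks.py - hypothetical module, a sketch under the assumptions above.
def txnclose(ui, repo, hooktype, **kwargs):
    changes = kwargs.get('changes')
    if callable(changes):
        # hook argument values may be supplied lazily, mirroring the
        # callable(v) handling in _exthook() above
        changes = changes()
    if changes is not None:
        ui.note(b'transaction recorded %d change categories\n' % len(changes))
    return False  # falsy: hook passes
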
196 200
197 201 # represent an untrusted hook command
198 202 _fromuntrusted = object()
199 203
200 204
201 205 def _allhooks(ui):
202 206 """return a list of (hook-id, cmd) pairs sorted by priority"""
203 207 hooks = _hookitems(ui)
204 208 # Be careful in this section, propagating the real commands from untrusted
205 209 # sources would create a security vulnerability, make sure anything altered
206 210 # in that section uses "_fromuntrusted" as its command.
207 211 untrustedhooks = _hookitems(ui, _untrusted=True)
208 212 for name, value in untrustedhooks.items():
209 213 trustedvalue = hooks.get(name, (None, None, name, _fromuntrusted))
210 214 if value != trustedvalue:
211 215 (lp, lo, lk, lv) = trustedvalue
212 216 hooks[name] = (lp, lo, lk, _fromuntrusted)
213 217 # (end of the security sensitive section)
214 218 return [(k, v) for p, o, k, v in sorted(hooks.values())]
215 219
216 220
217 221 def _hookitems(ui, _untrusted=False):
218 222 """return all hooks items ready to be sorted"""
219 223 hooks = {}
220 224 for name, cmd in ui.configitems(b'hooks', untrusted=_untrusted):
221 225 if name.startswith(b'priority.') or name.startswith(b'tonative.'):
222 226 continue
223 227
224 228 priority = ui.configint(b'hooks', b'priority.%s' % name, 0)
225 229 hooks[name] = (-priority, len(hooks), name, cmd)
226 230 return hooks
227 231
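_hookitems() skips the priority.* and tonative.* keys because they modify other hooks rather than define new ones; _allhooks() then orders by negated priority, with configuration order (the len(hooks) field) breaking ties. An illustrative hgrc, hook names made up:

[hooks]
changegroup.notify = python:notifymod.run
changegroup.backup = /usr/local/bin/backup-repo
# higher priority sorts first via -priority, so backup runs before notify
priority.changegroup.backup = 10
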
228 232
229 233 _redirect = False
230 234
231 235
232 236 def redirect(state):
233 237 global _redirect
234 238 _redirect = state
235 239
236 240
237 241 def hashook(ui, htype):
238 242 """return True if a hook is configured for 'htype'"""
239 243 if not ui.callhooks:
240 244 return False
241 245 for hname, cmd in _allhooks(ui):
242 246 if hname.split(b'.')[0] == htype and cmd:
243 247 return True
244 248 return False
245 249
246 250
247 251 def hook(ui, repo, htype, throw=False, **args):
248 252 if not ui.callhooks:
249 253 return False
250 254
251 255 hooks = []
252 256 for hname, cmd in _allhooks(ui):
253 257 if hname.split(b'.')[0] == htype and cmd:
254 258 hooks.append((hname, cmd))
255 259
256 260 res = runhooks(ui, repo, htype, hooks, throw=throw, **args)
257 261 r = False
258 262 for hname, cmd in hooks:
259 263 r = res[hname][0] or r
260 264 return r
261 265
262 266
263 267 @contextlib.contextmanager
264 268 def redirect_stdio():
265 269 """Redirects stdout to stderr, if possible."""
266 270
267 271 oldstdout = -1
268 272 try:
269 273 if _redirect:
270 274 try:
271 275 stdoutno = procutil.stdout.fileno()
272 276 stderrno = procutil.stderr.fileno()
273 277 # temporarily redirect stdout to stderr, if possible
274 278 if stdoutno >= 0 and stderrno >= 0:
275 279 procutil.stdout.flush()
276 280 oldstdout = os.dup(stdoutno)
277 281 os.dup2(stderrno, stdoutno)
278 282 except (OSError, AttributeError):
279 283 # files seem to be bogus, give up on redirecting (WSGI, etc)
280 284 pass
281 285
282 286 yield
283 287
284 288 finally:
285 289 # The stderr is fully buffered on Windows when connected to a pipe.
286 290 # A forcible flush is required to make small stderr data in the
287 291 # remote side available to the client immediately.
288 292 procutil.stderr.flush()
289 293
290 294 if _redirect and oldstdout >= 0:
291 295 procutil.stdout.flush() # write hook output to stderr fd
292 296 os.dup2(oldstdout, stdoutno)
293 297 os.close(oldstdout)
294 298
295 299
296 300 def runhooks(ui, repo, htype, hooks, throw=False, **args):
297 301 args = pycompat.byteskwargs(args)
298 302 res = {}
299 303
300 304 with redirect_stdio():
301 305 for hname, cmd in hooks:
302 306 if cmd is _fromuntrusted:
303 307 if throw:
304 308 raise error.HookAbort(
305 309 _(b'untrusted hook %s not executed') % hname,
306 310 hint=_(b"see 'hg help config.trusted'"),
307 311 )
308 312 ui.warn(_(b'warning: untrusted hook %s not executed\n') % hname)
309 313 r = 1
310 314 raised = False
311 315 elif callable(cmd):
312 316 r, raised = pythonhook(ui, repo, htype, hname, cmd, args, throw)
313 317 elif cmd.startswith(b'python:'):
314 318 if cmd.count(b':') >= 2:
315 319 path, cmd = cmd[7:].rsplit(b':', 1)
316 320 path = util.expandpath(path)
317 321 if repo:
318 322 path = os.path.join(repo.root, path)
319 323 try:
320 324 mod = extensions.loadpath(path, b'hghook.%s' % hname)
321 325 except Exception:
322 326 ui.write(_(b"loading %s hook failed:\n") % hname)
323 327 raise
324 328 hookfn = getattr(mod, cmd)
325 329 else:
326 330 hookfn = cmd[7:].strip()
327 331 r, raised = pythonhook(
328 332 ui, repo, htype, hname, hookfn, args, throw
329 333 )
330 334 else:
331 335 r = _exthook(ui, repo, htype, hname, cmd, args, throw)
332 336 raised = False
333 337
334 338 res[hname] = r, raised
335 339
336 340 return res
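Aside from the untrusted placeholder, runhooks() above dispatches on three spellings of a hook command: a shell command handed to _exthook(), python:dotted.name resolved by import, and python:/path/to/file.py:func (two or more colons) loaded through extensions.loadpath(). All names and paths below are illustrative:

[hooks]
incoming.log = logger -t hg incoming
pretxnchangegroup.check = python:myhooks.check
pretxncommit.lint = python:/etc/mercurial/hooks.py:lint
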
@@ -1,3828 +1,3829 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 merge as mergemod,
48 48 mergeutil,
49 49 namespaces,
50 50 narrowspec,
51 51 obsolete,
52 52 pathutil,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 rcutil,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 hashutil,
78 78 procutil,
79 79 stringutil,
80 80 )
81 81
82 82 from .revlogutils import constants as revlogconst
83 83
84 84 release = lockmod.release
85 85 urlerr = util.urlerr
86 86 urlreq = util.urlreq
87 87
88 88 # set of (path, vfs-location) tuples. vfs-location is:
89 89 # - 'plain' for vfs relative paths
90 90 # - '' for svfs relative paths
91 91 _cachedfiles = set()
92 92
93 93
94 94 class _basefilecache(scmutil.filecache):
95 95 """All filecache usage on repo are done for logic that should be unfiltered
96 96 """
97 97
98 98 def __get__(self, repo, type=None):
99 99 if repo is None:
100 100 return self
101 101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 102 unfi = repo.unfiltered()
103 103 try:
104 104 return unfi.__dict__[self.sname]
105 105 except KeyError:
106 106 pass
107 107 return super(_basefilecache, self).__get__(unfi, type)
108 108
109 109 def set(self, repo, value):
110 110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 111
112 112
113 113 class repofilecache(_basefilecache):
114 114 """filecache for files in .hg but outside of .hg/store"""
115 115
116 116 def __init__(self, *paths):
117 117 super(repofilecache, self).__init__(*paths)
118 118 for path in paths:
119 119 _cachedfiles.add((path, b'plain'))
120 120
121 121 def join(self, obj, fname):
122 122 return obj.vfs.join(fname)
123 123
124 124
125 125 class storecache(_basefilecache):
126 126 """filecache for files in the store"""
127 127
128 128 def __init__(self, *paths):
129 129 super(storecache, self).__init__(*paths)
130 130 for path in paths:
131 131 _cachedfiles.add((path, b''))
132 132
133 133 def join(self, obj, fname):
134 134 return obj.sjoin(fname)
135 135
136 136
137 137 class mixedrepostorecache(_basefilecache):
138 138 """filecache for a mix files in .hg/store and outside"""
139 139
140 140 def __init__(self, *pathsandlocations):
141 141 # scmutil.filecache only uses the path for passing back into our
142 142 # join(), so we can safely pass a list of paths and locations
143 143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 144 _cachedfiles.update(pathsandlocations)
145 145
146 146 def join(self, obj, fnameandlocation):
147 147 fname, location = fnameandlocation
148 148 if location == b'plain':
149 149 return obj.vfs.join(fname)
150 150 else:
151 151 if location != b'':
152 152 raise error.ProgrammingError(
153 153 b'unexpected location: %s' % location
154 154 )
155 155 return obj.sjoin(fname)
156 156
157 157
158 158 def isfilecached(repo, name):
159 159 """check if a repo has already cached "name" filecache-ed property
160 160
161 161 This returns (cachedobj-or-None, iscached) tuple.
162 162 """
163 163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 164 if not cacheentry:
165 165 return None, False
166 166 return cacheentry.obj, True
167 167
168 168
169 169 class unfilteredpropertycache(util.propertycache):
170 170 """propertycache that apply to unfiltered repo only"""
171 171
172 172 def __get__(self, repo, type=None):
173 173 unfi = repo.unfiltered()
174 174 if unfi is repo:
175 175 return super(unfilteredpropertycache, self).__get__(unfi)
176 176 return getattr(unfi, self.name)
177 177
178 178
179 179 class filteredpropertycache(util.propertycache):
180 180 """propertycache that must take filtering in account"""
181 181
182 182 def cachevalue(self, obj, value):
183 183 object.__setattr__(obj, self.name, value)
184 184
185 185
186 186 def hasunfilteredcache(repo, name):
187 187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 188 return name in vars(repo.unfiltered())
189 189
190 190
191 191 def unfilteredmethod(orig):
192 192 """decorate method that always need to be run on unfiltered version"""
193 193
194 194 def wrapper(repo, *args, **kwargs):
195 195 return orig(repo.unfiltered(), *args, **kwargs)
196 196
197 197 return wrapper
198 198
199 199
200 200 moderncaps = {
201 201 b'lookup',
202 202 b'branchmap',
203 203 b'pushkey',
204 204 b'known',
205 205 b'getbundle',
206 206 b'unbundle',
207 207 }
208 208 legacycaps = moderncaps.union({b'changegroupsubset'})
209 209
210 210
211 211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 212 class localcommandexecutor(object):
213 213 def __init__(self, peer):
214 214 self._peer = peer
215 215 self._sent = False
216 216 self._closed = False
217 217
218 218 def __enter__(self):
219 219 return self
220 220
221 221 def __exit__(self, exctype, excvalue, exctb):
222 222 self.close()
223 223
224 224 def callcommand(self, command, args):
225 225 if self._sent:
226 226 raise error.ProgrammingError(
227 227 b'callcommand() cannot be used after sendcommands()'
228 228 )
229 229
230 230 if self._closed:
231 231 raise error.ProgrammingError(
232 232 b'callcommand() cannot be used after close()'
233 233 )
234 234
235 235 # We don't need to support anything fancy. Just call the named
236 236 # method on the peer and return a resolved future.
237 237 fn = getattr(self._peer, pycompat.sysstr(command))
238 238
239 239 f = pycompat.futures.Future()
240 240
241 241 try:
242 242 result = fn(**pycompat.strkwargs(args))
243 243 except Exception:
244 244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 245 else:
246 246 f.set_result(result)
247 247
248 248 return f
249 249
250 250 def sendcommands(self):
251 251 self._sent = True
252 252
253 253 def close(self):
254 254 self._closed = True
255 255
256 256
257 257 @interfaceutil.implementer(repository.ipeercommands)
258 258 class localpeer(repository.peer):
259 259 '''peer for a local repo; reflects only the most recent API'''
260 260
261 261 def __init__(self, repo, caps=None):
262 262 super(localpeer, self).__init__()
263 263
264 264 if caps is None:
265 265 caps = moderncaps.copy()
266 266 self._repo = repo.filtered(b'served')
267 267 self.ui = repo.ui
268 268 self._caps = repo._restrictcapabilities(caps)
269 269
270 270 # Begin of _basepeer interface.
271 271
272 272 def url(self):
273 273 return self._repo.url()
274 274
275 275 def local(self):
276 276 return self._repo
277 277
278 278 def peer(self):
279 279 return self
280 280
281 281 def canpush(self):
282 282 return True
283 283
284 284 def close(self):
285 285 self._repo.close()
286 286
287 287 # End of _basepeer interface.
288 288
289 289 # Begin of _basewirecommands interface.
290 290
291 291 def branchmap(self):
292 292 return self._repo.branchmap()
293 293
294 294 def capabilities(self):
295 295 return self._caps
296 296
297 297 def clonebundles(self):
298 298 return self._repo.tryread(b'clonebundles.manifest')
299 299
300 300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 301 """Used to test argument passing over the wire"""
302 302 return b"%s %s %s %s %s" % (
303 303 one,
304 304 two,
305 305 pycompat.bytestr(three),
306 306 pycompat.bytestr(four),
307 307 pycompat.bytestr(five),
308 308 )
309 309
310 310 def getbundle(
311 311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 312 ):
313 313 chunks = exchange.getbundlechunks(
314 314 self._repo,
315 315 source,
316 316 heads=heads,
317 317 common=common,
318 318 bundlecaps=bundlecaps,
319 319 **kwargs
320 320 )[1]
321 321 cb = util.chunkbuffer(chunks)
322 322
323 323 if exchange.bundle2requested(bundlecaps):
324 324 # When requesting a bundle2, getbundle returns a stream to make the
325 325 # wire level function happier. We need to build a proper object
326 326 # from it in local peer.
327 327 return bundle2.getunbundler(self.ui, cb)
328 328 else:
329 329 return changegroup.getunbundler(b'01', cb, None)
330 330
331 331 def heads(self):
332 332 return self._repo.heads()
333 333
334 334 def known(self, nodes):
335 335 return self._repo.known(nodes)
336 336
337 337 def listkeys(self, namespace):
338 338 return self._repo.listkeys(namespace)
339 339
340 340 def lookup(self, key):
341 341 return self._repo.lookup(key)
342 342
343 343 def pushkey(self, namespace, key, old, new):
344 344 return self._repo.pushkey(namespace, key, old, new)
345 345
346 346 def stream_out(self):
347 347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 348
349 349 def unbundle(self, bundle, heads, url):
350 350 """apply a bundle on a repo
351 351
352 352 This function handles the repo locking itself."""
353 353 try:
354 354 try:
355 355 bundle = exchange.readbundle(self.ui, bundle, None)
356 356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 357 if util.safehasattr(ret, b'getchunks'):
358 358 # This is a bundle20 object, turn it into an unbundler.
359 359 # This little dance should be dropped eventually when the
360 360 # API is finally improved.
361 361 stream = util.chunkbuffer(ret.getchunks())
362 362 ret = bundle2.getunbundler(self.ui, stream)
363 363 return ret
364 364 except Exception as exc:
365 365 # If the exception contains output salvaged from a bundle2
366 366 # reply, we need to make sure it is printed before continuing
367 367 # to fail. So we build a bundle2 with such output and consume
368 368 # it directly.
369 369 #
370 370 # This is not very elegant but allows a "simple" solution for
371 371 # issue4594
372 372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 373 if output:
374 374 bundler = bundle2.bundle20(self._repo.ui)
375 375 for out in output:
376 376 bundler.addpart(out)
377 377 stream = util.chunkbuffer(bundler.getchunks())
378 378 b = bundle2.getunbundler(self.ui, stream)
379 379 bundle2.processbundle(self._repo, b)
380 380 raise
381 381 except error.PushRaced as exc:
382 382 raise error.ResponseError(
383 383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 384 )
385 385
386 386 # End of _basewirecommands interface.
387 387
388 388 # Begin of peer interface.
389 389
390 390 def commandexecutor(self):
391 391 return localcommandexecutor(self)
392 392
393 393 # End of peer interface.
394 394
395 395
396 396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 397 class locallegacypeer(localpeer):
398 398 '''peer extension which implements legacy methods too; used for tests with
399 399 restricted capabilities'''
400 400
401 401 def __init__(self, repo):
402 402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 403
404 404 # Begin of baselegacywirecommands interface.
405 405
406 406 def between(self, pairs):
407 407 return self._repo.between(pairs)
408 408
409 409 def branches(self, nodes):
410 410 return self._repo.branches(nodes)
411 411
412 412 def changegroup(self, nodes, source):
413 413 outgoing = discovery.outgoing(
414 414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 415 )
416 416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 417
418 418 def changegroupsubset(self, bases, heads, source):
419 419 outgoing = discovery.outgoing(
420 420 self._repo, missingroots=bases, missingheads=heads
421 421 )
422 422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 423
424 424 # End of baselegacywirecommands interface.
425 425
426 426
427 427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 428 # clients.
429 429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 430
431 431 # A repository with the sparserevlog feature will have delta chains that
432 432 # can spread over a larger span. Sparse reading cuts these large spans into
433 433 # pieces, so that each piece isn't too big.
434 434 # Without the sparserevlog capability, reading from the repository could use
435 435 # huge amounts of memory, because the whole span would be read at once,
436 436 # including all the intermediate revisions that aren't pertinent for the chain.
437 437 # This is why once a repository has enabled sparse-read, it becomes required.
438 438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 439
440 440 # A repository with the sidedataflag requirement will allow storing extra
441 441 # information for revisions without altering their original hashes.
442 442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 443
444 444 # A repository with the copies-sidedata-changeset requirement will store
445 445 # copies-related information in the changeset's sidedata.
446 446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 447
448 448 # The repository use persistent nodemap for the changelog and the manifest.
449 449 NODEMAP_REQUIREMENT = b'persistent-nodemap'
450 450
451 451 # Functions receiving (ui, features) that extensions can register to impact
452 452 # the ability to load repositories with custom requirements. Only
453 453 # functions defined in loaded extensions are called.
454 454 #
455 455 # The function receives a set of requirement strings that the repository
456 456 # is capable of opening. Functions will typically add elements to the
457 457 # set to reflect that the extension knows how to handle those requirements.
458 458 featuresetupfuncs = set()
459 459
460 460
461 461 def makelocalrepository(baseui, path, intents=None):
462 462 """Create a local repository object.
463 463
464 464 Given arguments needed to construct a local repository, this function
465 465 performs various early repository loading functionality (such as
466 466 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
467 467 the repository can be opened, derives a type suitable for representing
468 468 that repository, and returns an instance of it.
469 469
470 470 The returned object conforms to the ``repository.completelocalrepository``
471 471 interface.
472 472
473 473 The repository type is derived by calling a series of factory functions
474 474 for each aspect/interface of the final repository. These are defined by
475 475 ``REPO_INTERFACES``.
476 476
477 477 Each factory function is called to produce a type implementing a specific
478 478 interface. The cumulative list of returned types will be combined into a
479 479 new type and that type will be instantiated to represent the local
480 480 repository.
481 481
482 482 The factory functions each receive various state that may be consulted
483 483 as part of deriving a type.
484 484
485 485 Extensions should wrap these factory functions to customize repository type
486 486 creation. Note that an extension's wrapped function may be called even if
487 487 that extension is not loaded for the repo being constructed. Extensions
488 488 should check if their ``__name__`` appears in the
489 489 ``extensionmodulenames`` set passed to the factory function and no-op if
490 490 not.
491 491 """
492 492 ui = baseui.copy()
493 493 # Prevent copying repo configuration.
494 494 ui.copy = baseui.copy
495 495
496 496 # Working directory VFS rooted at repository root.
497 497 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
498 498
499 499 # Main VFS for .hg/ directory.
500 500 hgpath = wdirvfs.join(b'.hg')
501 501 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
502 502
503 503 # The .hg/ path should exist and should be a directory. All other
504 504 # cases are errors.
505 505 if not hgvfs.isdir():
506 506 try:
507 507 hgvfs.stat()
508 508 except OSError as e:
509 509 if e.errno != errno.ENOENT:
510 510 raise
511 511
512 512 raise error.RepoError(_(b'repository %s not found') % path)
513 513
514 514 # .hg/requires file contains a newline-delimited list of
515 515 # features/capabilities the opener (us) must have in order to use
516 516 # the repository. This file was introduced in Mercurial 0.9.2,
517 517 # which means very old repositories may not have one. We assume
518 518 # a missing file translates to no requirements.
519 519 try:
520 520 requirements = set(hgvfs.read(b'requires').splitlines())
521 521 except IOError as e:
522 522 if e.errno != errno.ENOENT:
523 523 raise
524 524 requirements = set()
525 525
526 526 # The .hg/hgrc file may load extensions or contain config options
527 527 # that influence repository construction. Attempt to load it and
528 528 # process any new extensions that it may have pulled in.
529 529 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
530 530 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
531 531 extensions.loadall(ui)
532 532 extensions.populateui(ui)
533 533
534 534 # Set of module names of extensions loaded for this repository.
535 535 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
536 536
537 537 supportedrequirements = gathersupportedrequirements(ui)
538 538
539 539 # We first validate the requirements are known.
540 540 ensurerequirementsrecognized(requirements, supportedrequirements)
541 541
542 542 # Then we validate that the known set is reasonable to use together.
543 543 ensurerequirementscompatible(ui, requirements)
544 544
545 545 # TODO there are unhandled edge cases related to opening repositories with
546 546 # shared storage. If storage is shared, we should also test for requirements
547 547 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
548 548 # that repo, as that repo may load extensions needed to open it. This is a
549 549 # bit complicated because we don't want the other hgrc to overwrite settings
550 550 # in this hgrc.
551 551 #
552 552 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
553 553 # file when sharing repos. But if a requirement is added after the share is
554 554 # performed, thereby introducing a new requirement for the opener, we
555 555 # will not see that and could encounter a run-time error interacting with
556 556 # that shared store since it has an unknown-to-us requirement.
557 557
558 558 # At this point, we know we should be capable of opening the repository.
559 559 # Now get on with doing that.
560 560
561 561 features = set()
562 562
563 563 # The "store" part of the repository holds versioned data. How it is
564 564 # accessed is determined by various requirements. The ``shared`` or
565 565 # ``relshared`` requirements indicate the store lives in the path contained
566 566 # in the ``.hg/sharedpath`` file. This is an absolute path for
567 567 # ``shared`` and relative to ``.hg/`` for ``relshared``.
568 568 if b'shared' in requirements or b'relshared' in requirements:
569 569 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
570 570 if b'relshared' in requirements:
571 571 sharedpath = hgvfs.join(sharedpath)
572 572
573 573 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
574 574
575 575 if not sharedvfs.exists():
576 576 raise error.RepoError(
577 577 _(b'.hg/sharedpath points to nonexistent directory %s')
578 578 % sharedvfs.base
579 579 )
580 580
581 581 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
582 582
583 583 storebasepath = sharedvfs.base
584 584 cachepath = sharedvfs.join(b'cache')
585 585 else:
586 586 storebasepath = hgvfs.base
587 587 cachepath = hgvfs.join(b'cache')
588 588 wcachepath = hgvfs.join(b'wcache')
589 589
590 590 # The store has changed over time and the exact layout is dictated by
591 591 # requirements. The store interface abstracts differences across all
592 592 # of them.
593 593 store = makestore(
594 594 requirements,
595 595 storebasepath,
596 596 lambda base: vfsmod.vfs(base, cacheaudited=True),
597 597 )
598 598 hgvfs.createmode = store.createmode
599 599
600 600 storevfs = store.vfs
601 601 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
602 602
603 603 # The cache vfs is used to manage cache files.
604 604 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
605 605 cachevfs.createmode = store.createmode
606 606 # The cache vfs is used to manage cache files related to the working copy
607 607 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
608 608 wcachevfs.createmode = store.createmode
609 609
610 610 # Now resolve the type for the repository object. We do this by repeatedly
611 611 # calling a factory function to produce types for specific aspects of the
612 612 # repo's operation. The aggregate returned types are used as base classes
613 613 # for a dynamically-derived type, which will represent our new repository.
614 614
615 615 bases = []
616 616 extrastate = {}
617 617
618 618 for iface, fn in REPO_INTERFACES:
619 619 # We pass all potentially useful state to give extensions tons of
620 620 # flexibility.
621 621 typ = fn()(
622 622 ui=ui,
623 623 intents=intents,
624 624 requirements=requirements,
625 625 features=features,
626 626 wdirvfs=wdirvfs,
627 627 hgvfs=hgvfs,
628 628 store=store,
629 629 storevfs=storevfs,
630 630 storeoptions=storevfs.options,
631 631 cachevfs=cachevfs,
632 632 wcachevfs=wcachevfs,
633 633 extensionmodulenames=extensionmodulenames,
634 634 extrastate=extrastate,
635 635 baseclasses=bases,
636 636 )
637 637
638 638 if not isinstance(typ, type):
639 639 raise error.ProgrammingError(
640 640 b'unable to construct type for %s' % iface
641 641 )
642 642
643 643 bases.append(typ)
644 644
645 645 # type() allows you to use characters in type names that wouldn't be
646 646 # recognized as Python symbols in source code. We abuse that to add
647 647 # rich information about our constructed repo.
648 648 name = pycompat.sysstr(
649 649 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
650 650 )
651 651
652 652 cls = type(name, tuple(bases), {})
653 653
654 654 return cls(
655 655 baseui=baseui,
656 656 ui=ui,
657 657 origroot=path,
658 658 wdirvfs=wdirvfs,
659 659 hgvfs=hgvfs,
660 660 requirements=requirements,
661 661 supportedrequirements=supportedrequirements,
662 662 sharedpath=storebasepath,
663 663 store=store,
664 664 cachevfs=cachevfs,
665 665 wcachevfs=wcachevfs,
666 666 features=features,
667 667 intents=intents,
668 668 )
669 669
670 670
671 671 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
672 672 """Load hgrc files/content into a ui instance.
673 673
674 674 This is called during repository opening to load any additional
675 675 config files or settings relevant to the current repository.
676 676
677 677 Returns a bool indicating whether any additional configs were loaded.
678 678
679 679 Extensions should monkeypatch this function to modify how per-repo
680 680 configs are loaded. For example, an extension may wish to pull in
681 681 configs from alternate files or sources.
682 682 """
683 683 if not rcutil.use_repo_hgrc():
684 684 return False
685 685 try:
686 686 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
687 687 return True
688 688 except IOError:
689 689 return False
690 690
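As the docstring notes, extensions may wrap loadhgrc() to pull per-repo configuration from other sources. A sketch using the standard wrapping helper; the extra file name is made up:

# myext.py - hypothetical extension, a sketch assuming an optional
# .hg/hgrc-extra file should be layered on top of the regular hgrc
from mercurial import extensions, localrepo

def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
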
691 691
692 692 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
693 693 """Perform additional actions after .hg/hgrc is loaded.
694 694
695 695 This function is called during repository loading immediately after
696 696 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
697 697
698 698 The function can be used to validate configs, automatically add
699 699 options (including extensions) based on requirements, etc.
700 700 """
701 701
702 702 # Map of requirements to list of extensions to load automatically when
703 703 # requirement is present.
704 704 autoextensions = {
705 705 b'git': [b'git'],
706 706 b'largefiles': [b'largefiles'],
707 707 b'lfs': [b'lfs'],
708 708 }
709 709
710 710 for requirement, names in sorted(autoextensions.items()):
711 711 if requirement not in requirements:
712 712 continue
713 713
714 714 for name in names:
715 715 if not ui.hasconfig(b'extensions', name):
716 716 ui.setconfig(b'extensions', name, b'', source=b'autoload')
717 717
718 718
719 719 def gathersupportedrequirements(ui):
720 720 """Determine the complete set of recognized requirements."""
721 721 # Start with all requirements supported by this file.
722 722 supported = set(localrepository._basesupported)
723 723
724 724 # Execute ``featuresetupfuncs`` entries if they belong to an extension
725 725 # relevant to this ui instance.
726 726 modules = {m.__name__ for n, m in extensions.extensions(ui)}
727 727
728 728 for fn in featuresetupfuncs:
729 729 if fn.__module__ in modules:
730 730 fn(ui, supported)
731 731
732 732 # Add derived requirements from registered compression engines.
733 733 for name in util.compengines:
734 734 engine = util.compengines[name]
735 735 if engine.available() and engine.revlogheader():
736 736 supported.add(b'exp-compression-%s' % name)
737 737 if engine.name() == b'zstd':
738 738 supported.add(b'revlog-compression-zstd')
739 739
740 740 return supported
741 741
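An extension that introduces its own requirement registers a function on featuresetupfuncs; gathersupportedrequirements() filters on fn.__module__, so the registration only takes effect while the defining extension is loaded. A sketch with a hypothetical requirement string:

# in a hypothetical extension module
from mercurial import localrepo

def featuresetup(ui, supported):
    supported.add(b'exp-myfeature')  # made-up requirement string

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
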
742 742
743 743 def ensurerequirementsrecognized(requirements, supported):
744 744 """Validate that a set of local requirements is recognized.
745 745
746 746 Receives a set of requirements. Raises an ``error.RepoError`` if there
747 747 exists any requirement in that set that currently loaded code doesn't
748 748 recognize.
749 749
750 750 Returns a set of supported requirements.
751 751 """
752 752 missing = set()
753 753
754 754 for requirement in requirements:
755 755 if requirement in supported:
756 756 continue
757 757
758 758 if not requirement or not requirement[0:1].isalnum():
759 759 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
760 760
761 761 missing.add(requirement)
762 762
763 763 if missing:
764 764 raise error.RequirementError(
765 765 _(b'repository requires features unknown to this Mercurial: %s')
766 766 % b' '.join(sorted(missing)),
767 767 hint=_(
768 768 b'see https://mercurial-scm.org/wiki/MissingRequirement '
769 769 b'for more information'
770 770 ),
771 771 )
772 772
773 773
774 774 def ensurerequirementscompatible(ui, requirements):
775 775 """Validates that a set of recognized requirements is mutually compatible.
776 776
777 777 Some requirements may not be compatible with others or require
778 778 config options that aren't enabled. This function is called during
779 779 repository opening to ensure that the set of requirements needed
780 780 to open a repository is sane and compatible with config options.
781 781
782 782 Extensions can monkeypatch this function to perform additional
783 783 checking.
784 784
785 785 ``error.RepoError`` should be raised on failure.
786 786 """
787 787 if b'exp-sparse' in requirements and not sparse.enabled:
788 788 raise error.RepoError(
789 789 _(
790 790 b'repository is using sparse feature but '
791 791 b'sparse is not enabled; enable the '
792 792 b'"sparse" extensions to access'
793 793 )
794 794 )
795 795
796 796
797 797 def makestore(requirements, path, vfstype):
798 798 """Construct a storage object for a repository."""
799 799 if b'store' in requirements:
800 800 if b'fncache' in requirements:
801 801 return storemod.fncachestore(
802 802 path, vfstype, b'dotencode' in requirements
803 803 )
804 804
805 805 return storemod.encodedstore(path, vfstype)
806 806
807 807 return storemod.basicstore(path, vfstype)
808 808
809 809
810 810 def resolvestorevfsoptions(ui, requirements, features):
811 811 """Resolve the options to pass to the store vfs opener.
812 812
813 813 The returned dict is used to influence behavior of the storage layer.
814 814 """
815 815 options = {}
816 816
817 817 if b'treemanifest' in requirements:
818 818 options[b'treemanifest'] = True
819 819
820 820 # experimental config: format.manifestcachesize
821 821 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
822 822 if manifestcachesize is not None:
823 823 options[b'manifestcachesize'] = manifestcachesize
824 824
825 825 # In the absence of another requirement superseding a revlog-related
826 826 # requirement, we have to assume the repo is using revlog version 0.
827 827 # This revlog format is super old and we don't bother trying to parse
828 828 # opener options for it because those options wouldn't do anything
829 829 # meaningful on such old repos.
830 830 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
831 831 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
832 832 else: # explicitly mark repo as using revlogv0
833 833 options[b'revlogv0'] = True
834 834
835 835 if COPIESSDC_REQUIREMENT in requirements:
836 836 options[b'copies-storage'] = b'changeset-sidedata'
837 837 else:
838 838 writecopiesto = ui.config(b'experimental', b'copies.write-to')
839 839 copiesextramode = (b'changeset-only', b'compatibility')
840 840 if writecopiesto in copiesextramode:
841 841 options[b'copies-storage'] = b'extra'
842 842
843 843 return options
844 844
845 845
846 846 def resolverevlogstorevfsoptions(ui, requirements, features):
847 847 """Resolve opener options specific to revlogs."""
848 848
849 849 options = {}
850 850 options[b'flagprocessors'] = {}
851 851
852 852 if b'revlogv1' in requirements:
853 853 options[b'revlogv1'] = True
854 854 if REVLOGV2_REQUIREMENT in requirements:
855 855 options[b'revlogv2'] = True
856 856
857 857 if b'generaldelta' in requirements:
858 858 options[b'generaldelta'] = True
859 859
860 860 # experimental config: format.chunkcachesize
861 861 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
862 862 if chunkcachesize is not None:
863 863 options[b'chunkcachesize'] = chunkcachesize
864 864
865 865 deltabothparents = ui.configbool(
866 866 b'storage', b'revlog.optimize-delta-parent-choice'
867 867 )
868 868 options[b'deltabothparents'] = deltabothparents
869 869
870 870 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
871 871 lazydeltabase = False
872 872 if lazydelta:
873 873 lazydeltabase = ui.configbool(
874 874 b'storage', b'revlog.reuse-external-delta-parent'
875 875 )
876 876 if lazydeltabase is None:
877 877 lazydeltabase = not scmutil.gddeltaconfig(ui)
878 878 options[b'lazydelta'] = lazydelta
879 879 options[b'lazydeltabase'] = lazydeltabase
880 880
881 881 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
882 882 if 0 <= chainspan:
883 883 options[b'maxdeltachainspan'] = chainspan
884 884
885 885 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
886 886 if mmapindexthreshold is not None:
887 887 options[b'mmapindexthreshold'] = mmapindexthreshold
888 888
889 889 withsparseread = ui.configbool(b'experimental', b'sparse-read')
890 890 srdensitythres = float(
891 891 ui.config(b'experimental', b'sparse-read.density-threshold')
892 892 )
893 893 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
894 894 options[b'with-sparse-read'] = withsparseread
895 895 options[b'sparse-read-density-threshold'] = srdensitythres
896 896 options[b'sparse-read-min-gap-size'] = srmingapsize
897 897
898 898 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
899 899 options[b'sparse-revlog'] = sparserevlog
900 900 if sparserevlog:
901 901 options[b'generaldelta'] = True
902 902
903 903 sidedata = SIDEDATA_REQUIREMENT in requirements
904 904 options[b'side-data'] = sidedata
905 905
906 906 maxchainlen = None
907 907 if sparserevlog:
908 908 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
909 909 # experimental config: format.maxchainlen
910 910 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
911 911 if maxchainlen is not None:
912 912 options[b'maxchainlen'] = maxchainlen
913 913
914 914 for r in requirements:
915 915 # we allow multiple compression engine requirements to co-exist because
916 916 # strictly speaking, revlog seems to support mixed compression styles.
917 917 #
918 918 # The compression used for new entries will be "the last one"
919 919 prefix = r.startswith
920 920 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
921 921 options[b'compengine'] = r.split(b'-', 2)[2]
922 922
923 923 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
924 924 if options[b'zlib.level'] is not None:
925 925 if not (0 <= options[b'zlib.level'] <= 9):
926 926 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
927 927 raise error.Abort(msg % options[b'zlib.level'])
928 928 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
929 929 if options[b'zstd.level'] is not None:
930 930 if not (0 <= options[b'zstd.level'] <= 22):
931 931 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
932 932 raise error.Abort(msg % options[b'zstd.level'])
933 933
934 934 if repository.NARROW_REQUIREMENT in requirements:
935 935 options[b'enableellipsis'] = True
936 936
937 937 if ui.configbool(b'experimental', b'rust.index'):
938 938 options[b'rust.index'] = True
939 939 if NODEMAP_REQUIREMENT in requirements:
940 940 options[b'persistent-nodemap'] = True
941 941 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
942 942 options[b'persistent-nodemap.mmap'] = True
943 943 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
944 944 options[b'persistent-nodemap.mode'] = epnm
945 945 if ui.configbool(b'devel', b'persistent-nodemap'):
946 946 options[b'devel-force-nodemap'] = True
947 947
948 948 return options
949 949
950 950
951 951 def makemain(**kwargs):
952 952 """Produce a type conforming to ``ilocalrepositorymain``."""
953 953 return localrepository
954 954
955 955
956 956 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
957 957 class revlogfilestorage(object):
958 958 """File storage when using revlogs."""
959 959
960 960 def file(self, path):
961 961 if path[0] == b'/':
962 962 path = path[1:]
963 963
964 964 return filelog.filelog(self.svfs, path)
965 965
966 966
967 967 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
968 968 class revlognarrowfilestorage(object):
969 969 """File storage when using revlogs and narrow files."""
970 970
971 971 def file(self, path):
972 972 if path[0] == b'/':
973 973 path = path[1:]
974 974
975 975 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
976 976
977 977
978 978 def makefilestorage(requirements, features, **kwargs):
979 979 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
980 980 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
981 981 features.add(repository.REPO_FEATURE_STREAM_CLONE)
982 982
983 983 if repository.NARROW_REQUIREMENT in requirements:
984 984 return revlognarrowfilestorage
985 985 else:
986 986 return revlogfilestorage
987 987
988 988
989 989 # List of repository interfaces and factory functions for them. Each
990 990 # will be called in order during ``makelocalrepository()`` to iteratively
991 991 # derive the final type for a local repository instance. We capture the
992 992 # function as a lambda so we don't hold a reference and the module-level
993 993 # functions can be wrapped.
994 994 REPO_INTERFACES = [
995 995 (repository.ilocalrepositorymain, lambda: makemain),
996 996 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
997 997 ]
998 998
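Because REPO_INTERFACES captures each factory through a lambda, wrapping the module-level function is enough to splice an extra base class into the derived repo type. A sketch layering a made-up mixin over makefilestorage():

# a sketch, not part of this change; the debug mixin is illustrative
from mercurial import extensions, localrepo

def _makefilestorage(orig, requirements, features, **kwargs):
    cls = orig(requirements, features, **kwargs)

    class debugfilestorage(cls):
        def file(self, path):
            self.ui.debug(b'opening filelog: %s\n' % path)
            return super(debugfilestorage, self).file(path)

    return debugfilestorage

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)
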
999 999
1000 1000 @interfaceutil.implementer(repository.ilocalrepositorymain)
1001 1001 class localrepository(object):
1002 1002 """Main class for representing local repositories.
1003 1003
1004 1004 All local repositories are instances of this class.
1005 1005
1006 1006 Constructed on its own, instances of this class are not usable as
1007 1007 repository objects. To obtain a usable repository object, call
1008 1008 ``hg.repository()``, ``localrepo.instance()``, or
1009 1009 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1010 1010 ``instance()`` adds support for creating new repositories.
1011 1011 ``hg.repository()`` adds more extension integration, including calling
1012 1012 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1013 1013 used.
1014 1014 """
1015 1015
1016 1016 # obsolete experimental requirements:
1017 1017 # - manifestv2: An experimental new manifest format that allowed
1018 1018 # for stem compression of long paths. Experiment ended up not
1019 1019 # being successful (repository sizes went up due to worse delta
1020 1020 # chains), and the code was deleted in 4.6.
1021 1021 supportedformats = {
1022 1022 b'revlogv1',
1023 1023 b'generaldelta',
1024 1024 b'treemanifest',
1025 1025 COPIESSDC_REQUIREMENT,
1026 1026 REVLOGV2_REQUIREMENT,
1027 1027 SIDEDATA_REQUIREMENT,
1028 1028 SPARSEREVLOG_REQUIREMENT,
1029 1029 NODEMAP_REQUIREMENT,
1030 1030 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1031 1031 }
1032 1032 _basesupported = supportedformats | {
1033 1033 b'store',
1034 1034 b'fncache',
1035 1035 b'shared',
1036 1036 b'relshared',
1037 1037 b'dotencode',
1038 1038 b'exp-sparse',
1039 1039 b'internal-phase',
1040 1040 }
1041 1041
1042 1042 # list of prefixes for files which can be written without 'wlock'
1043 1043 # Extensions should extend this list when needed
1044 1044 _wlockfreeprefix = {
1045 1045 # We might consider requiring 'wlock' for the next
1046 1046 # two, but pretty much all the existing code assumes
1047 1047 # wlock is not needed so we keep them excluded for
1048 1048 # now.
1049 1049 b'hgrc',
1050 1050 b'requires',
1051 1051 # XXX cache is a complicated business; someone
1052 1052 # should investigate this in depth at some point
1053 1053 b'cache/',
1054 1054 # XXX shouldn't dirstate be covered by the wlock?
1055 1055 b'dirstate',
1056 1056 # XXX bisect was still a bit too messy at the time
1057 1057 # this changeset was introduced. Someone should fix
1058 1058 # the remaining bit and drop this line
1059 1059 b'bisect.state',
1060 1060 }
1061 1061
1062 1062 def __init__(
1063 1063 self,
1064 1064 baseui,
1065 1065 ui,
1066 1066 origroot,
1067 1067 wdirvfs,
1068 1068 hgvfs,
1069 1069 requirements,
1070 1070 supportedrequirements,
1071 1071 sharedpath,
1072 1072 store,
1073 1073 cachevfs,
1074 1074 wcachevfs,
1075 1075 features,
1076 1076 intents=None,
1077 1077 ):
1078 1078 """Create a new local repository instance.
1079 1079
1080 1080 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1081 1081 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1082 1082 object.
1083 1083
1084 1084 Arguments:
1085 1085
1086 1086 baseui
1087 1087 ``ui.ui`` instance that ``ui`` argument was based off of.
1088 1088
1089 1089 ui
1090 1090 ``ui.ui`` instance for use by the repository.
1091 1091
1092 1092 origroot
1093 1093 ``bytes`` path to working directory root of this repository.
1094 1094
1095 1095 wdirvfs
1096 1096 ``vfs.vfs`` rooted at the working directory.
1097 1097
1098 1098 hgvfs
1099 1099 ``vfs.vfs`` rooted at .hg/
1100 1100
1101 1101 requirements
1102 1102 ``set`` of bytestrings representing repository opening requirements.
1103 1103
1104 1104 supportedrequirements
1105 1105 ``set`` of bytestrings representing repository requirements that we
1106 1106 know how to open. May be a superset of ``requirements``.
1107 1107
1108 1108 sharedpath
1109 1109 ``bytes`` Defining path to storage base directory. Points to a
1110 1110 ``.hg/`` directory somewhere.
1111 1111
1112 1112 store
1113 1113 ``store.basicstore`` (or derived) instance providing access to
1114 1114 versioned storage.
1115 1115
1116 1116 cachevfs
1117 1117 ``vfs.vfs`` used for cache files.
1118 1118
1119 1119 wcachevfs
1120 1120 ``vfs.vfs`` used for cache files related to the working copy.
1121 1121
1122 1122 features
1123 1123 ``set`` of bytestrings defining features/capabilities of this
1124 1124 instance.
1125 1125
1126 1126 intents
1127 1127 ``set`` of system strings indicating what this repo will be used
1128 1128 for.
1129 1129 """
1130 1130 self.baseui = baseui
1131 1131 self.ui = ui
1132 1132 self.origroot = origroot
1133 1133 # vfs rooted at working directory.
1134 1134 self.wvfs = wdirvfs
1135 1135 self.root = wdirvfs.base
1136 1136 # vfs rooted at .hg/. Used to access most non-store paths.
1137 1137 self.vfs = hgvfs
1138 1138 self.path = hgvfs.base
1139 1139 self.requirements = requirements
1140 1140 self.supported = supportedrequirements
1141 1141 self.sharedpath = sharedpath
1142 1142 self.store = store
1143 1143 self.cachevfs = cachevfs
1144 1144 self.wcachevfs = wcachevfs
1145 1145 self.features = features
1146 1146
1147 1147 self.filtername = None
1148 1148
1149 1149 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1150 1150 b'devel', b'check-locks'
1151 1151 ):
1152 1152 self.vfs.audit = self._getvfsward(self.vfs.audit)
1153 1153 # A list of callbacks to shape the phase if no data were found.
1154 1154 # Callbacks are in the form: func(repo, roots) --> processed root.
1155 1155 # This list is to be filled by extensions during repo setup.
1156 1156 self._phasedefaults = []
1157 1157
1158 1158 color.setup(self.ui)
1159 1159
1160 1160 self.spath = self.store.path
1161 1161 self.svfs = self.store.vfs
1162 1162 self.sjoin = self.store.join
1163 1163 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1164 1164 b'devel', b'check-locks'
1165 1165 ):
1166 1166 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1167 1167 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1168 1168 else: # standard vfs
1169 1169 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1170 1170
1171 1171 self._dirstatevalidatewarned = False
1172 1172
1173 1173 self._branchcaches = branchmap.BranchMapCache()
1174 1174 self._revbranchcache = None
1175 1175 self._filterpats = {}
1176 1176 self._datafilters = {}
1177 1177 self._transref = self._lockref = self._wlockref = None
1178 1178
1179 1179 # A cache for various files under .hg/ that tracks file changes,
1180 1180 # (used by the filecache decorator)
1181 1181 #
1182 1182 # Maps a property name to its util.filecacheentry
1183 1183 self._filecache = {}
1184 1184
1185 1185 # hold sets of revision to be filtered
1186 1186 # should be cleared when something might have changed the filter value:
1187 1187 # - new changesets,
1188 1188 # - phase change,
1189 1189 # - new obsolescence marker,
1190 1190 # - working directory parent change,
1191 1191 # - bookmark changes
1192 1192 self.filteredrevcache = {}
1193 1193
1194 1194 # post-dirstate-status hooks
1195 1195 self._postdsstatus = []
1196 1196
1197 1197 # generic mapping between names and nodes
1198 1198 self.names = namespaces.namespaces()
1199 1199
1200 1200 # Key to signature value.
1201 1201 self._sparsesignaturecache = {}
1202 1202 # Signature to cached matcher instance.
1203 1203 self._sparsematchercache = {}
1204 1204
1205 1205 self._extrafilterid = repoview.extrafilter(ui)
1206 1206
1207 1207 self.filecopiesmode = None
1208 1208 if COPIESSDC_REQUIREMENT in self.requirements:
1209 1209 self.filecopiesmode = b'changeset-sidedata'
1210 1210
1211 1211 def _getvfsward(self, origfunc):
1212 1212 """build a ward for self.vfs"""
1213 1213 rref = weakref.ref(self)
1214 1214
1215 1215 def checkvfs(path, mode=None):
1216 1216 ret = origfunc(path, mode=mode)
1217 1217 repo = rref()
1218 1218 if (
1219 1219 repo is None
1220 1220 or not util.safehasattr(repo, b'_wlockref')
1221 1221 or not util.safehasattr(repo, b'_lockref')
1222 1222 ):
1223 1223 return
1224 1224 if mode in (None, b'r', b'rb'):
1225 1225 return
1226 1226 if path.startswith(repo.path):
1227 1227 # truncate name relative to the repository (.hg)
1228 1228 path = path[len(repo.path) + 1 :]
1229 1229 if path.startswith(b'cache/'):
1230 1230 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1231 1231 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1232 1232 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1233 1233 # journal is covered by 'lock'
1234 1234 if repo._currentlock(repo._lockref) is None:
1235 1235 repo.ui.develwarn(
1236 1236 b'write with no lock: "%s"' % path,
1237 1237 stacklevel=3,
1238 1238 config=b'check-locks',
1239 1239 )
1240 1240 elif repo._currentlock(repo._wlockref) is None:
1241 1241 # rest of vfs files are covered by 'wlock'
1242 1242 #
1243 1243 # exclude special files
1244 1244 for prefix in self._wlockfreeprefix:
1245 1245 if path.startswith(prefix):
1246 1246 return
1247 1247 repo.ui.develwarn(
1248 1248 b'write with no wlock: "%s"' % path,
1249 1249 stacklevel=3,
1250 1250 config=b'check-locks',
1251 1251 )
1252 1252 return ret
1253 1253
1254 1254 return checkvfs
1255 1255
1256 1256 def _getsvfsward(self, origfunc):
1257 1257 """build a ward for self.svfs"""
1258 1258 rref = weakref.ref(self)
1259 1259
1260 1260 def checksvfs(path, mode=None):
1261 1261 ret = origfunc(path, mode=mode)
1262 1262 repo = rref()
1263 1263 if repo is None or not util.safehasattr(repo, b'_lockref'):
1264 1264 return
1265 1265 if mode in (None, b'r', b'rb'):
1266 1266 return
1267 1267 if path.startswith(repo.sharedpath):
1268 1268 # truncate name relative to the repository (.hg)
1269 1269 path = path[len(repo.sharedpath) + 1 :]
1270 1270 if repo._currentlock(repo._lockref) is None:
1271 1271 repo.ui.develwarn(
1272 1272 b'write with no lock: "%s"' % path, stacklevel=4
1273 1273 )
1274 1274 return ret
1275 1275
1276 1276 return checksvfs
1277 1277
1278 1278 def close(self):
1279 1279 self._writecaches()
1280 1280
1281 1281 def _writecaches(self):
1282 1282 if self._revbranchcache:
1283 1283 self._revbranchcache.write()
1284 1284
1285 1285 def _restrictcapabilities(self, caps):
1286 1286 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1287 1287 caps = set(caps)
1288 1288 capsblob = bundle2.encodecaps(
1289 1289 bundle2.getrepocaps(self, role=b'client')
1290 1290 )
1291 1291 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1292 1292 return caps
1293 1293
1294 1294 def _writerequirements(self):
1295 1295 scmutil.writerequires(self.vfs, self.requirements)
1296 1296
1297 1297 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1298 1298 # self -> auditor -> self._checknested -> self
1299 1299
1300 1300 @property
1301 1301 def auditor(self):
1302 1302 # This is only used by context.workingctx.match in order to
1303 1303 # detect files in subrepos.
1304 1304 return pathutil.pathauditor(self.root, callback=self._checknested)
1305 1305
1306 1306 @property
1307 1307 def nofsauditor(self):
1308 1308 # This is only used by context.basectx.match in order to detect
1309 1309 # files in subrepos.
1310 1310 return pathutil.pathauditor(
1311 1311 self.root, callback=self._checknested, realfs=False, cached=True
1312 1312 )
1313 1313
1314 1314 def _checknested(self, path):
1315 1315 """Determine if path is a legal nested repository."""
1316 1316 if not path.startswith(self.root):
1317 1317 return False
1318 1318 subpath = path[len(self.root) + 1 :]
1319 1319 normsubpath = util.pconvert(subpath)
1320 1320
1321 1321 # XXX: Checking against the current working copy is wrong in
1322 1322 # the sense that it can reject things like
1323 1323 #
1324 1324 # $ hg cat -r 10 sub/x.txt
1325 1325 #
1326 1326 # if sub/ is no longer a subrepository in the working copy
1327 1327 # parent revision.
1328 1328 #
1329 1329 # However, it can of course also allow things that would have
1330 1330 # been rejected before, such as the above cat command if sub/
1331 1331 # is a subrepository now, but was a normal directory before.
1332 1332 # The old path auditor would have rejected by mistake since it
1333 1333 # panics when it sees sub/.hg/.
1334 1334 #
1335 1335 # All in all, checking against the working copy seems sensible
1336 1336 # since we want to prevent access to nested repositories on
1337 1337 # the filesystem *now*.
1338 1338 ctx = self[None]
1339 1339 parts = util.splitpath(subpath)
1340 1340 while parts:
1341 1341 prefix = b'/'.join(parts)
1342 1342 if prefix in ctx.substate:
1343 1343 if prefix == normsubpath:
1344 1344 return True
1345 1345 else:
1346 1346 sub = ctx.sub(prefix)
1347 1347 return sub.checknested(subpath[len(prefix) + 1 :])
1348 1348 else:
1349 1349 parts.pop()
1350 1350 return False
1351 1351
1352 1352 def peer(self):
1353 1353 return localpeer(self) # not cached to avoid reference cycle
1354 1354
1355 1355 def unfiltered(self):
1356 1356 """Return unfiltered version of the repository
1357 1357
1358 1358 Intended to be overwritten by filtered repo."""
1359 1359 return self
1360 1360
1361 1361 def filtered(self, name, visibilityexceptions=None):
1362 1362 """Return a filtered version of a repository
1363 1363
1364 1364 The `name` parameter is the identifier of the requested view. This
1365 1365 will return a repoview object set "exactly" to the specified view.
1366 1366
1367 1367 This function does not apply recursive filtering to a repository. For
1368 1368 example calling `repo.filtered("served")` will return a repoview using
1369 1369 the "served" view, regardless of the initial view used by `repo`.
1370 1370
1371 1371 In other words, there is always only one level of `repoview` "filtering".
1372 1372 """
1373 1373 if self._extrafilterid is not None and b'%' not in name:
1374 1374 name = name + b'%' + self._extrafilterid
1375 1375
1376 1376 cls = repoview.newtype(self.unfiltered().__class__)
1377 1377 return cls(self, name, visibilityexceptions)
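
# Illustrative sketch (not part of this module): filter names come from
# repoview.filtertable, for example:
#
#   served = repo.filtered(b'served')    # typically what is served to clients
#   visible = repo.filtered(b'visible')  # hides obsolete changesets
#
# Each call returns a fresh repoview wrapping the unfiltered repository.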
1378 1378
1379 1379 @mixedrepostorecache(
1380 1380 (b'bookmarks', b'plain'),
1381 1381 (b'bookmarks.current', b'plain'),
1382 1382 (b'bookmarks', b''),
1383 1383 (b'00changelog.i', b''),
1384 1384 )
1385 1385 def _bookmarks(self):
1386 1386 # Since the multiple files involved in the transaction cannot be
1387 1387 # written atomically (with the current repository format), there is a race
1388 1388 # condition here.
1389 1389 #
1390 1390 # 1) changelog content A is read
1391 1391 # 2) outside transaction update changelog to content B
1392 1392 # 3) outside transaction update bookmark file referring to content B
1393 1393 # 4) bookmarks file content is read and filtered against changelog-A
1394 1394 #
1395 1395 # When this happens, bookmarks against nodes missing from A are dropped.
1396 1396 #
1397 1397 # Having this happen during a read is not great, but it becomes worse
1398 1398 # when it happens during a write, because the bookmarks pointing to the
1399 1399 # "unknown" nodes will be dropped for good. However, writes happen within
1400 1400 # locks. This locking makes it possible to have a race-free consistent
1401 1401 # read. For this purpose, data read from disk before locking is
1402 1402 # "invalidated" right after the locks are taken. These invalidations are
1403 1403 # "light": the `filecache` mechanism keeps the data in memory and will
1404 1404 # reuse it if the underlying files did not change. Not parsing the
1405 1405 # same data multiple times helps performance.
1406 1406 #
1407 1407 # Unfortunately, in the case described above, the files tracked by the
1408 1408 # bookmarks file cache might not have changed, but the in-memory
1409 1409 # content is still "wrong" because we used an older changelog content
1410 1410 # to process the on-disk data. So after locking, the changelog would be
1411 1411 # refreshed but `_bookmarks` would be preserved.
1412 1412 # Adding `00changelog.i` to the list of tracked files is not
1413 1413 # enough, because at the time we build the content for `_bookmarks` in
1414 1414 # (4), the changelog file has already diverged from the content used
1415 1415 # for loading `changelog` in (1).
1416 1416 #
1417 1417 # To prevent the issue, we force the changelog to be explicitly
1418 1418 # reloaded while computing `_bookmarks`. The data race can still happen
1419 1419 # without the lock (with a narrower window), but it would no longer go
1420 1420 # undetected during the lock time refresh.
1421 1421 #
1422 1422 # The new schedule is as follows:
1423 1423 #
1424 1424 # 1) filecache logic detects that `_bookmarks` needs to be computed
1425 1425 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1426 1426 # 3) We force `changelog` filecache to be tested
1427 1427 # 4) cachestat for `changelog` are captured (for changelog)
1428 1428 # 5) `_bookmarks` is computed and cached
1429 1429 #
1430 1430 # The step in (3) ensures we have a changelog at least as recent as the
1431 1431 # cache stat computed in (1). As a result, at locking time:
1432 1432 # * if the changelog did not change since (1) -> we can reuse the data
1433 1433 # * otherwise -> the bookmarks get refreshed.
1434 1434 self._refreshchangelog()
1435 1435 return bookmarks.bmstore(self)
1436 1436
1437 1437 def _refreshchangelog(self):
1438 1438 """make sure the in memory changelog match the on-disk one"""
1439 1439 if 'changelog' in vars(self) and self.currenttransaction() is None:
1440 1440 del self.changelog
1441 1441
1442 1442 @property
1443 1443 def _activebookmark(self):
1444 1444 return self._bookmarks.active
1445 1445
1446 1446 # _phasesets depends on the changelog. What we need is to call
1447 1447 # _phasecache.invalidate() if '00changelog.i' was changed, but that
1448 1448 # can't be easily expressed in the filecache mechanism.
1449 1449 @storecache(b'phaseroots', b'00changelog.i')
1450 1450 def _phasecache(self):
1451 1451 return phases.phasecache(self, self._phasedefaults)
1452 1452
1453 1453 @storecache(b'obsstore')
1454 1454 def obsstore(self):
1455 1455 return obsolete.makestore(self.ui, self)
1456 1456
1457 1457 @storecache(b'00changelog.i')
1458 1458 def changelog(self):
1459 1459 return self.store.changelog(txnutil.mayhavepending(self.root))
1460 1460
1461 1461 @storecache(b'00manifest.i')
1462 1462 def manifestlog(self):
1463 1463 return self.store.manifestlog(self, self._storenarrowmatch)
1464 1464
1465 1465 @repofilecache(b'dirstate')
1466 1466 def dirstate(self):
1467 1467 return self._makedirstate()
1468 1468
1469 1469 def _makedirstate(self):
1470 1470 """Extension point for wrapping the dirstate per-repo."""
1471 1471 sparsematchfn = lambda: sparse.matcher(self)
1472 1472
1473 1473 return dirstate.dirstate(
1474 1474 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1475 1475 )
1476 1476
1477 1477 def _dirstatevalidate(self, node):
1478 1478 try:
1479 1479 self.changelog.rev(node)
1480 1480 return node
1481 1481 except error.LookupError:
1482 1482 if not self._dirstatevalidatewarned:
1483 1483 self._dirstatevalidatewarned = True
1484 1484 self.ui.warn(
1485 1485 _(b"warning: ignoring unknown working parent %s!\n")
1486 1486 % short(node)
1487 1487 )
1488 1488 return nullid
1489 1489
1490 1490 @storecache(narrowspec.FILENAME)
1491 1491 def narrowpats(self):
1492 1492 """matcher patterns for this repository's narrowspec
1493 1493
1494 1494 A tuple of (includes, excludes).
1495 1495 """
1496 1496 return narrowspec.load(self)
1497 1497
1498 1498 @storecache(narrowspec.FILENAME)
1499 1499 def _storenarrowmatch(self):
1500 1500 if repository.NARROW_REQUIREMENT not in self.requirements:
1501 1501 return matchmod.always()
1502 1502 include, exclude = self.narrowpats
1503 1503 return narrowspec.match(self.root, include=include, exclude=exclude)
1504 1504
1505 1505 @storecache(narrowspec.FILENAME)
1506 1506 def _narrowmatch(self):
1507 1507 if repository.NARROW_REQUIREMENT not in self.requirements:
1508 1508 return matchmod.always()
1509 1509 narrowspec.checkworkingcopynarrowspec(self)
1510 1510 include, exclude = self.narrowpats
1511 1511 return narrowspec.match(self.root, include=include, exclude=exclude)
1512 1512
1513 1513 def narrowmatch(self, match=None, includeexact=False):
1514 1514 """matcher corresponding the the repo's narrowspec
1515 1515
1516 1516 If `match` is given, then that will be intersected with the narrow
1517 1517 matcher.
1518 1518
1519 1519 If `includeexact` is True, then any exact matches from `match` will
1520 1520 be included even if they're outside the narrowspec.
1521 1521 """
1522 1522 if match:
1523 1523 if includeexact and not self._narrowmatch.always():
1524 1524 # do not exclude explicitly-specified paths so that they can
1525 1525 # be warned later on
1526 1526 em = matchmod.exact(match.files())
1527 1527 nm = matchmod.unionmatcher([self._narrowmatch, em])
1528 1528 return matchmod.intersectmatchers(match, nm)
1529 1529 return matchmod.intersectmatchers(match, self._narrowmatch)
1530 1530 return self._narrowmatch
1531 1531
1532 1532 def setnarrowpats(self, newincludes, newexcludes):
1533 1533 narrowspec.save(self, newincludes, newexcludes)
1534 1534 self.invalidate(clearfilecache=True)
1535 1535
1536 1536 @unfilteredpropertycache
1537 1537 def _quick_access_changeid_null(self):
1538 1538 return {
1539 1539 b'null': (nullrev, nullid),
1540 1540 nullrev: (nullrev, nullid),
1541 1541 nullid: (nullrev, nullid),
1542 1542 }
1543 1543
1544 1544 @unfilteredpropertycache
1545 1545 def _quick_access_changeid_wc(self):
1546 1546 # also provide fast-path access to the working copy parents;
1547 1547 # however, only do it for filters that ensure the wc is visible.
1548 1548 quick = {}
1549 1549 cl = self.unfiltered().changelog
1550 1550 for node in self.dirstate.parents():
1551 1551 if node == nullid:
1552 1552 continue
1553 1553 rev = cl.index.get_rev(node)
1554 1554 if rev is None:
1555 1555 # unknown working copy parent case:
1556 1556 #
1557 1557 # skip the fast path and let higher code deal with it
1558 1558 continue
1559 1559 pair = (rev, node)
1560 1560 quick[rev] = pair
1561 1561 quick[node] = pair
1562 1562 # also add the parents of the parents
1563 1563 for r in cl.parentrevs(rev):
1564 1564 if r == nullrev:
1565 1565 continue
1566 1566 n = cl.node(r)
1567 1567 pair = (r, n)
1568 1568 quick[r] = pair
1569 1569 quick[n] = pair
1570 1570 p1node = self.dirstate.p1()
1571 1571 if p1node != nullid:
1572 1572 quick[b'.'] = quick[p1node]
1573 1573 return quick
1574 1574
1575 1575 @unfilteredmethod
1576 1576 def _quick_access_changeid_invalidate(self):
1577 1577 if '_quick_access_changeid_wc' in vars(self):
1578 1578 del self.__dict__['_quick_access_changeid_wc']
1579 1579
1580 1580 @property
1581 1581 def _quick_access_changeid(self):
1582 1582 """an helper dictionnary for __getitem__ calls
1583 1583
1584 1584 This contains a list of symbol we can recognise right away without
1585 1585 further processing.
1586 1586 """
1587 1587 mapping = self._quick_access_changeid_null
1588 1588 if self.filtername in repoview.filter_has_wc:
1589 1589 mapping = mapping.copy()
1590 1590 mapping.update(self._quick_access_changeid_wc)
1591 1591 return mapping
1592 1592
1593 1593 def __getitem__(self, changeid):
1594 1594 # dealing with special cases
1595 1595 if changeid is None:
1596 1596 return context.workingctx(self)
1597 1597 if isinstance(changeid, context.basectx):
1598 1598 return changeid
1599 1599
1600 1600 # dealing with multiple revisions
1601 1601 if isinstance(changeid, slice):
1602 1602 # wdirrev isn't contiguous so the slice shouldn't include it
1603 1603 return [
1604 1604 self[i]
1605 1605 for i in pycompat.xrange(*changeid.indices(len(self)))
1606 1606 if i not in self.changelog.filteredrevs
1607 1607 ]
1608 1608
1609 1609 # dealing with some special values
1610 1610 quick_access = self._quick_access_changeid.get(changeid)
1611 1611 if quick_access is not None:
1612 1612 rev, node = quick_access
1613 1613 return context.changectx(self, rev, node, maybe_filtered=False)
1614 1614 if changeid == b'tip':
1615 1615 node = self.changelog.tip()
1616 1616 rev = self.changelog.rev(node)
1617 1617 return context.changectx(self, rev, node)
1618 1618
1619 1619 # dealing with arbitrary values
1620 1620 try:
1621 1621 if isinstance(changeid, int):
1622 1622 node = self.changelog.node(changeid)
1623 1623 rev = changeid
1624 1624 elif changeid == b'.':
1625 1625 # this is a hack to delay/avoid loading obsmarkers
1626 1626 # when we know that '.' won't be hidden
1627 1627 node = self.dirstate.p1()
1628 1628 rev = self.unfiltered().changelog.rev(node)
1629 1629 elif len(changeid) == 20:
1630 1630 try:
1631 1631 node = changeid
1632 1632 rev = self.changelog.rev(changeid)
1633 1633 except error.FilteredLookupError:
1634 1634 changeid = hex(changeid) # for the error message
1635 1635 raise
1636 1636 except LookupError:
1637 1637 # check if it might have come from damaged dirstate
1638 1638 #
1639 1639 # XXX we could avoid the unfiltered if we had a recognizable
1640 1640 # exception for filtered changeset access
1641 1641 if (
1642 1642 self.local()
1643 1643 and changeid in self.unfiltered().dirstate.parents()
1644 1644 ):
1645 1645 msg = _(b"working directory has unknown parent '%s'!")
1646 1646 raise error.Abort(msg % short(changeid))
1647 1647 changeid = hex(changeid) # for the error message
1648 1648 raise
1649 1649
1650 1650 elif len(changeid) == 40:
1651 1651 node = bin(changeid)
1652 1652 rev = self.changelog.rev(node)
1653 1653 else:
1654 1654 raise error.ProgrammingError(
1655 1655 b"unsupported changeid '%s' of type %s"
1656 1656 % (changeid, pycompat.bytestr(type(changeid)))
1657 1657 )
1658 1658
1659 1659 return context.changectx(self, rev, node)
1660 1660
1661 1661 except (error.FilteredIndexError, error.FilteredLookupError):
1662 1662 raise error.FilteredRepoLookupError(
1663 1663 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1664 1664 )
1665 1665 except (IndexError, LookupError):
1666 1666 raise error.RepoLookupError(
1667 1667 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1668 1668 )
1669 1669 except error.WdirUnsupported:
1670 1670 return context.workingctx(self)
1671 1671
1672 1672 def __contains__(self, changeid):
1673 1673 """True if the given changeid exists
1674 1674
1675 1675 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1676 1676 prefix is specified.
1677 1677 """
1678 1678 try:
1679 1679 self[changeid]
1680 1680 return True
1681 1681 except error.RepoLookupError:
1682 1682 return False
1683 1683
1684 1684 def __nonzero__(self):
1685 1685 return True
1686 1686
1687 1687 __bool__ = __nonzero__
1688 1688
1689 1689 def __len__(self):
1690 1690 # no need to pay the cost of repoview.changelog
1691 1691 unfi = self.unfiltered()
1692 1692 return len(unfi.changelog)
1693 1693
1694 1694 def __iter__(self):
1695 1695 return iter(self.changelog)
1696 1696
1697 1697 def revs(self, expr, *args):
1698 1698 '''Find revisions matching a revset.
1699 1699
1700 1700 The revset is specified as a string ``expr`` that may contain
1701 1701 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1702 1702
1703 1703 Revset aliases from the configuration are not expanded. To expand
1704 1704 user aliases, consider calling ``scmutil.revrange()`` or
1705 1705 ``repo.anyrevs([expr], user=True)``.
1706 1706
1707 1707 Returns a smartset.abstractsmartset, which is a list-like interface
1708 1708 that contains integer revisions.
1709 1709 '''
1710 1710 tree = revsetlang.spectree(expr, *args)
1711 1711 return revset.makematcher(tree)(self)
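
# Illustrative sketch (not part of this module): the %-escapes are
# defined by revsetlang.formatspec, e.g. %d for an int revision:
#
#   revs = repo.revs(b'ancestors(%d) and not public()', somerev)
#
# where `somerev` is an assumed integer revision number.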
1712 1712
1713 1713 def set(self, expr, *args):
1714 1714 '''Find revisions matching a revset and emit changectx instances.
1715 1715
1716 1716 This is a convenience wrapper around ``revs()`` that iterates the
1717 1717 result and is a generator of changectx instances.
1718 1718
1719 1719 Revset aliases from the configuration are not expanded. To expand
1720 1720 user aliases, consider calling ``scmutil.revrange()``.
1721 1721 '''
1722 1722 for r in self.revs(expr, *args):
1723 1723 yield self[r]
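
# Illustrative sketch (not part of this module): set() yields changectx
# instances, so their methods are directly available:
#
#   for ctx in repo.set(b'draft() and user(%s)', someuser):
#       ui.write(b'%s\n' % ctx.hex())
#
# where `someuser` is an assumed bytes username.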
1724 1724
1725 1725 def anyrevs(self, specs, user=False, localalias=None):
1726 1726 '''Find revisions matching one of the given revsets.
1727 1727
1728 1728 Revset aliases from the configuration are not expanded by default. To
1729 1729 expand user aliases, specify ``user=True``. To provide some local
1730 1730 definitions overriding user aliases, set ``localalias`` to
1731 1731 ``{name: definitionstring}``.
1732 1732 '''
1733 1733 if specs == [b'null']:
1734 1734 return revset.baseset([nullrev])
1735 1735 if specs == [b'.']:
1736 1736 quick_data = self._quick_access_changeid.get(b'.')
1737 1737 if quick_data is not None:
1738 1738 return revset.baseset([quick_data[0]])
1739 1739 if user:
1740 1740 m = revset.matchany(
1741 1741 self.ui,
1742 1742 specs,
1743 1743 lookup=revset.lookupfn(self),
1744 1744 localalias=localalias,
1745 1745 )
1746 1746 else:
1747 1747 m = revset.matchany(None, specs, localalias=localalias)
1748 1748 return m(self)
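
# Illustrative sketch (not part of this module): `localalias` supplies
# local revset alias definitions (the alias name here is made up):
#
#   revs = repo.anyrevs([b'mine()'], user=True,
#                       localalias={b'mine': b'user("alice")'})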
1749 1749
1750 1750 def url(self):
1751 1751 return b'file:' + self.root
1752 1752
1753 1753 def hook(self, name, throw=False, **args):
1754 1754 """Call a hook, passing this repo instance.
1755 1755
1756 1756 This is a convenience method to aid invoking hooks. Extensions likely
1757 1757 won't call this unless they have registered a custom hook or are
1758 1758 replacing code that is expected to call a hook.
1759 1759 """
1760 1760 return hook.hook(self.ui, self, name, throw, **args)
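
# Illustrative sketch (not part of this module; the hook name is made
# up):
#
#   repo.hook(b'myext-sync-done', throw=False, source=b'myext')
#
# For shell hooks, keyword arguments are exposed as HG_* environment
# variables (here HG_SOURCE).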
1761 1761
1762 1762 @filteredpropertycache
1763 1763 def _tagscache(self):
1764 1764 '''Returns a tagscache object that contains various tag-related
1765 1765 caches.'''
1766 1766
1767 1767 # This simplifies its cache management by having one decorated
1768 1768 # function (this one) and the rest simply fetch things from it.
1769 1769 class tagscache(object):
1770 1770 def __init__(self):
1771 1771 # These two define the set of tags for this repository. tags
1772 1772 # maps tag name to node; tagtypes maps tag name to 'global' or
1773 1773 # 'local'. (Global tags are defined by .hgtags across all
1774 1774 # heads, and local tags are defined in .hg/localtags.)
1775 1775 # They constitute the in-memory cache of tags.
1776 1776 self.tags = self.tagtypes = None
1777 1777
1778 1778 self.nodetagscache = self.tagslist = None
1779 1779
1780 1780 cache = tagscache()
1781 1781 cache.tags, cache.tagtypes = self._findtags()
1782 1782
1783 1783 return cache
1784 1784
1785 1785 def tags(self):
1786 1786 '''return a mapping of tag to node'''
1787 1787 t = {}
1788 1788 if self.changelog.filteredrevs:
1789 1789 tags, tt = self._findtags()
1790 1790 else:
1791 1791 tags = self._tagscache.tags
1792 1792 rev = self.changelog.rev
1793 1793 for k, v in pycompat.iteritems(tags):
1794 1794 try:
1795 1795 # ignore tags to unknown nodes
1796 1796 rev(v)
1797 1797 t[k] = v
1798 1798 except (error.LookupError, ValueError):
1799 1799 pass
1800 1800 return t
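
# Illustrative sketch (not part of this module): b'tip' is always set
# by _findtags(), so for a given repo object:
#
#   assert repo.tags()[b'tip'] == repo.changelog.tip()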
1801 1801
1802 1802 def _findtags(self):
1803 1803 '''Do the hard work of finding tags. Return a pair of dicts
1804 1804 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1805 1805 maps tag name to a string like \'global\' or \'local\'.
1806 1806 Subclasses or extensions are free to add their own tags, but
1807 1807 should be aware that the returned dicts will be retained for the
1808 1808 duration of the localrepo object.'''
1809 1809
1810 1810 # XXX what tagtype should subclasses/extensions use? Currently
1811 1811 # mq and bookmarks add tags, but do not set the tagtype at all.
1812 1812 # Should each extension invent its own tag type? Should there
1813 1813 # be one tagtype for all such "virtual" tags? Or is the status
1814 1814 # quo fine?
1815 1815
1816 1816 # map tag name to (node, hist)
1817 1817 alltags = tagsmod.findglobaltags(self.ui, self)
1818 1818 # map tag name to tag type
1819 1819 tagtypes = {tag: b'global' for tag in alltags}
1820 1820
1821 1821 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1822 1822
1823 1823 # Build the return dicts. Have to re-encode tag names because
1824 1824 # the tags module always uses UTF-8 (in order not to lose info
1825 1825 # writing to the cache), but the rest of Mercurial wants them in
1826 1826 # local encoding.
1827 1827 tags = {}
1828 1828 for (name, (node, hist)) in pycompat.iteritems(alltags):
1829 1829 if node != nullid:
1830 1830 tags[encoding.tolocal(name)] = node
1831 1831 tags[b'tip'] = self.changelog.tip()
1832 1832 tagtypes = {
1833 1833 encoding.tolocal(name): value
1834 1834 for (name, value) in pycompat.iteritems(tagtypes)
1835 1835 }
1836 1836 return (tags, tagtypes)
1837 1837
1838 1838 def tagtype(self, tagname):
1839 1839 '''
1840 1840 return the type of the given tag. result can be:
1841 1841
1842 1842 'local' : a local tag
1843 1843 'global' : a global tag
1844 1844 None : tag does not exist
1845 1845 '''
1846 1846
1847 1847 return self._tagscache.tagtypes.get(tagname)
1848 1848
1849 1849 def tagslist(self):
1850 1850 '''return a list of tags ordered by revision'''
1851 1851 if not self._tagscache.tagslist:
1852 1852 l = []
1853 1853 for t, n in pycompat.iteritems(self.tags()):
1854 1854 l.append((self.changelog.rev(n), t, n))
1855 1855 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1856 1856
1857 1857 return self._tagscache.tagslist
1858 1858
1859 1859 def nodetags(self, node):
1860 1860 '''return the tags associated with a node'''
1861 1861 if not self._tagscache.nodetagscache:
1862 1862 nodetagscache = {}
1863 1863 for t, n in pycompat.iteritems(self._tagscache.tags):
1864 1864 nodetagscache.setdefault(n, []).append(t)
1865 1865 for tags in pycompat.itervalues(nodetagscache):
1866 1866 tags.sort()
1867 1867 self._tagscache.nodetagscache = nodetagscache
1868 1868 return self._tagscache.nodetagscache.get(node, [])
1869 1869
1870 1870 def nodebookmarks(self, node):
1871 1871 """return the list of bookmarks pointing to the specified node"""
1872 1872 return self._bookmarks.names(node)
1873 1873
1874 1874 def branchmap(self):
1875 1875 '''returns a dictionary {branch: [branchheads]} with branchheads
1876 1876 ordered by increasing revision number'''
1877 1877 return self._branchcaches[self]
1878 1878
1879 1879 @unfilteredmethod
1880 1880 def revbranchcache(self):
1881 1881 if not self._revbranchcache:
1882 1882 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1883 1883 return self._revbranchcache
1884 1884
1885 1885 def branchtip(self, branch, ignoremissing=False):
1886 1886 '''return the tip node for a given branch
1887 1887
1888 1888 If ignoremissing is True, then this method will not raise an error.
1889 1889 This is helpful for callers that only expect None for a missing branch
1890 1890 (e.g. namespace).
1891 1891
1892 1892 '''
1893 1893 try:
1894 1894 return self.branchmap().branchtip(branch)
1895 1895 except KeyError:
1896 1896 if not ignoremissing:
1897 1897 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1898 1898 else:
1899 1899 pass
1900 1900
1901 1901 def lookup(self, key):
1902 1902 node = scmutil.revsymbol(self, key).node()
1903 1903 if node is None:
1904 1904 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1905 1905 return node
1906 1906
1907 1907 def lookupbranch(self, key):
1908 1908 if self.branchmap().hasbranch(key):
1909 1909 return key
1910 1910
1911 1911 return scmutil.revsymbol(self, key).branch()
1912 1912
1913 1913 def known(self, nodes):
1914 1914 cl = self.changelog
1915 1915 get_rev = cl.index.get_rev
1916 1916 filtered = cl.filteredrevs
1917 1917 result = []
1918 1918 for n in nodes:
1919 1919 r = get_rev(n)
1920 1920 resp = not (r is None or r in filtered)
1921 1921 result.append(resp)
1922 1922 return result
1923 1923
1924 1924 def local(self):
1925 1925 return self
1926 1926
1927 1927 def publishing(self):
1928 1928 # it's safe (and desirable) to trust the publish flag unconditionally
1929 1929 # so that we don't finalize changes shared between users via ssh or nfs
1930 1930 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1931 1931
1932 1932 def cancopy(self):
1933 1933 # so statichttprepo's override of local() works
1934 1934 if not self.local():
1935 1935 return False
1936 1936 if not self.publishing():
1937 1937 return True
1938 1938 # if publishing we can't copy if there is filtered content
1939 1939 return not self.filtered(b'visible').changelog.filteredrevs
1940 1940
1941 1941 def shared(self):
1942 1942 '''the type of shared repository (None if not shared)'''
1943 1943 if self.sharedpath != self.path:
1944 1944 return b'store'
1945 1945 return None
1946 1946
1947 1947 def wjoin(self, f, *insidef):
1948 1948 return self.vfs.reljoin(self.root, f, *insidef)
1949 1949
1950 1950 def setparents(self, p1, p2=nullid):
1951 1951 self[None].setparents(p1, p2)
1952 1952 self._quick_access_changeid_invalidate()
1953 1953
1954 1954 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1955 1955 """changeid must be a changeset revision, if specified.
1956 1956 fileid can be a file revision or node."""
1957 1957 return context.filectx(
1958 1958 self, path, changeid, fileid, changectx=changectx
1959 1959 )
1960 1960
1961 1961 def getcwd(self):
1962 1962 return self.dirstate.getcwd()
1963 1963
1964 1964 def pathto(self, f, cwd=None):
1965 1965 return self.dirstate.pathto(f, cwd)
1966 1966
1967 1967 def _loadfilter(self, filter):
1968 1968 if filter not in self._filterpats:
1969 1969 l = []
1970 1970 for pat, cmd in self.ui.configitems(filter):
1971 1971 if cmd == b'!':
1972 1972 continue
1973 1973 mf = matchmod.match(self.root, b'', [pat])
1974 1974 fn = None
1975 1975 params = cmd
1976 1976 for name, filterfn in pycompat.iteritems(self._datafilters):
1977 1977 if cmd.startswith(name):
1978 1978 fn = filterfn
1979 1979 params = cmd[len(name) :].lstrip()
1980 1980 break
1981 1981 if not fn:
1982 1982 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1983 1983 fn.__name__ = 'commandfilter'
1984 1984 # Wrap old filters not supporting keyword arguments
1985 1985 if not pycompat.getargspec(fn)[2]:
1986 1986 oldfn = fn
1987 1987 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1988 1988 fn.__name__ = 'compat-' + oldfn.__name__
1989 1989 l.append((mf, fn, params))
1990 1990 self._filterpats[filter] = l
1991 1991 return self._filterpats[filter]
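
# Illustrative config sketch (not part of this module; the commands are
# examples only). Filters come from the [encode]/[decode] sections; a
# command runs as a pipe by default (or with the 'pipe:' prefix), or via
# temporary files with the 'tempfile:' prefix:
#
#   [encode]
#   **.txt = tempfile: unix2dos INFILE OUTFILE
#
#   [decode]
#   **.txt = pipe: dos2unix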
1992 1992
1993 1993 def _filter(self, filterpats, filename, data):
1994 1994 for mf, fn, cmd in filterpats:
1995 1995 if mf(filename):
1996 1996 self.ui.debug(
1997 1997 b"filtering %s through %s\n"
1998 1998 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1999 1999 )
2000 2000 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2001 2001 break
2002 2002
2003 2003 return data
2004 2004
2005 2005 @unfilteredpropertycache
2006 2006 def _encodefilterpats(self):
2007 2007 return self._loadfilter(b'encode')
2008 2008
2009 2009 @unfilteredpropertycache
2010 2010 def _decodefilterpats(self):
2011 2011 return self._loadfilter(b'decode')
2012 2012
2013 2013 def adddatafilter(self, name, filter):
2014 2014 self._datafilters[name] = filter
2015 2015
2016 2016 def wread(self, filename):
2017 2017 if self.wvfs.islink(filename):
2018 2018 data = self.wvfs.readlink(filename)
2019 2019 else:
2020 2020 data = self.wvfs.read(filename)
2021 2021 return self._filter(self._encodefilterpats, filename, data)
2022 2022
2023 2023 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2024 2024 """write ``data`` into ``filename`` in the working directory
2025 2025
2026 2026 This returns the length of the written (maybe decoded) data.
2027 2027 """
2028 2028 data = self._filter(self._decodefilterpats, filename, data)
2029 2029 if b'l' in flags:
2030 2030 self.wvfs.symlink(data, filename)
2031 2031 else:
2032 2032 self.wvfs.write(
2033 2033 filename, data, backgroundclose=backgroundclose, **kwargs
2034 2034 )
2035 2035 if b'x' in flags:
2036 2036 self.wvfs.setflags(filename, False, True)
2037 2037 else:
2038 2038 self.wvfs.setflags(filename, False, False)
2039 2039 return len(data)
2040 2040
2041 2041 def wwritedata(self, filename, data):
2042 2042 return self._filter(self._decodefilterpats, filename, data)
2043 2043
2044 2044 def currenttransaction(self):
2045 2045 """return the current transaction or None if non exists"""
2046 2046 if self._transref:
2047 2047 tr = self._transref()
2048 2048 else:
2049 2049 tr = None
2050 2050
2051 2051 if tr and tr.running():
2052 2052 return tr
2053 2053 return None
2054 2054
2055 2055 def transaction(self, desc, report=None):
2056 2056 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2057 2057 b'devel', b'check-locks'
2058 2058 ):
2059 2059 if self._currentlock(self._lockref) is None:
2060 2060 raise error.ProgrammingError(b'transaction requires locking')
2061 2061 tr = self.currenttransaction()
2062 2062 if tr is not None:
2063 2063 return tr.nest(name=desc)
2064 2064
2065 2065 # abort here if the journal already exists
2066 2066 if self.svfs.exists(b"journal"):
2067 2067 raise error.RepoError(
2068 2068 _(b"abandoned transaction found"),
2069 2069 hint=_(b"run 'hg recover' to clean up transaction"),
2070 2070 )
2071 2071
2072 2072 idbase = b"%.40f#%f" % (random.random(), time.time())
2073 2073 ha = hex(hashutil.sha1(idbase).digest())
2074 2074 txnid = b'TXN:' + ha
2075 2075 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2076 2076
2077 2077 self._writejournal(desc)
2078 2078 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2079 2079 if report:
2080 2080 rp = report
2081 2081 else:
2082 2082 rp = self.ui.warn
2083 2083 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2084 2084 # we must avoid cyclic reference between repo and transaction.
2085 2085 reporef = weakref.ref(self)
2086 2086 # Code to track tag movement
2087 2087 #
2088 2088 # Since tags are all handled as file content, it is actually quite hard
2089 2089 # to track these movements from a code perspective. So we fall back to
2090 2090 # tracking at the repository level. One could envision tracking changes
2091 2091 # to the '.hgtags' file through changegroup apply, but that fails to
2092 2092 # cope with cases where a transaction exposes new heads without a
2093 2093 # changegroup being involved (e.g. phase movement).
2094 2094 #
2095 2095 # For now, we gate the feature behind a flag since it likely comes
2096 2096 # with performance impacts. The current code runs more often than needed
2097 2097 # and does not use caches as much as it could. The current focus is on
2098 2098 # the behavior of the feature, so we disable it by default. The flag
2099 2099 # will be removed when we are happy with the performance impact.
2100 2100 #
2101 2101 # Once this feature is no longer experimental move the following
2102 2102 # documentation to the appropriate help section:
2103 2103 #
2104 2104 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2105 2105 # tags (new or changed or deleted tags). In addition the details of
2106 2106 # these changes are made available in a file at:
2107 2107 # ``REPOROOT/.hg/changes/tags.changes``.
2108 2108 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2109 2109 # might exist from a previous transaction even if no tags were touched
2110 2110 # in this one. Changes are recorded in a line-based format::
2111 2111 #
2112 2112 # <action> <hex-node> <tag-name>\n
2113 2113 #
2114 2114 # Actions are defined as follows:
2115 2115 # "-R": tag is removed,
2116 2116 # "+A": tag is added,
2117 2117 # "-M": tag is moved (old value),
2118 2118 # "+M": tag is moved (new value),
2119 2119 tracktags = lambda x: None
2120 2120 # experimental config: experimental.hook-track-tags
2121 2121 shouldtracktags = self.ui.configbool(
2122 2122 b'experimental', b'hook-track-tags'
2123 2123 )
2124 2124 if desc != b'strip' and shouldtracktags:
2125 2125 oldheads = self.changelog.headrevs()
2126 2126
2127 2127 def tracktags(tr2):
2128 2128 repo = reporef()
2129 2129 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2130 2130 newheads = repo.changelog.headrevs()
2131 2131 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2132 2132 # note: we compare lists here;
2133 2133 # as we do it only once, building a set would not be cheaper
2134 2134 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2135 2135 if changes:
2136 2136 tr2.hookargs[b'tag_moved'] = b'1'
2137 2137 with repo.vfs(
2138 2138 b'changes/tags.changes', b'w', atomictemp=True
2139 2139 ) as changesfile:
2140 2140 # note: we do not register the file with the transaction
2141 2141 # because we need it to still exist when the transaction
2142 2142 # is closed (for txnclose hooks)
2143 2143 tagsmod.writediff(changesfile, changes)
2144 2144
2145 2145 def validate(tr2):
2146 2146 """will run pre-closing hooks"""
2147 2147 # XXX the transaction API is a bit lacking here so we take a hacky
2148 2148 # path for now
2149 2149 #
2150 2150 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2151 2151 # dict is copied before these run. In addition, we need the data
2152 2152 # available to in-memory hooks too.
2153 2153 #
2154 2154 # Moreover, we also need to make sure this runs before txnclose
2155 2155 # hooks and there is no "pending" mechanism that would execute
2156 2156 # logic only if hooks are about to run.
2157 2157 #
2158 2158 # Fixing this limitation of the transaction is also needed to track
2159 2159 # other families of changes (bookmarks, phases, obsolescence).
2160 2160 #
2161 2161 # This will have to be fixed before we remove the experimental
2162 2162 # gating.
2163 2163 tracktags(tr2)
2164 2164 repo = reporef()
2165 2165
2166 2166 singleheadopt = (b'experimental', b'single-head-per-branch')
2167 2167 singlehead = repo.ui.configbool(*singleheadopt)
2168 2168 if singlehead:
2169 2169 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2170 2170 accountclosed = singleheadsub.get(
2171 2171 b"account-closed-heads", False
2172 2172 )
2173 2173 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2174 2174 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2175 2175 for name, (old, new) in sorted(
2176 2176 tr.changes[b'bookmarks'].items()
2177 2177 ):
2178 2178 args = tr.hookargs.copy()
2179 2179 args.update(bookmarks.preparehookargs(name, old, new))
2180 2180 repo.hook(
2181 2181 b'pretxnclose-bookmark',
2182 2182 throw=True,
2183 2183 **pycompat.strkwargs(args)
2184 2184 )
2185 2185 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2186 2186 cl = repo.unfiltered().changelog
2187 2187 for revs, (old, new) in tr.changes[b'phases']:
2188 2188 for rev in revs:
2189 2189 args = tr.hookargs.copy()
2190 2190 node = hex(cl.node(rev))
2191 2191 args.update(phases.preparehookargs(node, old, new))
2192 2192 repo.hook(
2193 2193 b'pretxnclose-phase',
2194 2194 throw=True,
2195 2195 **pycompat.strkwargs(args)
2196 2196 )
2197 2197
2198 2198 repo.hook(
2199 2199 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2200 2200 )
2201 2201
2202 2202 def releasefn(tr, success):
2203 2203 repo = reporef()
2204 2204 if repo is None:
2205 2205 # If the repo has been GC'd (and this release function is being
2206 2206 # called from transaction.__del__), there's not much we can do,
2207 2207 # so just leave the unfinished transaction there and let the
2208 2208 # user run `hg recover`.
2209 2209 return
2210 2210 if success:
2211 2211 # this should be explicitly invoked here, because
2212 2212 # in-memory changes aren't written out when closing
2213 2213 # the transaction if tr.addfilegenerator (via
2214 2214 # dirstate.write or so) wasn't invoked while the
2215 2215 # transaction was running
2216 2216 repo.dirstate.write(None)
2217 2217 else:
2218 2218 # discard all changes (including ones already written
2219 2219 # out) in this transaction
2220 2220 narrowspec.restorebackup(self, b'journal.narrowspec')
2221 2221 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2222 2222 repo.dirstate.restorebackup(None, b'journal.dirstate')
2223 2223
2224 2224 repo.invalidate(clearfilecache=True)
2225 2225
2226 2226 tr = transaction.transaction(
2227 2227 rp,
2228 2228 self.svfs,
2229 2229 vfsmap,
2230 2230 b"journal",
2231 2231 b"undo",
2232 2232 aftertrans(renames),
2233 2233 self.store.createmode,
2234 2234 validator=validate,
2235 2235 releasefn=releasefn,
2236 2236 checkambigfiles=_cachedfiles,
2237 2237 name=desc,
2238 2238 )
2239 2239 tr.changes[b'origrepolen'] = len(self)
2240 2240 tr.changes[b'obsmarkers'] = set()
2241 2241 tr.changes[b'phases'] = []
2242 2242 tr.changes[b'bookmarks'] = {}
2243 2243
2244 2244 tr.hookargs[b'txnid'] = txnid
2245 2245 tr.hookargs[b'txnname'] = desc
2246 tr.hookargs[b'changes'] = tr.changes
2246 2247 # note: writing the fncache only during finalize means that the file is
2247 2248 # outdated when running hooks. As fncache is used for streaming clones,
2248 2249 # this is not expected to break anything that happens during the hooks.
2249 2250 tr.addfinalize(b'flush-fncache', self.store.write)
2250 2251
2251 2252 def txnclosehook(tr2):
2252 2253 """To be run if transaction is successful, will schedule a hook run
2253 2254 """
2254 2255 # Don't reference tr2 in hook() so we don't hold a reference.
2255 2256 # This reduces memory consumption when there are multiple
2256 2257 # transactions per lock. This can likely go away if issue5045
2257 2258 # fixes the function accumulation.
2258 2259 hookargs = tr2.hookargs
2259 2260
2260 2261 def hookfunc(unused_success):
2261 2262 repo = reporef()
2262 2263 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2263 2264 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2264 2265 for name, (old, new) in bmchanges:
2265 2266 args = tr.hookargs.copy()
2266 2267 args.update(bookmarks.preparehookargs(name, old, new))
2267 2268 repo.hook(
2268 2269 b'txnclose-bookmark',
2269 2270 throw=False,
2270 2271 **pycompat.strkwargs(args)
2271 2272 )
2272 2273
2273 2274 if hook.hashook(repo.ui, b'txnclose-phase'):
2274 2275 cl = repo.unfiltered().changelog
2275 2276 phasemv = sorted(
2276 2277 tr.changes[b'phases'], key=lambda r: r[0][0]
2277 2278 )
2278 2279 for revs, (old, new) in phasemv:
2279 2280 for rev in revs:
2280 2281 args = tr.hookargs.copy()
2281 2282 node = hex(cl.node(rev))
2282 2283 args.update(phases.preparehookargs(node, old, new))
2283 2284 repo.hook(
2284 2285 b'txnclose-phase',
2285 2286 throw=False,
2286 2287 **pycompat.strkwargs(args)
2287 2288 )
2288 2289
2289 2290 repo.hook(
2290 2291 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2291 2292 )
2292 2293
2293 2294 reporef()._afterlock(hookfunc)
2294 2295
2295 2296 tr.addfinalize(b'txnclose-hook', txnclosehook)
2296 2297 # Include a leading "-" to make it happen before the transaction summary
2297 2298 # reports registered via scmutil.registersummarycallback() whose names
2298 2299 # are 00-txnreport etc. That way, the caches will be warm when the
2299 2300 # callbacks run.
2300 2301 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2301 2302
2302 2303 def txnaborthook(tr2):
2303 2304 """To be run if transaction is aborted
2304 2305 """
2305 2306 reporef().hook(
2306 2307 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2307 2308 )
2308 2309
2309 2310 tr.addabort(b'txnabort-hook', txnaborthook)
2310 2311 # avoid eager cache invalidation. in-memory data should be identical
2311 2312 # to stored data if transaction has no error.
2312 2313 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2313 2314 self._transref = weakref.ref(tr)
2314 2315 scmutil.registersummarycallback(self, tr, desc)
2315 2316 return tr
2316 2317
2317 2318 def _journalfiles(self):
2318 2319 return (
2319 2320 (self.svfs, b'journal'),
2320 2321 (self.svfs, b'journal.narrowspec'),
2321 2322 (self.vfs, b'journal.narrowspec.dirstate'),
2322 2323 (self.vfs, b'journal.dirstate'),
2323 2324 (self.vfs, b'journal.branch'),
2324 2325 (self.vfs, b'journal.desc'),
2325 2326 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2326 2327 (self.svfs, b'journal.phaseroots'),
2327 2328 )
2328 2329
2329 2330 def undofiles(self):
2330 2331 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2331 2332
2332 2333 @unfilteredmethod
2333 2334 def _writejournal(self, desc):
2334 2335 self.dirstate.savebackup(None, b'journal.dirstate')
2335 2336 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2336 2337 narrowspec.savebackup(self, b'journal.narrowspec')
2337 2338 self.vfs.write(
2338 2339 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2339 2340 )
2340 2341 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2341 2342 bookmarksvfs = bookmarks.bookmarksvfs(self)
2342 2343 bookmarksvfs.write(
2343 2344 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2344 2345 )
2345 2346 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2346 2347
2347 2348 def recover(self):
2348 2349 with self.lock():
2349 2350 if self.svfs.exists(b"journal"):
2350 2351 self.ui.status(_(b"rolling back interrupted transaction\n"))
2351 2352 vfsmap = {
2352 2353 b'': self.svfs,
2353 2354 b'plain': self.vfs,
2354 2355 }
2355 2356 transaction.rollback(
2356 2357 self.svfs,
2357 2358 vfsmap,
2358 2359 b"journal",
2359 2360 self.ui.warn,
2360 2361 checkambigfiles=_cachedfiles,
2361 2362 )
2362 2363 self.invalidate()
2363 2364 return True
2364 2365 else:
2365 2366 self.ui.warn(_(b"no interrupted transaction available\n"))
2366 2367 return False
2367 2368
2368 2369 def rollback(self, dryrun=False, force=False):
2369 2370 wlock = lock = dsguard = None
2370 2371 try:
2371 2372 wlock = self.wlock()
2372 2373 lock = self.lock()
2373 2374 if self.svfs.exists(b"undo"):
2374 2375 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2375 2376
2376 2377 return self._rollback(dryrun, force, dsguard)
2377 2378 else:
2378 2379 self.ui.warn(_(b"no rollback information available\n"))
2379 2380 return 1
2380 2381 finally:
2381 2382 release(dsguard, lock, wlock)
2382 2383
2383 2384 @unfilteredmethod # Until we get smarter cache management
2384 2385 def _rollback(self, dryrun, force, dsguard):
2385 2386 ui = self.ui
2386 2387 try:
2387 2388 args = self.vfs.read(b'undo.desc').splitlines()
2388 2389 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2389 2390 if len(args) >= 3:
2390 2391 detail = args[2]
2391 2392 oldtip = oldlen - 1
2392 2393
2393 2394 if detail and ui.verbose:
2394 2395 msg = _(
2395 2396 b'repository tip rolled back to revision %d'
2396 2397 b' (undo %s: %s)\n'
2397 2398 ) % (oldtip, desc, detail)
2398 2399 else:
2399 2400 msg = _(
2400 2401 b'repository tip rolled back to revision %d (undo %s)\n'
2401 2402 ) % (oldtip, desc)
2402 2403 except IOError:
2403 2404 msg = _(b'rolling back unknown transaction\n')
2404 2405 desc = None
2405 2406
2406 2407 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2407 2408 raise error.Abort(
2408 2409 _(
2409 2410 b'rollback of last commit while not checked out '
2410 2411 b'may lose data'
2411 2412 ),
2412 2413 hint=_(b'use -f to force'),
2413 2414 )
2414 2415
2415 2416 ui.status(msg)
2416 2417 if dryrun:
2417 2418 return 0
2418 2419
2419 2420 parents = self.dirstate.parents()
2420 2421 self.destroying()
2421 2422 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2422 2423 transaction.rollback(
2423 2424 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2424 2425 )
2425 2426 bookmarksvfs = bookmarks.bookmarksvfs(self)
2426 2427 if bookmarksvfs.exists(b'undo.bookmarks'):
2427 2428 bookmarksvfs.rename(
2428 2429 b'undo.bookmarks', b'bookmarks', checkambig=True
2429 2430 )
2430 2431 if self.svfs.exists(b'undo.phaseroots'):
2431 2432 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2432 2433 self.invalidate()
2433 2434
2434 2435 has_node = self.changelog.index.has_node
2435 2436 parentgone = any(not has_node(p) for p in parents)
2436 2437 if parentgone:
2437 2438 # prevent dirstateguard from overwriting already restored one
2438 2439 dsguard.close()
2439 2440
2440 2441 narrowspec.restorebackup(self, b'undo.narrowspec')
2441 2442 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2442 2443 self.dirstate.restorebackup(None, b'undo.dirstate')
2443 2444 try:
2444 2445 branch = self.vfs.read(b'undo.branch')
2445 2446 self.dirstate.setbranch(encoding.tolocal(branch))
2446 2447 except IOError:
2447 2448 ui.warn(
2448 2449 _(
2449 2450 b'named branch could not be reset: '
2450 2451 b'current branch is still \'%s\'\n'
2451 2452 )
2452 2453 % self.dirstate.branch()
2453 2454 )
2454 2455
2455 2456 parents = tuple([p.rev() for p in self[None].parents()])
2456 2457 if len(parents) > 1:
2457 2458 ui.status(
2458 2459 _(
2459 2460 b'working directory now based on '
2460 2461 b'revisions %d and %d\n'
2461 2462 )
2462 2463 % parents
2463 2464 )
2464 2465 else:
2465 2466 ui.status(
2466 2467 _(b'working directory now based on revision %d\n') % parents
2467 2468 )
2468 2469 mergemod.mergestate.clean(self, self[b'.'].node())
2469 2470
2470 2471 # TODO: if we know which new heads may result from this rollback, pass
2471 2472 # them to destroy(), which will prevent the branchhead cache from being
2472 2473 # invalidated.
2473 2474 self.destroyed()
2474 2475 return 0
2475 2476
2476 2477 def _buildcacheupdater(self, newtransaction):
2477 2478 """called during transaction to build the callback updating cache
2478 2479
2479 2480 Lives on the repository to help extensions that might want to augment
2480 2481 this logic. For this purpose, the created transaction is passed to the
2481 2482 method.
2482 2483 """
2483 2484 # we must avoid cyclic reference between repo and transaction.
2484 2485 reporef = weakref.ref(self)
2485 2486
2486 2487 def updater(tr):
2487 2488 repo = reporef()
2488 2489 repo.updatecaches(tr)
2489 2490
2490 2491 return updater
2491 2492
2492 2493 @unfilteredmethod
2493 2494 def updatecaches(self, tr=None, full=False):
2494 2495 """warm appropriate caches
2495 2496
2496 2497 If this function is called after a transaction has closed, the
2497 2498 transaction will be available as the 'tr' argument. This can be used to
2498 2499 selectively update caches relevant to the changes in that transaction.
2499 2500 
2500 2501 If 'full' is set, make sure all caches the function knows about have
2501 2502 up-to-date data, even the ones usually loaded more lazily.
2502 2503 """
2503 2504 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2504 2505 # During strip, many caches are invalid, but a
2505 2506 # later call to `destroyed` will refresh them.
2506 2507 return
2507 2508
2508 2509 if tr is None or tr.changes[b'origrepolen'] < len(self):
2509 2510 # accessing the 'served' branchmap should refresh all the others,
2510 2511 self.ui.debug(b'updating the branch cache\n')
2511 2512 self.filtered(b'served').branchmap()
2512 2513 self.filtered(b'served.hidden').branchmap()
2513 2514
2514 2515 if full:
2515 2516 unfi = self.unfiltered()
2516 2517
2517 2518 self.changelog.update_caches(transaction=tr)
2518 2519 self.manifestlog.update_caches(transaction=tr)
2519 2520
2520 2521 rbc = unfi.revbranchcache()
2521 2522 for r in unfi.changelog:
2522 2523 rbc.branchinfo(r)
2523 2524 rbc.write()
2524 2525
2525 2526 # ensure the working copy parents are in the manifestfulltextcache
2526 2527 for ctx in self[b'.'].parents():
2527 2528 ctx.manifest() # accessing the manifest is enough
2528 2529
2529 2530 # accessing fnode cache warms the cache
2530 2531 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2531 2532 # accessing tags warms the cache
2532 2533 self.tags()
2533 2534 self.filtered(b'served').tags()
2534 2535
2535 2536 # The `full` arg is documented as updating even the lazily-loaded
2536 2537 # caches immediately, so we're forcing a write to cause these caches
2537 2538 # to be warmed up even if they haven't explicitly been requested
2538 2539 # yet (if they've never been used by hg, they won't ever have been
2539 2540 # written, even if they're a subset of another kind of cache that
2540 2541 # *has* been used).
2541 2542 for filt in repoview.filtertable.keys():
2542 2543 filtered = self.filtered(filt)
2543 2544 filtered.branchmap().write(filtered)
2544 2545
2545 2546 def invalidatecaches(self):
2546 2547
2547 2548 if '_tagscache' in vars(self):
2548 2549 # can't use delattr on proxy
2549 2550 del self.__dict__['_tagscache']
2550 2551
2551 2552 self._branchcaches.clear()
2552 2553 self.invalidatevolatilesets()
2553 2554 self._sparsesignaturecache.clear()
2554 2555
2555 2556 def invalidatevolatilesets(self):
2556 2557 self.filteredrevcache.clear()
2557 2558 obsolete.clearobscaches(self)
2558 2559 self._quick_access_changeid_invalidate()
2559 2560
2560 2561 def invalidatedirstate(self):
2561 2562 '''Invalidates the dirstate, causing the next call to dirstate
2562 2563 to check if it was modified since the last time it was read,
2563 2564 rereading it if it has.
2564 2565
2565 2566 This is different from dirstate.invalidate() in that it doesn't always
2566 2567 reread the dirstate. Use dirstate.invalidate() if you want to
2567 2568 explicitly read the dirstate again (i.e. restoring it to a previous
2568 2569 known good state).'''
2569 2570 if hasunfilteredcache(self, 'dirstate'):
2570 2571 for k in self.dirstate._filecache:
2571 2572 try:
2572 2573 delattr(self.dirstate, k)
2573 2574 except AttributeError:
2574 2575 pass
2575 2576 delattr(self.unfiltered(), 'dirstate')
2576 2577
2577 2578 def invalidate(self, clearfilecache=False):
2578 2579 '''Invalidates both store and non-store parts other than dirstate
2579 2580
2580 2581 If a transaction is running, invalidation of store is omitted,
2581 2582 because discarding in-memory changes might cause inconsistency
2582 2583 (e.g. an incomplete fncache causes unintentional failure, but a
2583 2584 redundant one doesn't).
2584 2585 '''
2585 2586 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2586 2587 for k in list(self._filecache.keys()):
2587 2588 # dirstate is invalidated separately in invalidatedirstate()
2588 2589 if k == b'dirstate':
2589 2590 continue
2590 2591 if (
2591 2592 k == b'changelog'
2592 2593 and self.currenttransaction()
2593 2594 and self.changelog._delayed
2594 2595 ):
2595 2596 # The changelog object may store unwritten revisions. We don't
2596 2597 # want to lose them.
2597 2598 # TODO: Solve the problem instead of working around it.
2598 2599 continue
2599 2600
2600 2601 if clearfilecache:
2601 2602 del self._filecache[k]
2602 2603 try:
2603 2604 delattr(unfiltered, k)
2604 2605 except AttributeError:
2605 2606 pass
2606 2607 self.invalidatecaches()
2607 2608 if not self.currenttransaction():
2608 2609 # TODO: Changing contents of store outside transaction
2609 2610 # causes inconsistency. We should make in-memory store
2610 2611 # changes detectable, and abort if changed.
2611 2612 self.store.invalidatecaches()
2612 2613
2613 2614 def invalidateall(self):
2614 2615 '''Fully invalidates both store and non-store parts, causing the
2615 2616 subsequent operation to reread any outside changes.'''
2616 2617 # extension should hook this to invalidate its caches
2617 2618 self.invalidate()
2618 2619 self.invalidatedirstate()
2619 2620
2620 2621 @unfilteredmethod
2621 2622 def _refreshfilecachestats(self, tr):
2622 2623 """Reload stats of cached files so that they are flagged as valid"""
2623 2624 for k, ce in self._filecache.items():
2624 2625 k = pycompat.sysstr(k)
2625 2626 if k == 'dirstate' or k not in self.__dict__:
2626 2627 continue
2627 2628 ce.refresh()
2628 2629
2629 2630 def _lock(
2630 2631 self,
2631 2632 vfs,
2632 2633 lockname,
2633 2634 wait,
2634 2635 releasefn,
2635 2636 acquirefn,
2636 2637 desc,
2637 2638 inheritchecker=None,
2638 2639 parentenvvar=None,
2639 2640 ):
2640 2641 parentlock = None
2641 2642 # the contents of parentenvvar are used by the underlying lock to
2642 2643 # determine whether it can be inherited
2643 2644 if parentenvvar is not None:
2644 2645 parentlock = encoding.environ.get(parentenvvar)
2645 2646
2646 2647 timeout = 0
2647 2648 warntimeout = 0
2648 2649 if wait:
2649 2650 timeout = self.ui.configint(b"ui", b"timeout")
2650 2651 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2651 2652 # internal config: ui.signal-safe-lock
2652 2653 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2653 2654
2654 2655 l = lockmod.trylock(
2655 2656 self.ui,
2656 2657 vfs,
2657 2658 lockname,
2658 2659 timeout,
2659 2660 warntimeout,
2660 2661 releasefn=releasefn,
2661 2662 acquirefn=acquirefn,
2662 2663 desc=desc,
2663 2664 inheritchecker=inheritchecker,
2664 2665 parentlock=parentlock,
2665 2666 signalsafe=signalsafe,
2666 2667 )
2667 2668 return l
2668 2669
2669 2670 def _afterlock(self, callback):
2670 2671 """add a callback to be run when the repository is fully unlocked
2671 2672
2672 2673 The callback will be executed when the outermost lock is released
2673 2674 (with wlock being higher level than 'lock')."""
2674 2675 for ref in (self._wlockref, self._lockref):
2675 2676 l = ref and ref()
2676 2677 if l and l.held:
2677 2678 l.postrelease.append(callback)
2678 2679 break
2679 2680 else: # no lock has been found.
2680 2681 callback(True)
2681 2682
2682 2683 def lock(self, wait=True):
2683 2684 '''Lock the repository store (.hg/store) and return a weak reference
2684 2685 to the lock. Use this before modifying the store (e.g. committing or
2685 2686 stripping). If you are opening a transaction, get a lock as well.
2686 2687 
2687 2688 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2688 2689 'wlock' first to avoid a deadlock hazard.'''
2689 2690 l = self._currentlock(self._lockref)
2690 2691 if l is not None:
2691 2692 l.lock()
2692 2693 return l
2693 2694
2694 2695 l = self._lock(
2695 2696 vfs=self.svfs,
2696 2697 lockname=b"lock",
2697 2698 wait=wait,
2698 2699 releasefn=None,
2699 2700 acquirefn=self.invalidate,
2700 2701 desc=_(b'repository %s') % self.origroot,
2701 2702 )
2702 2703 self._lockref = weakref.ref(l)
2703 2704 return l
2704 2705
2705 2706 def _wlockchecktransaction(self):
2706 2707 if self.currenttransaction() is not None:
2707 2708 raise error.LockInheritanceContractViolation(
2708 2709 b'wlock cannot be inherited in the middle of a transaction'
2709 2710 )
2710 2711
2711 2712 def wlock(self, wait=True):
2712 2713 '''Lock the non-store parts of the repository (everything under
2713 2714 .hg except .hg/store) and return a weak reference to the lock.
2714 2715
2715 2716 Use this before modifying files in .hg.
2716 2717
2717 2718 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2718 2719 'wlock' first to avoid a deadlock hazard.'''
2719 2720 l = self._wlockref and self._wlockref()
2720 2721 if l is not None and l.held:
2721 2722 l.lock()
2722 2723 return l
2723 2724
2724 2725 # We do not need to check for non-waiting lock acquisition. Such
2725 2726 # acquisitions would not cause deadlock, as they would just fail.
2726 2727 if wait and (
2727 2728 self.ui.configbool(b'devel', b'all-warnings')
2728 2729 or self.ui.configbool(b'devel', b'check-locks')
2729 2730 ):
2730 2731 if self._currentlock(self._lockref) is not None:
2731 2732 self.ui.develwarn(b'"wlock" acquired after "lock"')
2732 2733
2733 2734 def unlock():
2734 2735 if self.dirstate.pendingparentchange():
2735 2736 self.dirstate.invalidate()
2736 2737 else:
2737 2738 self.dirstate.write(None)
2738 2739
2739 2740 self._filecache[b'dirstate'].refresh()
2740 2741
2741 2742 l = self._lock(
2742 2743 self.vfs,
2743 2744 b"wlock",
2744 2745 wait,
2745 2746 unlock,
2746 2747 self.invalidatedirstate,
2747 2748 _(b'working directory of %s') % self.origroot,
2748 2749 inheritchecker=self._wlockchecktransaction,
2749 2750 parentenvvar=b'HG_WLOCK_LOCKER',
2750 2751 )
2751 2752 self._wlockref = weakref.ref(l)
2752 2753 return l
2753 2754
2754 2755 def _currentlock(self, lockref):
2755 2756 """Returns the lock if it's held, or None if it's not."""
2756 2757 if lockref is None:
2757 2758 return None
2758 2759 l = lockref()
2759 2760 if l is None or not l.held:
2760 2761 return None
2761 2762 return l
2762 2763
2763 2764 def currentwlock(self):
2764 2765 """Returns the wlock if it's held, or None if it's not."""
2765 2766 return self._currentlock(self._wlockref)
2766 2767
2767 2768 def _filecommit(
2768 2769 self,
2769 2770 fctx,
2770 2771 manifest1,
2771 2772 manifest2,
2772 2773 linkrev,
2773 2774 tr,
2774 2775 changelist,
2775 2776 includecopymeta,
2776 2777 ):
2777 2778 """
2778 2779 commit an individual file as part of a larger transaction
2779 2780 """
2780 2781
2781 2782 fname = fctx.path()
2782 2783 fparent1 = manifest1.get(fname, nullid)
2783 2784 fparent2 = manifest2.get(fname, nullid)
2784 2785 if isinstance(fctx, context.filectx):
2785 2786 node = fctx.filenode()
2786 2787 if node in [fparent1, fparent2]:
2787 2788 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2788 2789 if (
2789 2790 fparent1 != nullid
2790 2791 and manifest1.flags(fname) != fctx.flags()
2791 2792 ) or (
2792 2793 fparent2 != nullid
2793 2794 and manifest2.flags(fname) != fctx.flags()
2794 2795 ):
2795 2796 changelist.append(fname)
2796 2797 return node
2797 2798
2798 2799 flog = self.file(fname)
2799 2800 meta = {}
2800 2801 cfname = fctx.copysource()
2801 2802 if cfname and cfname != fname:
2802 2803 # Mark the new revision of this file as a copy of another
2803 2804 # file. This copy data will effectively act as a parent
2804 2805 # of this new revision. If this is a merge, the first
2805 2806 # parent will be the nullid (meaning "look up the copy data")
2806 2807 # and the second one will be the other parent. For example:
2807 2808 #
2808 2809 # 0 --- 1 --- 3 rev1 changes file foo
2809 2810 # \ / rev2 renames foo to bar and changes it
2810 2811 # \- 2 -/ rev3 should have bar with all changes and
2811 2812 # should record that bar descends from
2812 2813 # bar in rev2 and foo in rev1
2813 2814 #
2814 2815 # this allows this merge to succeed:
2815 2816 #
2816 2817 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2817 2818 # \ / merging rev3 and rev4 should use bar@rev2
2818 2819 # \- 2 --- 4 as the merge base
2819 2820 #
2820 2821
2821 2822 cnode = manifest1.get(cfname)
2822 2823 newfparent = fparent2
2823 2824
2824 2825 if manifest2: # branch merge
2825 2826 if fparent2 == nullid or cnode is None: # copied on remote side
2826 2827 if cfname in manifest2:
2827 2828 cnode = manifest2[cfname]
2828 2829 newfparent = fparent1
2829 2830
2830 2831 # Here, we used to search backwards through history to try to find
2831 2832 # where the file copy came from if the source of a copy was not in
2832 2833 # the parent manifest. However, this doesn't actually make sense to
2833 2834 # do (what does a copy from something not in your working copy even
2834 2835 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
2835 2836 # the user that copy information was dropped, so if they didn't
2836 2837 # expect this outcome it can be fixed, but this is the correct
2837 2838 # behavior in this circumstance.
2838 2839
2839 2840 if cnode:
2840 2841 self.ui.debug(
2841 2842 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2842 2843 )
2843 2844 if includecopymeta:
2844 2845 meta[b"copy"] = cfname
2845 2846 meta[b"copyrev"] = hex(cnode)
2846 2847 fparent1, fparent2 = nullid, newfparent
2847 2848 else:
2848 2849 self.ui.warn(
2849 2850 _(
2850 2851 b"warning: can't find ancestor for '%s' "
2851 2852 b"copied from '%s'!\n"
2852 2853 )
2853 2854 % (fname, cfname)
2854 2855 )
2855 2856
2856 2857 elif fparent1 == nullid:
2857 2858 fparent1, fparent2 = fparent2, nullid
2858 2859 elif fparent2 != nullid:
2859 2860 # is one parent an ancestor of the other?
2860 2861 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2861 2862 if fparent1 in fparentancestors:
2862 2863 fparent1, fparent2 = fparent2, nullid
2863 2864 elif fparent2 in fparentancestors:
2864 2865 fparent2 = nullid
2865 2866 elif not fparentancestors:
2866 2867 # TODO: this whole if-else might be simplified much more
2867 2868 ms = mergemod.mergestate.read(self)
2868 2869 if (
2869 2870 fname in ms
2870 2871 and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
2871 2872 ):
2872 2873 fparent1, fparent2 = fparent2, nullid
2873 2874
2874 2875 # is the file changed?
2875 2876 text = fctx.data()
2876 2877 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2877 2878 changelist.append(fname)
2878 2879 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2879 2880 # are just the flags changed during merge?
2880 2881 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2881 2882 changelist.append(fname)
2882 2883
2883 2884 return fparent1
2884 2885
2885 2886 def checkcommitpatterns(self, wctx, match, status, fail):
2886 2887 """check for commit arguments that aren't committable"""
2887 2888 if match.isexact() or match.prefix():
2888 2889 matched = set(status.modified + status.added + status.removed)
2889 2890
2890 2891 for f in match.files():
2891 2892 f = self.dirstate.normalize(f)
2892 2893 if f == b'.' or f in matched or f in wctx.substate:
2893 2894 continue
2894 2895 if f in status.deleted:
2895 2896 fail(f, _(b'file not found!'))
2896 2897 # Is it a directory that exists or used to exist?
2897 2898 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2898 2899 d = f + b'/'
2899 2900 for mf in matched:
2900 2901 if mf.startswith(d):
2901 2902 break
2902 2903 else:
2903 2904 fail(f, _(b"no match under directory!"))
2904 2905 elif f not in self.dirstate:
2905 2906 fail(f, _(b"file not tracked!"))
2906 2907
2907 2908 @unfilteredmethod
2908 2909 def commit(
2909 2910 self,
2910 2911 text=b"",
2911 2912 user=None,
2912 2913 date=None,
2913 2914 match=None,
2914 2915 force=False,
2915 2916 editor=None,
2916 2917 extra=None,
2917 2918 ):
2918 2919 """Add a new revision to current repository.
2919 2920
2920 2921 Revision information is gathered from the working directory,
2921 2922 match can be used to filter the committed files. If editor is
2922 2923 supplied, it is called to get a commit message.
2923 2924 """
2924 2925 if extra is None:
2925 2926 extra = {}
2926 2927
2927 2928 def fail(f, msg):
2928 2929 raise error.Abort(b'%s: %s' % (f, msg))
2929 2930
2930 2931 if not match:
2931 2932 match = matchmod.always()
2932 2933
2933 2934 if not force:
2934 2935 match.bad = fail
2935 2936
2936 2937 # lock() for recent changelog (see issue4368)
2937 2938 with self.wlock(), self.lock():
2938 2939 wctx = self[None]
2939 2940 merge = len(wctx.parents()) > 1
2940 2941
2941 2942 if not force and merge and not match.always():
2942 2943 raise error.Abort(
2943 2944 _(
2944 2945 b'cannot partially commit a merge '
2945 2946 b'(do not specify files or patterns)'
2946 2947 )
2947 2948 )
2948 2949
2949 2950 status = self.status(match=match, clean=force)
2950 2951 if force:
2951 2952 status.modified.extend(
2952 2953 status.clean
2953 2954 ) # mq may commit clean files
2954 2955
2955 2956 # check subrepos
2956 2957 subs, commitsubs, newstate = subrepoutil.precommit(
2957 2958 self.ui, wctx, status, match, force=force
2958 2959 )
2959 2960
2960 2961 # make sure all explicit patterns are matched
2961 2962 if not force:
2962 2963 self.checkcommitpatterns(wctx, match, status, fail)
2963 2964
2964 2965 cctx = context.workingcommitctx(
2965 2966 self, status, text, user, date, extra
2966 2967 )
2967 2968
2968 2969 ms = mergemod.mergestate.read(self)
2969 2970 mergeutil.checkunresolved(ms)
2970 2971
2971 2972 # internal config: ui.allowemptycommit
2972 2973 allowemptycommit = (
2973 2974 wctx.branch() != wctx.p1().branch()
2974 2975 or extra.get(b'close')
2975 2976 or merge
2976 2977 or cctx.files()
2977 2978 or self.ui.configbool(b'ui', b'allowemptycommit')
2978 2979 )
2979 2980 if not allowemptycommit:
2980 2981 self.ui.debug(b'nothing to commit, clearing merge state\n')
2981 2982 ms.reset()
2982 2983 return None
2983 2984
2984 2985 if merge and cctx.deleted():
2985 2986 raise error.Abort(_(b"cannot commit merge with missing files"))
2986 2987
2987 2988 if editor:
2988 2989 cctx._text = editor(self, cctx, subs)
2989 2990 edited = text != cctx._text
2990 2991
2991 2992 # Save commit message in case this transaction gets rolled back
2992 2993 # (e.g. by a pretxncommit hook). Leave the content alone on
2993 2994 # the assumption that the user will use the same editor again.
2994 2995 msgfn = self.savecommitmessage(cctx._text)
2995 2996
2996 2997 # commit subs and write new state
2997 2998 if subs:
2998 2999 uipathfn = scmutil.getuipathfn(self)
2999 3000 for s in sorted(commitsubs):
3000 3001 sub = wctx.sub(s)
3001 3002 self.ui.status(
3002 3003 _(b'committing subrepository %s\n')
3003 3004 % uipathfn(subrepoutil.subrelpath(sub))
3004 3005 )
3005 3006 sr = sub.commit(cctx._text, user, date)
3006 3007 newstate[s] = (newstate[s][0], sr)
3007 3008 subrepoutil.writestate(self, newstate)
3008 3009
3009 3010 p1, p2 = self.dirstate.parents()
3010 3011 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3011 3012 try:
3012 3013 self.hook(
3013 3014 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3014 3015 )
3015 3016 with self.transaction(b'commit'):
3016 3017 ret = self.commitctx(cctx, True)
3017 3018 # update bookmarks, dirstate and mergestate
3018 3019 bookmarks.update(self, [p1, p2], ret)
3019 3020 cctx.markcommitted(ret)
3020 3021 ms.reset()
3021 3022 except: # re-raises
3022 3023 if edited:
3023 3024 self.ui.write(
3024 3025 _(b'note: commit message saved in %s\n') % msgfn
3025 3026 )
3026 3027 self.ui.write(
3027 3028 _(
3028 3029 b"note: use 'hg commit --logfile "
3029 3030 b".hg/last-message.txt --edit' to reuse it\n"
3030 3031 )
3031 3032 )
3032 3033 raise
3033 3034
3034 3035 def commithook(unused_success):
3035 3036 # hack for commands that use a temporary commit (e.g. histedit):
3036 3037 # the temporary commit may get stripped before the hook runs
3037 3038 if self.changelog.hasnode(ret):
3038 3039 self.hook(
3039 3040 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3040 3041 )
3041 3042
3042 3043 self._afterlock(commithook)
3043 3044 return ret
3044 3045
3045 3046 @unfilteredmethod
3046 3047 def commitctx(self, ctx, error=False, origctx=None):
3047 3048 """Add a new revision to current repository.
3048 3049 Revision information is passed via the context argument.
3049 3050
3050 3051 ctx.files() should list all files involved in this commit, i.e.
3051 3052 modified/added/removed files. On merge, it may be wider than the
3052 3053 set of files actually committed, since any file nodes derived directly
3053 3054 from p1 or p2 are excluded from the committed ctx.files().
3054 3055
3055 3056 origctx is for convert to work around the problem that bug
3056 3057 fixes to the files list in changesets change hashes. For
3057 3058 convert to be the identity, it can pass an origctx and this
3058 3059 function will use the same files list when it makes sense to
3059 3060 do so.
3060 3061 """
3061 3062
3062 3063 p1, p2 = ctx.p1(), ctx.p2()
3063 3064 user = ctx.user()
3064 3065
3065 3066 if self.filecopiesmode == b'changeset-sidedata':
3066 3067 writechangesetcopy = True
3067 3068 writefilecopymeta = True
3068 3069 writecopiesto = None
3069 3070 else:
3070 3071 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3071 3072 writefilecopymeta = writecopiesto != b'changeset-only'
3072 3073 writechangesetcopy = writecopiesto in (
3073 3074 b'changeset-only',
3074 3075 b'compatibility',
3075 3076 )
3076 3077 p1copies, p2copies = None, None
3077 3078 if writechangesetcopy:
3078 3079 p1copies = ctx.p1copies()
3079 3080 p2copies = ctx.p2copies()
3080 3081 filesadded, filesremoved = None, None
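# Illustrative note (assumption): the branch above is driven by the
# experimental config read a few lines up, e.g. in an hgrc:
#
#     [experimental]
#     copies.write-to = compatibility
#
# which records copy metadata both in filelogs and in changeset extras.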
3081 3082 with self.lock(), self.transaction(b"commit") as tr:
3082 3083 trp = weakref.proxy(tr)
3083 3084
3084 3085 if ctx.manifestnode():
3085 3086 # reuse an existing manifest revision
3086 3087 self.ui.debug(b'reusing known manifest\n')
3087 3088 mn = ctx.manifestnode()
3088 3089 files = ctx.files()
3089 3090 if writechangesetcopy:
3090 3091 filesadded = ctx.filesadded()
3091 3092 filesremoved = ctx.filesremoved()
3092 3093 elif ctx.files():
3093 3094 m1ctx = p1.manifestctx()
3094 3095 m2ctx = p2.manifestctx()
3095 3096 mctx = m1ctx.copy()
3096 3097
3097 3098 m = mctx.read()
3098 3099 m1 = m1ctx.read()
3099 3100 m2 = m2ctx.read()
3100 3101
3101 3102 # check in files
3102 3103 added = []
3103 3104 changed = []
3104 3105 removed = list(ctx.removed())
3105 3106 linkrev = len(self)
3106 3107 self.ui.note(_(b"committing files:\n"))
3107 3108 uipathfn = scmutil.getuipathfn(self)
3108 3109 for f in sorted(ctx.modified() + ctx.added()):
3109 3110 self.ui.note(uipathfn(f) + b"\n")
3110 3111 try:
3111 3112 fctx = ctx[f]
3112 3113 if fctx is None:
3113 3114 removed.append(f)
3114 3115 else:
3115 3116 added.append(f)
3116 3117 m[f] = self._filecommit(
3117 3118 fctx,
3118 3119 m1,
3119 3120 m2,
3120 3121 linkrev,
3121 3122 trp,
3122 3123 changed,
3123 3124 writefilecopymeta,
3124 3125 )
3125 3126 m.setflag(f, fctx.flags())
3126 3127 except OSError:
3127 3128 self.ui.warn(
3128 3129 _(b"trouble committing %s!\n") % uipathfn(f)
3129 3130 )
3130 3131 raise
3131 3132 except IOError as inst:
3132 3133 errcode = getattr(inst, 'errno', errno.ENOENT)
3133 3134 if error or errcode and errcode != errno.ENOENT:
3134 3135 self.ui.warn(
3135 3136 _(b"trouble committing %s!\n") % uipathfn(f)
3136 3137 )
3137 3138 raise
3138 3139
3139 3140 # update manifest
3140 3141 removed = [f for f in removed if f in m1 or f in m2]
3141 3142 drop = sorted([f for f in removed if f in m])
3142 3143 for f in drop:
3143 3144 del m[f]
3144 3145 if p2.rev() != nullrev:
3145 3146
3146 3147 @util.cachefunc
3147 3148 def mas():
3148 3149 p1n = p1.node()
3149 3150 p2n = p2.node()
3150 3151 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3151 3152 if not cahs:
3152 3153 cahs = [nullrev]
3153 3154 return [self[r].manifest() for r in cahs]
3154 3155
3155 3156 def deletionfromparent(f):
3156 3157 # When a file is removed relative to p1 in a merge, this
3157 3158 # function determines whether the absence is due to a
3158 3159 # deletion from a parent, or whether the merge commit
3159 3160 # itself deletes the file. We decide this by doing a
3160 3161 # simplified three way merge of the manifest entry for
3161 3162 # the file. There are two ways we decide the merge
3162 3163 # itself didn't delete a file:
3163 3164 # - neither parent (nor the merge) contain the file
3164 3165 # - exactly one parent contains the file, and that
3165 3166 # parent has the same filelog entry as the merge
3166 3167 # ancestor (or all of them if there are two). In other
3167 3168 # words, that parent left the file unchanged while the
3168 3169 # other one deleted it.
3169 3170 # One way to think about this is that deleting a file is
3170 3171 # similar to emptying it, so the list of changed files
3171 3172 # should be similar either way. The computation
3172 3173 # described above is not done directly in _filecommit
3173 3174 # when creating the list of changed files; however,
3174 3175 # it does something very similar by comparing filelog
3175 3176 # nodes.
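# Worked example (assumption): if p1 carries f unchanged relative
# to every merge ancestor while f is absent from p2 and from the
# merge itself, then p2 deleted f, the merge merely kept that
# deletion, and f is not listed in the changeset's files.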
3176 3177 if f in m1:
3177 3178 return f not in m2 and all(
3178 3179 f in ma and ma.find(f) == m1.find(f)
3179 3180 for ma in mas()
3180 3181 )
3181 3182 elif f in m2:
3182 3183 return all(
3183 3184 f in ma and ma.find(f) == m2.find(f)
3184 3185 for ma in mas()
3185 3186 )
3186 3187 else:
3187 3188 return True
3188 3189
3189 3190 removed = [f for f in removed if not deletionfromparent(f)]
3190 3191
3191 3192 files = changed + removed
3192 3193 md = None
3193 3194 if not files:
3194 3195 # if no "files" actually changed in terms of the changelog,
3195 3196 # try hard to detect an unmodified manifest entry so that the
3196 3197 # exact same commit can be reproduced later by convert.
3197 3198 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3198 3199 if not files and md:
3199 3200 self.ui.debug(
3200 3201 b'not reusing manifest (no file change in '
3201 3202 b'changelog, but manifest differs)\n'
3202 3203 )
3203 3204 if files or md:
3204 3205 self.ui.note(_(b"committing manifest\n"))
3205 3206 # we're using narrowmatch here since it's already applied at
3206 3207 # other stages (such as dirstate.walk), so we're already
3207 3208 # ignoring things outside of narrowspec in most cases. The
3208 3209 # one case where we might have files outside the narrowspec
3209 3210 # at this point is merges, and we already error out in the
3210 3211 # case where the merge has files outside of the narrowspec,
3211 3212 # so this is safe.
3212 3213 mn = mctx.write(
3213 3214 trp,
3214 3215 linkrev,
3215 3216 p1.manifestnode(),
3216 3217 p2.manifestnode(),
3217 3218 added,
3218 3219 drop,
3219 3220 match=self.narrowmatch(),
3220 3221 )
3221 3222
3222 3223 if writechangesetcopy:
3223 3224 filesadded = [
3224 3225 f for f in changed if not (f in m1 or f in m2)
3225 3226 ]
3226 3227 filesremoved = removed
3227 3228 else:
3228 3229 self.ui.debug(
3229 3230 b'reusing manifest from p1 (listed files '
3230 3231 b'actually unchanged)\n'
3231 3232 )
3232 3233 mn = p1.manifestnode()
3233 3234 else:
3234 3235 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3235 3236 mn = p1.manifestnode()
3236 3237 files = []
3237 3238
3238 3239 if writecopiesto == b'changeset-only':
3239 3240 # If writing only to changeset extras, use None to indicate that
3240 3241 # no entry should be written. If writing to both, write an empty
3241 3242 # entry to prevent the reader from falling back to reading
3242 3243 # filelogs.
3243 3244 p1copies = p1copies or None
3244 3245 p2copies = p2copies or None
3245 3246 filesadded = filesadded or None
3246 3247 filesremoved = filesremoved or None
3247 3248
3248 3249 if origctx and origctx.manifestnode() == mn:
3249 3250 files = origctx.files()
3250 3251
3251 3252 # update changelog
3252 3253 self.ui.note(_(b"committing changelog\n"))
3253 3254 self.changelog.delayupdate(tr)
3254 3255 n = self.changelog.add(
3255 3256 mn,
3256 3257 files,
3257 3258 ctx.description(),
3258 3259 trp,
3259 3260 p1.node(),
3260 3261 p2.node(),
3261 3262 user,
3262 3263 ctx.date(),
3263 3264 ctx.extra().copy(),
3264 3265 p1copies,
3265 3266 p2copies,
3266 3267 filesadded,
3267 3268 filesremoved,
3268 3269 )
3269 3270 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3270 3271 self.hook(
3271 3272 b'pretxncommit',
3272 3273 throw=True,
3273 3274 node=hex(n),
3274 3275 parent1=xp1,
3275 3276 parent2=xp2,
3276 3277 )
3277 3278 # set the new commit in its proper phase
3278 3279 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3279 3280 if targetphase:
3280 3281 # retracting the boundary does not alter parent changesets.
3281 3282 # if a parent has a higher phase, the resulting phase will
3282 3283 # be compliant anyway
3283 3284 #
3284 3285 # if minimal phase was 0 we don't need to retract anything
3285 3286 phases.registernew(self, tr, targetphase, [n])
3286 3287 return n
3287 3288
3288 3289 @unfilteredmethod
3289 3290 def destroying(self):
3290 3291 '''Inform the repository that nodes are about to be destroyed.
3291 3292 Intended for use by strip and rollback, so there's a common
3292 3293 place for anything that has to be done before destroying history.
3293 3294
3294 3295 This is mostly useful for saving state that is in memory and waiting
3295 3296 to be flushed when the current lock is released. Because a call to
3296 3297 destroyed is imminent, the repo will be invalidated, causing those
3297 3298 changes either to stay in memory (waiting for the next unlock) or to
3298 3299 vanish completely.
3299 3300 '''
3300 3301 # When using the same lock to commit and strip, the phasecache is left
3301 3302 # dirty after committing. Then when we strip, the repo is invalidated,
3302 3303 # causing those changes to disappear.
3303 3304 if '_phasecache' in vars(self):
3304 3305 self._phasecache.write()
3305 3306
3306 3307 @unfilteredmethod
3307 3308 def destroyed(self):
3308 3309 '''Inform the repository that nodes have been destroyed.
3309 3310 Intended for use by strip and rollback, so there's a common
3310 3311 place for anything that has to be done after destroying history.
3311 3312 '''
3312 3313 # When one tries to:
3313 3314 # 1) destroy nodes thus calling this method (e.g. strip)
3314 3315 # 2) use phasecache somewhere (e.g. commit)
3315 3316 #
3316 3317 # then 2) will fail because the phasecache contains nodes that were
3317 3318 # removed. We can either remove phasecache from the filecache,
3318 3319 # causing it to reload next time it is accessed, or simply filter
3319 3320 # the removed nodes now and write the updated cache.
3320 3321 self._phasecache.filterunknown(self)
3321 3322 self._phasecache.write()
3322 3323
3323 3324 # refresh all repository caches
3324 3325 self.updatecaches()
3325 3326
3326 3327 # Ensure the persistent tag cache is updated. Doing it now
3327 3328 # means that the tag cache only has to worry about destroyed
3328 3329 # heads immediately after a strip/rollback. That in turn
3329 3330 # guarantees that "cachetip == currenttip" (comparing both rev
3330 3331 # and node) always means no nodes have been added or destroyed.
3331 3332
3332 3333 # XXX this is suboptimal when qrefresh'ing: we strip the current
3333 3334 # head, refresh the tag cache, then immediately add a new head.
3334 3335 # But I think doing it this way is necessary for the "instant
3335 3336 # tag cache retrieval" case to work.
3336 3337 self.invalidate()
3337 3338
3338 3339 def status(
3339 3340 self,
3340 3341 node1=b'.',
3341 3342 node2=None,
3342 3343 match=None,
3343 3344 ignored=False,
3344 3345 clean=False,
3345 3346 unknown=False,
3346 3347 listsubrepos=False,
3347 3348 ):
3348 3349 '''a convenience method that calls node1.status(node2)'''
3349 3350 return self[node1].status(
3350 3351 node2, match, ignored, clean, unknown, listsubrepos
3351 3352 )
3352 3353
3353 3354 def addpostdsstatus(self, ps):
3354 3355 """Add a callback to run within the wlock, at the point at which status
3355 3356 fixups happen.
3356 3357
3357 3358 On status completion, callback(wctx, status) will be called with the
3358 3359 wlock held, unless the dirstate has changed from underneath or the wlock
3359 3360 couldn't be grabbed.
3360 3361
3361 3362 Callbacks should not capture and use a cached copy of the dirstate --
3362 3363 it might change in the meanwhile. Instead, they should access the
3363 3364 dirstate via wctx.repo().dirstate.
3364 3365
3365 3366 This list is emptied out after each status run -- extensions should
3366 3367 make sure they add to this list each time dirstate.status is called.
3367 3368 Extensions should also make sure they don't call this for statuses
3368 3369 that don't involve the dirstate.
3369 3370 """
3370 3371
3371 3372 # The list is located here for uniqueness reasons -- it is actually
3372 3373 # managed by the workingctx, but that isn't unique per-repo.
3373 3374 self._postdsstatus.append(ps)
3374 3375
3375 3376 def postdsstatus(self):
3376 3377 """Used by workingctx to get the list of post-dirstate-status hooks."""
3377 3378 return self._postdsstatus
3378 3379
3379 3380 def clearpostdsstatus(self):
3380 3381 """Used by workingctx to clear post-dirstate-status hooks."""
3381 3382 del self._postdsstatus[:]
3382 3383
3383 3384 def heads(self, start=None):
3384 3385 if start is None:
3385 3386 cl = self.changelog
3386 3387 headrevs = reversed(cl.headrevs())
3387 3388 return [cl.node(rev) for rev in headrevs]
3388 3389
3389 3390 heads = self.changelog.heads(start)
3390 3391 # sort the output in rev descending order
3391 3392 return sorted(heads, key=self.changelog.rev, reverse=True)
3392 3393
3393 3394 def branchheads(self, branch=None, start=None, closed=False):
3394 3395 '''return a (possibly filtered) list of heads for the given branch
3395 3396
3396 3397 Heads are returned in topological order, from newest to oldest.
3397 3398 If branch is None, use the dirstate branch.
3398 3399 If start is not None, return only heads reachable from start.
3399 3400 If closed is True, return heads that are marked as closed as well.
3400 3401 '''
3401 3402 if branch is None:
3402 3403 branch = self[None].branch()
3403 3404 branches = self.branchmap()
3404 3405 if not branches.hasbranch(branch):
3405 3406 return []
3406 3407 # the cache returns heads ordered lowest to highest
3407 3408 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3408 3409 if start is not None:
3409 3410 # filter out the heads that cannot be reached from startrev
3410 3411 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3411 3412 bheads = [h for h in bheads if h in fbheads]
3412 3413 return bheads
3413 3414
3414 3415 def branches(self, nodes):
3415 3416 if not nodes:
3416 3417 nodes = [self.changelog.tip()]
3417 3418 b = []
3418 3419 for n in nodes:
3419 3420 t = n
3420 3421 while True:
3421 3422 p = self.changelog.parents(n)
3422 3423 if p[1] != nullid or p[0] == nullid:
3423 3424 b.append((t, n, p[0], p[1]))
3424 3425 break
3425 3426 n = p[0]
3426 3427 return b
3427 3428
3428 3429 def between(self, pairs):
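# For each (top, bottom) pair, walk first parents from top towards
# bottom, collecting the nodes at exponentially growing distances
# 1, 2, 4, 8, ... from top (a sampling historically consumed by the
# wire protocol's 'between' command).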
3429 3430 r = []
3430 3431
3431 3432 for top, bottom in pairs:
3432 3433 n, l, i = top, [], 0
3433 3434 f = 1
3434 3435
3435 3436 while n != bottom and n != nullid:
3436 3437 p = self.changelog.parents(n)[0]
3437 3438 if i == f:
3438 3439 l.append(n)
3439 3440 f = f * 2
3440 3441 n = p
3441 3442 i += 1
3442 3443
3443 3444 r.append(l)
3444 3445
3445 3446 return r
3446 3447
3447 3448 def checkpush(self, pushop):
3448 3449 """Extensions can override this function if additional checks have
3449 3450 to be performed before pushing, or call it if they override push
3450 3451 command.
3451 3452 """
3452 3453
3453 3454 @unfilteredpropertycache
3454 3455 def prepushoutgoinghooks(self):
3455 3456 """Return util.hooks consists of a pushop with repo, remote, outgoing
3456 3457 methods, which are called before pushing changesets.
3457 3458 """
3458 3459 return util.hooks()
3459 3460
3460 3461 def pushkey(self, namespace, key, old, new):
3461 3462 try:
3462 3463 tr = self.currenttransaction()
3463 3464 hookargs = {}
3464 3465 if tr is not None:
3465 3466 hookargs.update(tr.hookargs)
3466 3467 hookargs = pycompat.strkwargs(hookargs)
3467 3468 hookargs['namespace'] = namespace
3468 3469 hookargs['key'] = key
3469 3470 hookargs['old'] = old
3470 3471 hookargs['new'] = new
3471 3472 self.hook(b'prepushkey', throw=True, **hookargs)
3472 3473 except error.HookAbort as exc:
3473 3474 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3474 3475 if exc.hint:
3475 3476 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3476 3477 return False
3477 3478 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3478 3479 ret = pushkey.push(self, namespace, key, old, new)
3479 3480
3480 3481 def runhook(unused_success):
3481 3482 self.hook(
3482 3483 b'pushkey',
3483 3484 namespace=namespace,
3484 3485 key=key,
3485 3486 old=old,
3486 3487 new=new,
3487 3488 ret=ret,
3488 3489 )
3489 3490
3490 3491 self._afterlock(runhook)
3491 3492 return ret
3492 3493
3493 3494 def listkeys(self, namespace):
3494 3495 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3495 3496 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3496 3497 values = pushkey.list(self, namespace)
3497 3498 self.hook(b'listkeys', namespace=namespace, values=values)
3498 3499 return values
3499 3500
3500 3501 def debugwireargs(self, one, two, three=None, four=None, five=None):
3501 3502 '''used to test argument passing over the wire'''
3502 3503 return b"%s %s %s %s %s" % (
3503 3504 one,
3504 3505 two,
3505 3506 pycompat.bytestr(three),
3506 3507 pycompat.bytestr(four),
3507 3508 pycompat.bytestr(five),
3508 3509 )
3509 3510
3510 3511 def savecommitmessage(self, text):
3511 3512 fp = self.vfs(b'last-message.txt', b'wb')
3512 3513 try:
3513 3514 fp.write(text)
3514 3515 finally:
3515 3516 fp.close()
3516 3517 return self.pathto(fp.name[len(self.root) + 1 :])
3517 3518
3518 3519
3519 3520 # used to avoid circular references so destructors work
3520 3521 def aftertrans(files):
3521 3522 renamefiles = [tuple(t) for t in files]
3522 3523
3523 3524 def a():
3524 3525 for vfs, src, dest in renamefiles:
3525 3526 # if src and dest refer to the same file, vfs.rename is a no-op,
3526 3527 # leaving both src and dest on disk. delete dest to make sure
3527 3528 # the rename couldn't be such a no-op.
3528 3529 vfs.tryunlink(dest)
3529 3530 try:
3530 3531 vfs.rename(src, dest)
3531 3532 except OSError: # journal file does not yet exist
3532 3533 pass
3533 3534
3534 3535 return a
3535 3536
3536 3537
3537 3538 def undoname(fn):
3538 3539 base, name = os.path.split(fn)
3539 3540 assert name.startswith(b'journal')
3540 3541 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3541 3542
3542 3543
3543 3544 def instance(ui, path, create, intents=None, createopts=None):
3544 3545 localpath = util.urllocalpath(path)
3545 3546 if create:
3546 3547 createrepository(ui, localpath, createopts=createopts)
3547 3548
3548 3549 return makelocalrepository(ui, localpath, intents=intents)
3549 3550
3550 3551
3551 3552 def islocal(path):
3552 3553 return True
3553 3554
3554 3555
3555 3556 def defaultcreateopts(ui, createopts=None):
3556 3557 """Populate the default creation options for a repository.
3557 3558
3558 3559 A dictionary of explicitly requested creation options can be passed
3559 3560 in. Missing keys will be populated.
3560 3561 """
3561 3562 createopts = dict(createopts or {})
3562 3563
3563 3564 if b'backend' not in createopts:
3564 3565 # experimental config: storage.new-repo-backend
3565 3566 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3566 3567
3567 3568 return createopts
3568 3569
3569 3570
3570 3571 def newreporequirements(ui, createopts):
3571 3572 """Determine the set of requirements for a new local repository.
3572 3573
3573 3574 Extensions can wrap this function to specify custom requirements for
3574 3575 new repositories.
3575 3576 """
3576 3577 # If the repo is being created from a shared repository, we copy
3577 3578 # its requirements.
3578 3579 if b'sharedrepo' in createopts:
3579 3580 requirements = set(createopts[b'sharedrepo'].requirements)
3580 3581 if createopts.get(b'sharedrelative'):
3581 3582 requirements.add(b'relshared')
3582 3583 else:
3583 3584 requirements.add(b'shared')
3584 3585
3585 3586 return requirements
3586 3587
3587 3588 if b'backend' not in createopts:
3588 3589 raise error.ProgrammingError(
3589 3590 b'backend key not present in createopts; '
3590 3591 b'was defaultcreateopts() called?'
3591 3592 )
3592 3593
3593 3594 if createopts[b'backend'] != b'revlogv1':
3594 3595 raise error.Abort(
3595 3596 _(
3596 3597 b'unable to determine repository requirements for '
3597 3598 b'storage backend: %s'
3598 3599 )
3599 3600 % createopts[b'backend']
3600 3601 )
3601 3602
3602 3603 requirements = {b'revlogv1'}
3603 3604 if ui.configbool(b'format', b'usestore'):
3604 3605 requirements.add(b'store')
3605 3606 if ui.configbool(b'format', b'usefncache'):
3606 3607 requirements.add(b'fncache')
3607 3608 if ui.configbool(b'format', b'dotencode'):
3608 3609 requirements.add(b'dotencode')
3609 3610
3610 3611 compengines = ui.configlist(b'format', b'revlog-compression')
3611 3612 for compengine in compengines:
3612 3613 if compengine in util.compengines:
3613 3614 break
3614 3615 else:
3615 3616 raise error.Abort(
3616 3617 _(
3617 3618 b'compression engines %s defined by '
3618 3619 b'format.revlog-compression not available'
3619 3620 )
3620 3621 % b', '.join(b'"%s"' % e for e in compengines),
3621 3622 hint=_(
3622 3623 b'run "hg debuginstall" to list available '
3623 3624 b'compression engines'
3624 3625 ),
3625 3626 )
3626 3627
3627 3628 # zlib is the historical default and doesn't need an explicit requirement.
3628 3629 if compengine == b'zstd':
3629 3630 requirements.add(b'revlog-compression-zstd')
3630 3631 elif compengine != b'zlib':
3631 3632 requirements.add(b'exp-compression-%s' % compengine)
3632 3633
3633 3634 if scmutil.gdinitconfig(ui):
3634 3635 requirements.add(b'generaldelta')
3635 3636 if ui.configbool(b'format', b'sparse-revlog'):
3636 3637 requirements.add(SPARSEREVLOG_REQUIREMENT)
3637 3638
3638 3639 # experimental config: format.exp-use-side-data
3639 3640 if ui.configbool(b'format', b'exp-use-side-data'):
3640 3641 requirements.add(SIDEDATA_REQUIREMENT)
3641 3642 # experimental config: format.exp-use-copies-side-data-changeset
3642 3643 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3643 3644 requirements.add(SIDEDATA_REQUIREMENT)
3644 3645 requirements.add(COPIESSDC_REQUIREMENT)
3645 3646 if ui.configbool(b'experimental', b'treemanifest'):
3646 3647 requirements.add(b'treemanifest')
3647 3648
3648 3649 revlogv2 = ui.config(b'experimental', b'revlogv2')
3649 3650 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3650 3651 requirements.remove(b'revlogv1')
3651 3652 # generaldelta is implied by revlogv2.
3652 3653 requirements.discard(b'generaldelta')
3653 3654 requirements.add(REVLOGV2_REQUIREMENT)
3654 3655 # experimental config: format.internal-phase
3655 3656 if ui.configbool(b'format', b'internal-phase'):
3656 3657 requirements.add(b'internal-phase')
3657 3658
3658 3659 if createopts.get(b'narrowfiles'):
3659 3660 requirements.add(repository.NARROW_REQUIREMENT)
3660 3661
3661 3662 if createopts.get(b'lfs'):
3662 3663 requirements.add(b'lfs')
3663 3664
3664 3665 if ui.configbool(b'format', b'bookmarks-in-store'):
3665 3666 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3666 3667
3667 3668 if ui.configbool(b'format', b'use-persistent-nodemap'):
3668 3669 requirements.add(NODEMAP_REQUIREMENT)
3669 3670
3670 3671 return requirements
3671 3672
3672 3673
3673 3674 def filterknowncreateopts(ui, createopts):
3674 3675 """Filters a dict of repo creation options against options that are known.
3675 3676
3676 3677 Receives a dict of repo creation options and returns a dict of those
3677 3678 options that we don't know how to handle.
3678 3679
3679 3680 This function is called as part of repository creation. If the
3680 3681 returned dict contains any items, repository creation will not
3681 3682 be allowed, as it means there was a request to create a repository
3682 3683 with options not recognized by loaded code.
3683 3684
3684 3685 Extensions can wrap this function to filter out creation options
3685 3686 they know how to handle.
3686 3687 """
3687 3688 known = {
3688 3689 b'backend',
3689 3690 b'lfs',
3690 3691 b'narrowfiles',
3691 3692 b'sharedrepo',
3692 3693 b'sharedrelative',
3693 3694 b'shareditems',
3694 3695 b'shallowfilestore',
3695 3696 }
3696 3697
3697 3698 return {k: v for k, v in createopts.items() if k not in known}
3698 3699
3699 3700
3700 3701 def createrepository(ui, path, createopts=None):
3701 3702 """Create a new repository in a vfs.
3702 3703
3703 3704 ``path`` path to the new repo's working directory.
3704 3705 ``createopts`` options for the new repository.
3705 3706
3706 3707 The following keys for ``createopts`` are recognized:
3707 3708
3708 3709 backend
3709 3710 The storage backend to use.
3710 3711 lfs
3711 3712 Repository will be created with ``lfs`` requirement. The lfs extension
3712 3713 will automatically be loaded when the repository is accessed.
3713 3714 narrowfiles
3714 3715 Set up repository to support narrow file storage.
3715 3716 sharedrepo
3716 3717 Repository object from which storage should be shared.
3717 3718 sharedrelative
3718 3719 Boolean indicating if the path to the shared repo should be
3719 3720 stored as relative. By default, the pointer to the "parent" repo
3720 3721 is stored as an absolute path.
3721 3722 shareditems
3722 3723 Set of items to share to the new repository (in addition to storage).
3723 3724 shallowfilestore
3724 3725 Indicates that storage for files should be shallow (not all ancestor
3725 3726 revisions are known).
3726 3727 """
3727 3728 createopts = defaultcreateopts(ui, createopts=createopts)
3728 3729
3729 3730 unknownopts = filterknowncreateopts(ui, createopts)
3730 3731
3731 3732 if not isinstance(unknownopts, dict):
3732 3733 raise error.ProgrammingError(
3733 3734 b'filterknowncreateopts() did not return a dict'
3734 3735 )
3735 3736
3736 3737 if unknownopts:
3737 3738 raise error.Abort(
3738 3739 _(
3739 3740 b'unable to create repository because of unknown '
3740 3741 b'creation option: %s'
3741 3742 )
3742 3743 % b', '.join(sorted(unknownopts)),
3743 3744 hint=_(b'is a required extension not loaded?'),
3744 3745 )
3745 3746
3746 3747 requirements = newreporequirements(ui, createopts=createopts)
3747 3748
3748 3749 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3749 3750
3750 3751 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3751 3752 if hgvfs.exists():
3752 3753 raise error.RepoError(_(b'repository %s already exists') % path)
3753 3754
3754 3755 if b'sharedrepo' in createopts:
3755 3756 sharedpath = createopts[b'sharedrepo'].sharedpath
3756 3757
3757 3758 if createopts.get(b'sharedrelative'):
3758 3759 try:
3759 3760 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3760 3761 except (IOError, ValueError) as e:
3761 3762 # ValueError is raised on Windows if the drive letters differ
3762 3763 # on each path.
3763 3764 raise error.Abort(
3764 3765 _(b'cannot calculate relative path'),
3765 3766 hint=stringutil.forcebytestr(e),
3766 3767 )
3767 3768
3768 3769 if not wdirvfs.exists():
3769 3770 wdirvfs.makedirs()
3770 3771
3771 3772 hgvfs.makedir(notindexed=True)
3772 3773 if b'sharedrepo' not in createopts:
3773 3774 hgvfs.mkdir(b'cache')
3774 3775 hgvfs.mkdir(b'wcache')
3775 3776
3776 3777 if b'store' in requirements and b'sharedrepo' not in createopts:
3777 3778 hgvfs.mkdir(b'store')
3778 3779
3779 3780 # We create an invalid changelog outside the store so very old
3780 3781 # Mercurial versions (which didn't know about the requirements
3781 3782 # file) encounter an error on reading the changelog. This
3782 3783 # effectively locks out old clients and prevents them from
3783 3784 # mucking with a repo in an unknown format.
3784 3785 #
3785 3786 # The revlog header has version 2, which won't be recognized by
3786 3787 # such old clients.
3787 3788 hgvfs.append(
3788 3789 b'00changelog.i',
3789 3790 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3790 3791 b'layout',
3791 3792 )
3792 3793
3793 3794 scmutil.writerequires(hgvfs, requirements)
3794 3795
3795 3796 # Write out file telling readers where to find the shared store.
3796 3797 if b'sharedrepo' in createopts:
3797 3798 hgvfs.write(b'sharedpath', sharedpath)
3798 3799
3799 3800 if createopts.get(b'shareditems'):
3800 3801 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3801 3802 hgvfs.write(b'shared', shared)
3802 3803
3803 3804
3804 3805 def poisonrepository(repo):
3805 3806 """Poison a repository instance so it can no longer be used."""
3806 3807 # Perform any cleanup on the instance.
3807 3808 repo.close()
3808 3809
3809 3810 # Our strategy is to replace the type of the object with one that
3810 3811 # has all attribute lookups result in error.
3811 3812 #
3812 3813 # But we have to allow the close() method because some constructors
3813 3814 # of repos call close() on repo references.
3814 3815 class poisonedrepository(object):
3815 3816 def __getattribute__(self, item):
3816 3817 if item == 'close':
3817 3818 return object.__getattribute__(self, item)
3818 3819
3819 3820 raise error.ProgrammingError(
3820 3821 b'repo instances should not be used after unshare'
3821 3822 )
3822 3823
3823 3824 def close(self):
3824 3825 pass
3825 3826
3826 3827 # We may have a repoview, which intercepts __setattr__. So be sure
3827 3828 # we operate at the lowest level possible.
3828 3829 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,1392 +1,1392 b''
1 1 commit hooks can see env vars
2 2 (and post-transaction ones are run unlocked)
3 3
4 4
5 5 $ cat > $TESTTMP/txnabort.checkargs.py <<EOF
6 6 > from mercurial import pycompat
7 7 > def showargs(ui, repo, hooktype, **kwargs):
8 8 > kwargs = pycompat.byteskwargs(kwargs)
9 9 > ui.write(b'%s Python hook: %s\n' % (hooktype,
10 10 > b','.join(sorted(kwargs))))
11 11 > EOF
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ cat > .hg/hgrc <<EOF
16 16 > [hooks]
17 17 > commit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit"
18 18 > commit.b = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit.b"
19 19 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= printenv.py --line precommit"
20 20 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxncommit"
21 21 > pretxncommit.tip = hg -q tip
22 22 > pre-identify = sh -c "printenv.py --line pre-identify 1"
23 23 > pre-cat = sh -c "printenv.py --line pre-cat"
24 24 > post-cat = sh -c "printenv.py --line post-cat"
25 25 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnopen"
26 26 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnclose"
27 27 > txnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnclose"
28 28 > txnabort.0 = python:$TESTTMP/txnabort.checkargs.py:showargs
29 29 > txnabort.1 = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnabort"
30 30 > txnclose.checklock = sh -c "hg debuglock > /dev/null"
31 31 > EOF
32 32 $ echo a > a
33 33 $ hg add a
34 34 $ hg commit -m a
35 35 precommit hook: HG_HOOKNAME=precommit
36 36 HG_HOOKTYPE=precommit
37 37 HG_PARENT1=0000000000000000000000000000000000000000
38 38
39 39 pretxnopen hook: HG_HOOKNAME=pretxnopen
40 40 HG_HOOKTYPE=pretxnopen
41 41 HG_TXNID=TXN:$ID$
42 42 HG_TXNNAME=commit
43 43
44 44 pretxncommit hook: HG_HOOKNAME=pretxncommit
45 45 HG_HOOKTYPE=pretxncommit
46 46 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
47 47 HG_PARENT1=0000000000000000000000000000000000000000
48 48 HG_PENDING=$TESTTMP/a
49 49
50 50 0:cb9a9f314b8b
51 51 pretxnclose hook: HG_HOOKNAME=pretxnclose
52 52 HG_HOOKTYPE=pretxnclose
53 53 HG_PENDING=$TESTTMP/a
54 54 HG_PHASES_MOVED=1
55 55 HG_TXNID=TXN:$ID$
56 56 HG_TXNNAME=commit
57 57
58 58 txnclose hook: HG_HOOKNAME=txnclose
59 59 HG_HOOKTYPE=txnclose
60 60 HG_PHASES_MOVED=1
61 61 HG_TXNID=TXN:$ID$
62 62 HG_TXNNAME=commit
63 63
64 64 commit hook: HG_HOOKNAME=commit
65 65 HG_HOOKTYPE=commit
66 66 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
67 67 HG_PARENT1=0000000000000000000000000000000000000000
68 68
69 69 commit.b hook: HG_HOOKNAME=commit.b
70 70 HG_HOOKTYPE=commit
71 71 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
72 72 HG_PARENT1=0000000000000000000000000000000000000000
73 73
74 74
75 75 $ hg clone . ../b
76 76 updating to branch default
77 77 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 78 $ cd ../b
79 79
80 80 changegroup hooks can see env vars
81 81
82 82 $ cat > .hg/hgrc <<EOF
83 83 > [hooks]
84 84 > prechangegroup = sh -c "printenv.py --line prechangegroup"
85 85 > changegroup = sh -c "printenv.py --line changegroup"
86 86 > incoming = sh -c "printenv.py --line incoming"
87 87 > EOF
88 88
89 89 pretxncommit and commit hooks can see both parents of merge
90 90
91 91 $ cd ../a
92 92 $ echo b >> a
93 93 $ hg commit -m a1 -d "1 0"
94 94 precommit hook: HG_HOOKNAME=precommit
95 95 HG_HOOKTYPE=precommit
96 96 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
97 97
98 98 pretxnopen hook: HG_HOOKNAME=pretxnopen
99 99 HG_HOOKTYPE=pretxnopen
100 100 HG_TXNID=TXN:$ID$
101 101 HG_TXNNAME=commit
102 102
103 103 pretxncommit hook: HG_HOOKNAME=pretxncommit
104 104 HG_HOOKTYPE=pretxncommit
105 105 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
106 106 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
107 107 HG_PENDING=$TESTTMP/a
108 108
109 109 1:ab228980c14d
110 110 pretxnclose hook: HG_HOOKNAME=pretxnclose
111 111 HG_HOOKTYPE=pretxnclose
112 112 HG_PENDING=$TESTTMP/a
113 113 HG_TXNID=TXN:$ID$
114 114 HG_TXNNAME=commit
115 115
116 116 txnclose hook: HG_HOOKNAME=txnclose
117 117 HG_HOOKTYPE=txnclose
118 118 HG_TXNID=TXN:$ID$
119 119 HG_TXNNAME=commit
120 120
121 121 commit hook: HG_HOOKNAME=commit
122 122 HG_HOOKTYPE=commit
123 123 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
124 124 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
125 125
126 126 commit.b hook: HG_HOOKNAME=commit.b
127 127 HG_HOOKTYPE=commit
128 128 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
129 129 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
130 130
131 131 $ hg update -C 0
132 132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 133 $ echo b > b
134 134 $ hg add b
135 135 $ hg commit -m b -d '1 0'
136 136 precommit hook: HG_HOOKNAME=precommit
137 137 HG_HOOKTYPE=precommit
138 138 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
139 139
140 140 pretxnopen hook: HG_HOOKNAME=pretxnopen
141 141 HG_HOOKTYPE=pretxnopen
142 142 HG_TXNID=TXN:$ID$
143 143 HG_TXNNAME=commit
144 144
145 145 pretxncommit hook: HG_HOOKNAME=pretxncommit
146 146 HG_HOOKTYPE=pretxncommit
147 147 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
148 148 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
149 149 HG_PENDING=$TESTTMP/a
150 150
151 151 2:ee9deb46ab31
152 152 pretxnclose hook: HG_HOOKNAME=pretxnclose
153 153 HG_HOOKTYPE=pretxnclose
154 154 HG_PENDING=$TESTTMP/a
155 155 HG_TXNID=TXN:$ID$
156 156 HG_TXNNAME=commit
157 157
158 158 created new head
159 159 txnclose hook: HG_HOOKNAME=txnclose
160 160 HG_HOOKTYPE=txnclose
161 161 HG_TXNID=TXN:$ID$
162 162 HG_TXNNAME=commit
163 163
164 164 commit hook: HG_HOOKNAME=commit
165 165 HG_HOOKTYPE=commit
166 166 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
167 167 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
168 168
169 169 commit.b hook: HG_HOOKNAME=commit.b
170 170 HG_HOOKTYPE=commit
171 171 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
172 172 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
173 173
174 174 $ hg merge 1
175 175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
176 176 (branch merge, don't forget to commit)
177 177 $ hg commit -m merge -d '2 0'
178 178 precommit hook: HG_HOOKNAME=precommit
179 179 HG_HOOKTYPE=precommit
180 180 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
181 181 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
182 182
183 183 pretxnopen hook: HG_HOOKNAME=pretxnopen
184 184 HG_HOOKTYPE=pretxnopen
185 185 HG_TXNID=TXN:$ID$
186 186 HG_TXNNAME=commit
187 187
188 188 pretxncommit hook: HG_HOOKNAME=pretxncommit
189 189 HG_HOOKTYPE=pretxncommit
190 190 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
191 191 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
192 192 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
193 193 HG_PENDING=$TESTTMP/a
194 194
195 195 3:07f3376c1e65
196 196 pretxnclose hook: HG_HOOKNAME=pretxnclose
197 197 HG_HOOKTYPE=pretxnclose
198 198 HG_PENDING=$TESTTMP/a
199 199 HG_TXNID=TXN:$ID$
200 200 HG_TXNNAME=commit
201 201
202 202 txnclose hook: HG_HOOKNAME=txnclose
203 203 HG_HOOKTYPE=txnclose
204 204 HG_TXNID=TXN:$ID$
205 205 HG_TXNNAME=commit
206 206
207 207 commit hook: HG_HOOKNAME=commit
208 208 HG_HOOKTYPE=commit
209 209 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
210 210 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
211 211 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
212 212
213 213 commit.b hook: HG_HOOKNAME=commit.b
214 214 HG_HOOKTYPE=commit
215 215 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
216 216 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
217 217 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
218 218
219 219
220 220 test generic hooks
221 221
222 222 $ hg id
223 223 pre-identify hook: HG_ARGS=id
224 224 HG_HOOKNAME=pre-identify
225 225 HG_HOOKTYPE=pre-identify
226 226 HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None, 'template': ''}
227 227 HG_PATS=[]
228 228
229 229 abort: pre-identify hook exited with status 1
230 230 [255]
231 231 $ hg cat b
232 232 pre-cat hook: HG_ARGS=cat b
233 233 HG_HOOKNAME=pre-cat
234 234 HG_HOOKTYPE=pre-cat
235 235 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
236 236 HG_PATS=['b']
237 237
238 238 b
239 239 post-cat hook: HG_ARGS=cat b
240 240 HG_HOOKNAME=post-cat
241 241 HG_HOOKTYPE=post-cat
242 242 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
243 243 HG_PATS=['b']
244 244 HG_RESULT=0
245 245
246 246
247 247 $ cd ../b
248 248 $ hg pull ../a
249 249 pulling from ../a
250 250 searching for changes
251 251 prechangegroup hook: HG_HOOKNAME=prechangegroup
252 252 HG_HOOKTYPE=prechangegroup
253 253 HG_SOURCE=pull
254 254 HG_TXNID=TXN:$ID$
255 255 HG_TXNNAME=pull
256 256 file:/*/$TESTTMP/a (glob)
257 257 HG_URL=file:$TESTTMP/a
258 258
259 259 adding changesets
260 260 adding manifests
261 261 adding file changes
262 262 added 3 changesets with 2 changes to 2 files
263 263 new changesets ab228980c14d:07f3376c1e65
264 264 changegroup hook: HG_HOOKNAME=changegroup
265 265 HG_HOOKTYPE=changegroup
266 266 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
267 267 HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2
268 268 HG_SOURCE=pull
269 269 HG_TXNID=TXN:$ID$
270 270 HG_TXNNAME=pull
271 271 file:/*/$TESTTMP/a (glob)
272 272 HG_URL=file:$TESTTMP/a
273 273
274 274 incoming hook: HG_HOOKNAME=incoming
275 275 HG_HOOKTYPE=incoming
276 276 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
277 277 HG_SOURCE=pull
278 278 HG_TXNID=TXN:$ID$
279 279 HG_TXNNAME=pull
280 280 file:/*/$TESTTMP/a (glob)
281 281 HG_URL=file:$TESTTMP/a
282 282
283 283 incoming hook: HG_HOOKNAME=incoming
284 284 HG_HOOKTYPE=incoming
285 285 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
286 286 HG_SOURCE=pull
287 287 HG_TXNID=TXN:$ID$
288 288 HG_TXNNAME=pull
289 289 file:/*/$TESTTMP/a (glob)
290 290 HG_URL=file:$TESTTMP/a
291 291
292 292 incoming hook: HG_HOOKNAME=incoming
293 293 HG_HOOKTYPE=incoming
294 294 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
295 295 HG_SOURCE=pull
296 296 HG_TXNID=TXN:$ID$
297 297 HG_TXNNAME=pull
298 298 file:/*/$TESTTMP/a (glob)
299 299 HG_URL=file:$TESTTMP/a
300 300
301 301 (run 'hg update' to get a working copy)
302 302
303 303 tag hooks can see env vars
304 304
305 305 $ cd ../a
306 306 $ cat >> .hg/hgrc <<EOF
307 307 > pretag = sh -c "printenv.py --line pretag"
308 308 > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py --line tag"
309 309 > EOF
310 310 $ hg tag -d '3 0' a
311 311 pretag hook: HG_HOOKNAME=pretag
312 312 HG_HOOKTYPE=pretag
313 313 HG_LOCAL=0
314 314 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
315 315 HG_TAG=a
316 316
317 317 precommit hook: HG_HOOKNAME=precommit
318 318 HG_HOOKTYPE=precommit
319 319 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
320 320
321 321 pretxnopen hook: HG_HOOKNAME=pretxnopen
322 322 HG_HOOKTYPE=pretxnopen
323 323 HG_TXNID=TXN:$ID$
324 324 HG_TXNNAME=commit
325 325
326 326 pretxncommit hook: HG_HOOKNAME=pretxncommit
327 327 HG_HOOKTYPE=pretxncommit
328 328 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
329 329 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
330 330 HG_PENDING=$TESTTMP/a
331 331
332 332 4:539e4b31b6dc
333 333 pretxnclose hook: HG_HOOKNAME=pretxnclose
334 334 HG_HOOKTYPE=pretxnclose
335 335 HG_PENDING=$TESTTMP/a
336 336 HG_TXNID=TXN:$ID$
337 337 HG_TXNNAME=commit
338 338
339 339 tag hook: HG_HOOKNAME=tag
340 340 HG_HOOKTYPE=tag
341 341 HG_LOCAL=0
342 342 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
343 343 HG_TAG=a
344 344
345 345 txnclose hook: HG_HOOKNAME=txnclose
346 346 HG_HOOKTYPE=txnclose
347 347 HG_TXNID=TXN:$ID$
348 348 HG_TXNNAME=commit
349 349
350 350 commit hook: HG_HOOKNAME=commit
351 351 HG_HOOKTYPE=commit
352 352 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
353 353 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
354 354
355 355 commit.b hook: HG_HOOKNAME=commit.b
356 356 HG_HOOKTYPE=commit
357 357 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
358 358 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
359 359
360 360 $ hg tag -l la
361 361 pretag hook: HG_HOOKNAME=pretag
362 362 HG_HOOKTYPE=pretag
363 363 HG_LOCAL=1
364 364 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
365 365 HG_TAG=la
366 366
367 367 tag hook: HG_HOOKNAME=tag
368 368 HG_HOOKTYPE=tag
369 369 HG_LOCAL=1
370 370 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
371 371 HG_TAG=la
372 372
373 373
374 374 pretag hook can forbid tagging
375 375
376 376 $ cat >> .hg/hgrc <<EOF
377 377 > pretag.forbid = sh -c "printenv.py --line pretag.forbid 1"
378 378 > EOF
379 379 $ hg tag -d '4 0' fa
380 380 pretag hook: HG_HOOKNAME=pretag
381 381 HG_HOOKTYPE=pretag
382 382 HG_LOCAL=0
383 383 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
384 384 HG_TAG=fa
385 385
386 386 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
387 387 HG_HOOKTYPE=pretag
388 388 HG_LOCAL=0
389 389 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
390 390 HG_TAG=fa
391 391
392 392 abort: pretag.forbid hook exited with status 1
393 393 [255]
394 394 $ hg tag -l fla
395 395 pretag hook: HG_HOOKNAME=pretag
396 396 HG_HOOKTYPE=pretag
397 397 HG_LOCAL=1
398 398 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
399 399 HG_TAG=fla
400 400
401 401 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
402 402 HG_HOOKTYPE=pretag
403 403 HG_LOCAL=1
404 404 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
405 405 HG_TAG=fla
406 406
407 407 abort: pretag.forbid hook exited with status 1
408 408 [255]
409 409
410 410 pretxncommit hook can see the changeset and can roll back the txn;
411 411 the changeset is no longer there afterwards
412 412
413 413 $ cat >> .hg/hgrc <<EOF
414 414 > pretxncommit.forbid0 = sh -c "hg tip -q"
415 415 > pretxncommit.forbid1 = sh -c "printenv.py --line pretxncommit.forbid 1"
416 416 > EOF
417 417 $ echo z > z
418 418 $ hg add z
419 419 $ hg -q tip
420 420 4:539e4b31b6dc
421 421 $ hg commit -m 'fail' -d '4 0'
422 422 precommit hook: HG_HOOKNAME=precommit
423 423 HG_HOOKTYPE=precommit
424 424 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
425 425
426 426 pretxnopen hook: HG_HOOKNAME=pretxnopen
427 427 HG_HOOKTYPE=pretxnopen
428 428 HG_TXNID=TXN:$ID$
429 429 HG_TXNNAME=commit
430 430
431 431 pretxncommit hook: HG_HOOKNAME=pretxncommit
432 432 HG_HOOKTYPE=pretxncommit
433 433 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
434 434 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
435 435 HG_PENDING=$TESTTMP/a
436 436
437 437 5:6f611f8018c1
438 438 5:6f611f8018c1
439 439 pretxncommit.forbid hook: HG_HOOKNAME=pretxncommit.forbid1
440 440 HG_HOOKTYPE=pretxncommit
441 441 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
442 442 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
443 443 HG_PENDING=$TESTTMP/a
444 444
445 445 transaction abort!
446 txnabort Python hook: txnid,txnname
446 txnabort Python hook: changes,txnid,txnname
447 447 txnabort hook: HG_HOOKNAME=txnabort.1
448 448 HG_HOOKTYPE=txnabort
449 449 HG_TXNID=TXN:$ID$
450 450 HG_TXNNAME=commit
451 451
452 452 rollback completed
453 453 abort: pretxncommit.forbid1 hook exited with status 1
454 454 [255]
455 455 $ hg -q tip
456 456 4:539e4b31b6dc
457 457
458 458 (Check that no 'changelog.i.a' file was left behind)
459 459
460 460 $ ls -1 .hg/store/
461 461 00changelog.i
462 462 00manifest.i
463 463 data
464 464 fncache (repofncache !)
465 465 journal.phaseroots
466 466 phaseroots
467 467 undo
468 468 undo.backup.fncache (repofncache !)
469 469 undo.backupfiles
470 470 undo.phaseroots
471 471
472 472
473 473 precommit hook can prevent commit
474 474
475 475 $ cat >> .hg/hgrc <<EOF
476 476 > precommit.forbid = sh -c "printenv.py --line precommit.forbid 1"
477 477 > EOF
478 478 $ hg commit -m 'fail' -d '4 0'
479 479 precommit hook: HG_HOOKNAME=precommit
480 480 HG_HOOKTYPE=precommit
481 481 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
482 482
483 483 precommit.forbid hook: HG_HOOKNAME=precommit.forbid
484 484 HG_HOOKTYPE=precommit
485 485 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
486 486
487 487 abort: precommit.forbid hook exited with status 1
488 488 [255]
489 489 $ hg -q tip
490 490 4:539e4b31b6dc
491 491
492 492 preupdate hook can prevent update
493 493
494 494 $ cat >> .hg/hgrc <<EOF
495 495 > preupdate = sh -c "printenv.py --line preupdate"
496 496 > EOF
497 497 $ hg update 1
498 498 preupdate hook: HG_HOOKNAME=preupdate
499 499 HG_HOOKTYPE=preupdate
500 500 HG_PARENT1=ab228980c14d
501 501
502 502 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
503 503
504 504 update hook
505 505
506 506 $ cat >> .hg/hgrc <<EOF
507 507 > update = sh -c "printenv.py --line update"
508 508 > EOF
509 509 $ hg update
510 510 preupdate hook: HG_HOOKNAME=preupdate
511 511 HG_HOOKTYPE=preupdate
512 512 HG_PARENT1=539e4b31b6dc
513 513
514 514 update hook: HG_ERROR=0
515 515 HG_HOOKNAME=update
516 516 HG_HOOKTYPE=update
517 517 HG_PARENT1=539e4b31b6dc
518 518
519 519 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
520 520
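(The in-process form of these two hooks receives the target parents as keyword
arguments; a hypothetical sketch, where parent1 mirrors HG_PARENT1 above and a
truthy return from preupdate cancels the update:)

    # guardupdate.py - hypothetical preupdate hook:
    #   preupdate.guard = python:/path/to/guardupdate.py:guard
    def guard(ui, repo, hooktype, parent1=None, parent2=None, **kwargs):
        ui.status(b'about to update the working copy to %s\n' % parent1)
        return False  # falsy return lets the update proceed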
521 521 pushkey hook
522 522
523 523 $ cat >> .hg/hgrc <<EOF
524 524 > pushkey = sh -c "printenv.py --line pushkey"
525 525 > EOF
526 526 $ cd ../b
527 527 $ hg bookmark -r null foo
528 528 $ hg push -B foo ../a
529 529 pushing to ../a
530 530 searching for changes
531 531 no changes found
532 532 pretxnopen hook: HG_HOOKNAME=pretxnopen
533 533 HG_HOOKTYPE=pretxnopen
534 534 HG_TXNID=TXN:$ID$
535 535 HG_TXNNAME=push
536 536
537 537 pretxnclose hook: HG_BOOKMARK_MOVED=1
538 538 HG_BUNDLE2=1
539 539 HG_HOOKNAME=pretxnclose
540 540 HG_HOOKTYPE=pretxnclose
541 541 HG_PENDING=$TESTTMP/a
542 542 HG_SOURCE=push
543 543 HG_TXNID=TXN:$ID$
544 544 HG_TXNNAME=push
545 545 HG_URL=file:$TESTTMP/a
546 546
547 547 pushkey hook: HG_BUNDLE2=1
548 548 HG_HOOKNAME=pushkey
549 549 HG_HOOKTYPE=pushkey
550 550 HG_KEY=foo
551 551 HG_NAMESPACE=bookmarks
552 552 HG_NEW=0000000000000000000000000000000000000000
553 553 HG_PUSHKEYCOMPAT=1
554 554 HG_SOURCE=push
555 555 HG_TXNID=TXN:$ID$
556 556 HG_TXNNAME=push
557 557 HG_URL=file:$TESTTMP/a
558 558
559 559 txnclose hook: HG_BOOKMARK_MOVED=1
560 560 HG_BUNDLE2=1
561 561 HG_HOOKNAME=txnclose
562 562 HG_HOOKTYPE=txnclose
563 563 HG_SOURCE=push
564 564 HG_TXNID=TXN:$ID$
565 565 HG_TXNNAME=push
566 566 HG_URL=file:$TESTTMP/a
567 567
568 568 exporting bookmark foo
569 569 [1]
570 570 $ cd ../a
571 571
572 572 listkeys hook
573 573
574 574 $ cat >> .hg/hgrc <<EOF
575 575 > listkeys = sh -c "printenv.py --line listkeys"
576 576 > EOF
577 577 $ hg bookmark -r null bar
578 578 pretxnopen hook: HG_HOOKNAME=pretxnopen
579 579 HG_HOOKTYPE=pretxnopen
580 580 HG_TXNID=TXN:$ID$
581 581 HG_TXNNAME=bookmark
582 582
583 583 pretxnclose hook: HG_BOOKMARK_MOVED=1
584 584 HG_HOOKNAME=pretxnclose
585 585 HG_HOOKTYPE=pretxnclose
586 586 HG_PENDING=$TESTTMP/a
587 587 HG_TXNID=TXN:$ID$
588 588 HG_TXNNAME=bookmark
589 589
590 590 txnclose hook: HG_BOOKMARK_MOVED=1
591 591 HG_HOOKNAME=txnclose
592 592 HG_HOOKTYPE=txnclose
593 593 HG_TXNID=TXN:$ID$
594 594 HG_TXNNAME=bookmark
595 595
596 596 $ cd ../b
597 597 $ hg pull -B bar ../a
598 598 pulling from ../a
599 599 listkeys hook: HG_HOOKNAME=listkeys
600 600 HG_HOOKTYPE=listkeys
601 601 HG_NAMESPACE=bookmarks
602 602 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
603 603
604 604 no changes found
605 605 adding remote bookmark bar
606 606 $ cd ../a
607 607
608 608 test that prepushkey can prevent incoming keys
609 609
610 610 $ cat >> .hg/hgrc <<EOF
611 611 > prepushkey = sh -c "printenv.py --line prepushkey.forbid 1"
612 612 > EOF
613 613 $ cd ../b
614 614 $ hg bookmark -r null baz
615 615 $ hg push -B baz ../a
616 616 pushing to ../a
617 617 searching for changes
618 618 listkeys hook: HG_HOOKNAME=listkeys
619 619 HG_HOOKTYPE=listkeys
620 620 HG_NAMESPACE=phases
621 621 HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
622 622
623 623 listkeys hook: HG_HOOKNAME=listkeys
624 624 HG_HOOKTYPE=listkeys
625 625 HG_NAMESPACE=bookmarks
626 626 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
627 627
628 628 no changes found
629 629 pretxnopen hook: HG_HOOKNAME=pretxnopen
630 630 HG_HOOKTYPE=pretxnopen
631 631 HG_TXNID=TXN:$ID$
632 632 HG_TXNNAME=push
633 633
634 634 prepushkey.forbid hook: HG_BUNDLE2=1
635 635 HG_HOOKNAME=prepushkey
636 636 HG_HOOKTYPE=prepushkey
637 637 HG_KEY=baz
638 638 HG_NAMESPACE=bookmarks
639 639 HG_NEW=0000000000000000000000000000000000000000
640 640 HG_PUSHKEYCOMPAT=1
641 641 HG_SOURCE=push
642 642 HG_TXNID=TXN:$ID$
643 643 HG_TXNNAME=push
644 644 HG_URL=file:$TESTTMP/a
645 645
646 646 abort: prepushkey hook exited with status 1
647 647 [255]
648 648 $ cd ../a
649 649
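(prepushkey is commonly used to gate which keys a client may push; a
hypothetical in-process sketch, where namespace, key and new mirror
HG_NAMESPACE, HG_KEY and HG_NEW above:)

    # gatekeys.py - hypothetical prepushkey hook:
    #   prepushkey.gate = python:/path/to/gatekeys.py:gate
    def gate(ui, repo, hooktype, namespace=None, key=None, new=None, **kwargs):
        if namespace == b'bookmarks' and not key.startswith(b'release/'):
            ui.warn(b'only release/* bookmarks may be pushed\n')
            return True  # refuse this key; the push aborts
        return False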
650 650 test that prelistkeys can prevent listing keys
651 651
652 652 $ cat >> .hg/hgrc <<EOF
653 653 > prelistkeys = sh -c "printenv.py --line prelistkeys.forbid 1"
654 654 > EOF
655 655 $ hg bookmark -r null quux
656 656 pretxnopen hook: HG_HOOKNAME=pretxnopen
657 657 HG_HOOKTYPE=pretxnopen
658 658 HG_TXNID=TXN:$ID$
659 659 HG_TXNNAME=bookmark
660 660
661 661 pretxnclose hook: HG_BOOKMARK_MOVED=1
662 662 HG_HOOKNAME=pretxnclose
663 663 HG_HOOKTYPE=pretxnclose
664 664 HG_PENDING=$TESTTMP/a
665 665 HG_TXNID=TXN:$ID$
666 666 HG_TXNNAME=bookmark
667 667
668 668 txnclose hook: HG_BOOKMARK_MOVED=1
669 669 HG_HOOKNAME=txnclose
670 670 HG_HOOKTYPE=txnclose
671 671 HG_TXNID=TXN:$ID$
672 672 HG_TXNNAME=bookmark
673 673
674 674 $ cd ../b
675 675 $ hg pull -B quux ../a
676 676 pulling from ../a
677 677 prelistkeys.forbid hook: HG_HOOKNAME=prelistkeys
678 678 HG_HOOKTYPE=prelistkeys
679 679 HG_NAMESPACE=bookmarks
680 680
681 681 abort: prelistkeys hook exited with status 1
682 682 [255]
683 683 $ cd ../a
684 684 $ rm .hg/hgrc
685 685
686 686 prechangegroup hook can prevent incoming changes
687 687
688 688 $ cd ../b
689 689 $ hg -q tip
690 690 3:07f3376c1e65
691 691 $ cat > .hg/hgrc <<EOF
692 692 > [hooks]
693 693 > prechangegroup.forbid = sh -c "printenv.py --line prechangegroup.forbid 1"
694 694 > EOF
695 695 $ hg pull ../a
696 696 pulling from ../a
697 697 searching for changes
698 698 prechangegroup.forbid hook: HG_HOOKNAME=prechangegroup.forbid
699 699 HG_HOOKTYPE=prechangegroup
700 700 HG_SOURCE=pull
701 701 HG_TXNID=TXN:$ID$
702 702 HG_TXNNAME=pull
703 703 file:/*/$TESTTMP/a (glob)
704 704 HG_URL=file:$TESTTMP/a
705 705
706 706 abort: prechangegroup.forbid hook exited with status 1
707 707 [255]
708 708
709 709 pretxnchangegroup hook can see incoming changes and can roll back the txn;
710 710 the incoming changes are no longer there afterwards
711 711
712 712 $ cat > .hg/hgrc <<EOF
713 713 > [hooks]
714 714 > pretxnchangegroup.forbid0 = hg tip -q
715 715 > pretxnchangegroup.forbid1 = sh -c "printenv.py --line pretxnchangegroup.forbid 1"
716 716 > EOF
717 717 $ hg pull ../a
718 718 pulling from ../a
719 719 searching for changes
720 720 adding changesets
721 721 adding manifests
722 722 adding file changes
723 723 4:539e4b31b6dc
724 724 pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1
725 725 HG_HOOKTYPE=pretxnchangegroup
726 726 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
727 727 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
728 728 HG_PENDING=$TESTTMP/b
729 729 HG_SOURCE=pull
730 730 HG_TXNID=TXN:$ID$
731 731 HG_TXNNAME=pull
732 732 file:/*/$TESTTMP/a (glob)
733 733 HG_URL=file:$TESTTMP/a
734 734
735 735 transaction abort!
736 736 rollback completed
737 737 abort: pretxnchangegroup.forbid1 hook exited with status 1
738 738 [255]
739 739 $ hg -q tip
740 740 3:07f3376c1e65
741 741
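(In-process, the incoming changesets are bounded by the node and node_last
arguments, mirroring HG_NODE and HG_NODE_LAST above; while the hook runs the
transaction is still open, so the revisions can be inspected and the whole
group rejected. A hypothetical sketch:)

    # scanincoming.py - hypothetical pretxnchangegroup hook:
    #   pretxnchangegroup.scan = python:/path/to/scanincoming.py:scan
    def scan(ui, repo, hooktype, node=None, node_last=None, **kwargs):
        first = repo[node].rev()       # first incoming changeset
        last = repo[node_last].rev()   # last incoming changeset
        for rev in range(first, last + 1):
            ctx = repo[rev]
            if b'forbidden' in ctx.description():
                ui.warn(b'rejecting incoming changeset %s\n' % ctx.hex())
                return True  # truthy return rolls back the transaction
        return False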
742 742 outgoing hooks can see env vars
743 743
744 744 $ rm .hg/hgrc
745 745 $ cat > ../a/.hg/hgrc <<EOF
746 746 > [hooks]
747 747 > preoutgoing = sh -c "printenv.py --line preoutgoing"
748 748 > outgoing = sh -c "printenv.py --line outgoing"
749 749 > EOF
750 750 $ hg pull ../a
751 751 pulling from ../a
752 752 searching for changes
753 753 preoutgoing hook: HG_HOOKNAME=preoutgoing
754 754 HG_HOOKTYPE=preoutgoing
755 755 HG_SOURCE=pull
756 756
757 757 outgoing hook: HG_HOOKNAME=outgoing
758 758 HG_HOOKTYPE=outgoing
759 759 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
760 760 HG_SOURCE=pull
761 761
762 762 adding changesets
763 763 adding manifests
764 764 adding file changes
765 765 adding remote bookmark quux
766 766 added 1 changesets with 1 changes to 1 files
767 767 new changesets 539e4b31b6dc
768 768 (run 'hg update' to get a working copy)
769 769 $ hg rollback
770 770 repository tip rolled back to revision 3 (undo pull)
771 771
772 772 preoutgoing hook can prevent outgoing changes
773 773
774 774 $ cat >> ../a/.hg/hgrc <<EOF
775 775 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
776 776 > EOF
777 777 $ hg pull ../a
778 778 pulling from ../a
779 779 searching for changes
780 780 preoutgoing hook: HG_HOOKNAME=preoutgoing
781 781 HG_HOOKTYPE=preoutgoing
782 782 HG_SOURCE=pull
783 783
784 784 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
785 785 HG_HOOKTYPE=preoutgoing
786 786 HG_SOURCE=pull
787 787
788 788 abort: preoutgoing.forbid hook exited with status 1
789 789 [255]
790 790
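(preoutgoing hooks can also discriminate on how the data is leaving: the
source argument mirrors HG_SOURCE above, b'pull' here and b'clone' for the
local clones tested next. A hypothetical sketch:)

    # gateoutgoing.py - hypothetical preoutgoing hook:
    #   preoutgoing.gate = python:/path/to/gateoutgoing.py:gate
    def gate(ui, repo, hooktype, source=None, **kwargs):
        if source == b'clone':
            ui.warn(b'local clones of this repository are not allowed\n')
            return True  # veto; the clone aborts
        return False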
791 791 outgoing hooks work for local clones
792 792
793 793 $ cd ..
794 794 $ cat > a/.hg/hgrc <<EOF
795 795 > [hooks]
796 796 > preoutgoing = sh -c "printenv.py --line preoutgoing"
797 797 > outgoing = sh -c "printenv.py --line outgoing"
798 798 > EOF
799 799 $ hg clone a c
800 800 preoutgoing hook: HG_HOOKNAME=preoutgoing
801 801 HG_HOOKTYPE=preoutgoing
802 802 HG_SOURCE=clone
803 803
804 804 outgoing hook: HG_HOOKNAME=outgoing
805 805 HG_HOOKTYPE=outgoing
806 806 HG_NODE=0000000000000000000000000000000000000000
807 807 HG_SOURCE=clone
808 808
809 809 updating to branch default
810 810 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
811 811 $ rm -rf c
812 812
813 813 preoutgoing hook can prevent outgoing changes for local clones
814 814
815 815 $ cat >> a/.hg/hgrc <<EOF
816 816 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
817 817 > EOF
818 818 $ hg clone a zzz
819 819 preoutgoing hook: HG_HOOKNAME=preoutgoing
820 820 HG_HOOKTYPE=preoutgoing
821 821 HG_SOURCE=clone
822 822
823 823 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
824 824 HG_HOOKTYPE=preoutgoing
825 825 HG_SOURCE=clone
826 826
827 827 abort: preoutgoing.forbid hook exited with status 1
828 828 [255]
829 829
830 830 $ cd "$TESTTMP/b"
831 831
832 832 $ cat > hooktests.py <<EOF
833 833 > from __future__ import print_function
834 834 > from mercurial import (
835 835 > error,
836 836 > pycompat,
837 837 > )
838 838 >
839 839 > uncallable = 0
840 840 >
841 841 > def printargs(ui, args):
842 842 > a = list(pycompat.byteskwargs(args).items())
843 843 > a.sort()
844 844 > ui.write(b'hook args:\n')
845 845 > for k, v in a:
846 846 > ui.write(b' %s %s\n' % (k, v))
847 847 >
848 848 > def passhook(ui, repo, **args):
849 849 > printargs(ui, args)
850 850 >
851 851 > def failhook(ui, repo, **args):
852 852 > printargs(ui, args)
853 853 > return True
854 854 >
855 855 > class LocalException(Exception):
856 856 > pass
857 857 >
858 858 > def raisehook(**args):
859 859 > raise LocalException('exception from hook')
860 860 >
861 861 > def aborthook(**args):
862 862 > raise error.Abort(b'raise abort from hook')
863 863 >
864 864 > def brokenhook(**args):
865 865 > return 1 + {}
866 866 >
867 867 > def verbosehook(ui, **args):
868 868 > ui.note(b'verbose output from hook\n')
869 869 >
870 870 > def printtags(ui, repo, **args):
871 871 > ui.write(b'[%s]\n' % b', '.join(sorted(repo.tags())))
872 872 >
873 873 > class container(object):
874 874 > unreachable = 1
875 875 > EOF
876 876
877 877 $ cat > syntaxerror.py << NO_CHECK_EOF
878 878 > (foo
879 879 > NO_CHECK_EOF
880 880
881 881 test python hooks
882 882
883 883 #if windows
884 884 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
885 885 #else
886 886 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
887 887 #endif
888 888 $ export PYTHONPATH
889 889
890 890 $ echo '[hooks]' > ../a/.hg/hgrc
891 891 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
892 892 $ hg pull ../a 2>&1 | grep 'raised an exception'
893 893 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
894 894
895 895 $ echo '[hooks]' > ../a/.hg/hgrc
896 896 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
897 897 $ hg pull ../a 2>&1 | grep 'raised an exception'
898 898 error: preoutgoing.raise hook raised an exception: exception from hook
899 899
900 900 $ echo '[hooks]' > ../a/.hg/hgrc
901 901 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
902 902 $ hg pull ../a
903 903 pulling from ../a
904 904 searching for changes
905 905 error: preoutgoing.abort hook failed: raise abort from hook
906 906 abort: raise abort from hook
907 907 [255]
908 908
909 909 $ echo '[hooks]' > ../a/.hg/hgrc
910 910 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
911 911 $ hg pull ../a
912 912 pulling from ../a
913 913 searching for changes
914 914 hook args:
915 915 hooktype preoutgoing
916 916 source pull
917 917 abort: preoutgoing.fail hook failed
918 918 [255]
919 919
920 920 $ echo '[hooks]' > ../a/.hg/hgrc
921 921 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
922 922 $ hg pull ../a
923 923 pulling from ../a
924 924 searching for changes
925 925 abort: preoutgoing.uncallable hook is invalid: "hooktests.uncallable" is not callable
926 926 [255]
927 927
928 928 $ echo '[hooks]' > ../a/.hg/hgrc
929 929 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
930 930 $ hg pull ../a
931 931 pulling from ../a
932 932 searching for changes
933 933 abort: preoutgoing.nohook hook is invalid: "hooktests.nohook" is not defined
934 934 [255]
935 935
936 936 $ echo '[hooks]' > ../a/.hg/hgrc
937 937 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
938 938 $ hg pull ../a
939 939 pulling from ../a
940 940 searching for changes
941 941 abort: preoutgoing.nomodule hook is invalid: "nomodule" not in a module
942 942 [255]
943 943
944 944 $ echo '[hooks]' > ../a/.hg/hgrc
945 945 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
946 946 $ hg pull ../a
947 947 pulling from ../a
948 948 searching for changes
949 949 abort: preoutgoing.badmodule hook is invalid: import of "nomodule" failed
950 950 (run with --traceback for stack trace)
951 951 [255]
952 952
953 953 $ echo '[hooks]' > ../a/.hg/hgrc
954 954 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
955 955 $ hg pull ../a
956 956 pulling from ../a
957 957 searching for changes
958 958 abort: preoutgoing.unreachable hook is invalid: import of "hooktests.container" failed
959 959 (run with --traceback for stack trace)
960 960 [255]
961 961
962 962 $ echo '[hooks]' > ../a/.hg/hgrc
963 963 $ echo 'preoutgoing.syntaxerror = python:syntaxerror.syntaxerror' >> ../a/.hg/hgrc
964 964 $ hg pull ../a
965 965 pulling from ../a
966 966 searching for changes
967 967 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
968 968 (run with --traceback for stack trace)
969 969 [255]
970 970
971 971 $ hg pull ../a --traceback 2>&1 | egrep 'pulling|searching|^exception|Traceback|SyntaxError|ImportError|ModuleNotFoundError|HookLoadError|abort'
972 972 pulling from ../a
973 973 searching for changes
974 974 exception from first failed import attempt:
975 975 Traceback (most recent call last):
976 976 SyntaxError: * (glob)
977 977 exception from second failed import attempt:
978 978 Traceback (most recent call last): (py3 !)
979 979 SyntaxError: * (glob) (py3 !)
980 980 Traceback (most recent call last):
981 981 ImportError: No module named hgext_syntaxerror (no-py3 !)
982 982 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
983 983 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
984 984 Traceback (most recent call last):
985 985 SyntaxError: * (glob) (py3 !)
986 986 Traceback (most recent call last): (py3 !)
987 987 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
988 988 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
989 989 Traceback (most recent call last): (py3 !)
990 990 HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (no-py3 !)
991 991 raise error.HookLoadError( (py38 !)
992 992 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (py3 !)
993 993 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
994 994
995 995 $ echo '[hooks]' > ../a/.hg/hgrc
996 996 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
997 997 $ hg pull ../a
998 998 pulling from ../a
999 999 searching for changes
1000 1000 hook args:
1001 1001 hooktype preoutgoing
1002 1002 source pull
1003 1003 adding changesets
1004 1004 adding manifests
1005 1005 adding file changes
1006 1006 adding remote bookmark quux
1007 1007 added 1 changesets with 1 changes to 1 files
1008 1008 new changesets 539e4b31b6dc
1009 1009 (run 'hg update' to get a working copy)
1010 1010
1011 1011 post- python hooks that fail to *run* don't cause an abort
1012 1012 $ rm ../a/.hg/hgrc
1013 1013 $ echo '[hooks]' > .hg/hgrc
1014 1014 $ echo 'post-pull.broken = python:hooktests.brokenhook' >> .hg/hgrc
1015 1015 $ hg pull ../a
1016 1016 pulling from ../a
1017 1017 searching for changes
1018 1018 no changes found
1019 1019 error: post-pull.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
1020 1020 (run with --traceback for stack trace)
1021 1021
1022 1022 but post- python hooks that fail to *load* do
1023 1023 $ echo '[hooks]' > .hg/hgrc
1024 1024 $ echo 'post-pull.nomodule = python:nomodule' >> .hg/hgrc
1025 1025 $ hg pull ../a
1026 1026 pulling from ../a
1027 1027 searching for changes
1028 1028 no changes found
1029 1029 abort: post-pull.nomodule hook is invalid: "nomodule" not in a module
1030 1030 [255]
1031 1031
1032 1032 $ echo '[hooks]' > .hg/hgrc
1033 1033 $ echo 'post-pull.badmodule = python:nomodule.nowhere' >> .hg/hgrc
1034 1034 $ hg pull ../a
1035 1035 pulling from ../a
1036 1036 searching for changes
1037 1037 no changes found
1038 1038 abort: post-pull.badmodule hook is invalid: import of "nomodule" failed
1039 1039 (run with --traceback for stack trace)
1040 1040 [255]
1041 1041
1042 1042 $ echo '[hooks]' > .hg/hgrc
1043 1043 $ echo 'post-pull.nohook = python:hooktests.nohook' >> .hg/hgrc
1044 1044 $ hg pull ../a
1045 1045 pulling from ../a
1046 1046 searching for changes
1047 1047 no changes found
1048 1048 abort: post-pull.nohook hook is invalid: "hooktests.nohook" is not defined
1049 1049 [255]
1050 1050
1051 1051 make sure --traceback works
1052 1052
1053 1053 $ echo '[hooks]' > .hg/hgrc
1054 1054 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
1055 1055
1056 1056 $ echo aa > a
1057 1057 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
1058 1058 Traceback (most recent call last):
1059 1059
1060 1060 $ cd ..
1061 1061 $ hg init c
1062 1062 $ cd c
1063 1063
1064 1064 $ cat > hookext.py <<EOF
1065 1065 > def autohook(ui, **args):
1066 1066 > ui.write(b'Automatically installed hook\n')
1067 1067 >
1068 1068 > def reposetup(ui, repo):
1069 1069 > repo.ui.setconfig(b"hooks", b"commit.auto", autohook)
1070 1070 > EOF
1071 1071 $ echo '[extensions]' >> .hg/hgrc
1072 1072 $ echo 'hookext = hookext.py' >> .hg/hgrc
1073 1073
1074 1074 $ touch foo
1075 1075 $ hg add foo
1076 1076 $ hg ci -d '0 0' -m 'add foo'
1077 1077 Automatically installed hook
1078 1078 $ echo >> foo
1079 1079 $ hg ci --debug -d '0 0' -m 'change foo'
1080 1080 committing files:
1081 1081 foo
1082 1082 committing manifest
1083 1083 committing changelog
1084 1084 updating the branch cache
1085 1085 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
1086 1086 calling hook commit.auto: hgext_hookext.autohook
1087 1087 Automatically installed hook
1088 1088
1089 1089 $ hg showconfig hooks
1090 1090 hooks.commit.auto=<function autohook at *> (glob)
1091 1091
1092 1092 test python hook configured with python:[file]:[hook] syntax
1093 1093
1094 1094 $ cd ..
1095 1095 $ mkdir d
1096 1096 $ cd d
1097 1097 $ hg init repo
1098 1098 $ mkdir hooks
1099 1099
1100 1100 $ cd hooks
1101 1101 $ cat > testhooks.py <<EOF
1102 1102 > def testhook(ui, **args):
1103 1103 > ui.write(b'hook works\n')
1104 1104 > EOF
1105 1105 $ echo '[hooks]' > ../repo/.hg/hgrc
1106 1106 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
1107 1107
1108 1108 $ cd ../repo
1109 1109 $ hg commit -d '0 0'
1110 1110 hook works
1111 1111 nothing changed
1112 1112 [1]
1113 1113
1114 1114 $ echo '[hooks]' > .hg/hgrc
1115 1115 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
1116 1116 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
1117 1117
1118 1118 $ hg up null
1119 1119 loading update.ne hook failed:
1120 1120 abort: $ENOENT$: '$TESTTMP/d/repo/nonexistent.py'
1121 1121 [255]
1122 1122
1123 1123 $ hg id
1124 1124 loading pre-identify.npmd hook failed:
1125 1125 abort: No module named repo! (no-py3 !)
1126 1126 abort: No module named 'repo'! (py3 !)
1127 1127 [255]
1128 1128
1129 1129 $ cd ../../b
1130 1130
1131 1131 make sure --traceback works on hook import failure
1132 1132
1133 1133 $ cat > importfail.py <<EOF
1134 1134 > import somebogusmodule
1135 1135 > # dereference something in the module to force demandimport to load it
1136 1136 > somebogusmodule.whatever
1137 1137 > EOF
1138 1138
1139 1139 $ echo '[hooks]' > .hg/hgrc
1140 1140 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
1141 1141
1142 1142 $ echo a >> a
1143 1143 $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
1144 1144 exception from first failed import attempt:
1145 1145 Traceback (most recent call last):
1146 1146 ImportError: No module named somebogusmodule (no-py3 !)
1147 1147 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1148 1148 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1149 1149 exception from second failed import attempt:
1150 1150 Traceback (most recent call last): (py3 !)
1151 1151 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1152 1152 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1153 1153 Traceback (most recent call last): (py3 !)
1154 1154 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1155 1155 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1156 1156 Traceback (most recent call last): (py3 !)
1157 1157 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1158 1158 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1159 1159 Traceback (most recent call last):
1160 1160 ImportError: No module named hgext_importfail (no-py3 !)
1161 1161 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1162 1162 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1163 1163 Traceback (most recent call last):
1164 1164 HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (no-py3 !)
1165 1165 raise error.HookLoadError( (py38 !)
1166 1166 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (py3 !)
1167 1167 abort: precommit.importfail hook is invalid: import of "importfail" failed
1168 1168
1169 1169 Issue1827: Hooks Update & Commit not completely post operation
1170 1170
1171 1171 commit and update hooks should run after command completion. The largefiles
1172 1172 case demonstrates a recursive wlock, showing that the hook doesn't run until
1173 1173 the final release (and dirstate flush).
1174 1174
1175 1175 $ echo '[hooks]' > .hg/hgrc
1176 1176 $ echo 'commit = hg id' >> .hg/hgrc
1177 1177 $ echo 'update = hg id' >> .hg/hgrc
1178 1178 $ echo bb > a
1179 1179 $ hg ci -ma
1180 1180 223eafe2750c tip
1181 1181 $ hg up 0 --config extensions.largefiles=
1182 1182 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
1183 1183 cb9a9f314b8b
1184 1184 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1185 1185
1186 1186 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
1187 1187 that is passed to pre/post hooks
1188 1188
1189 1189 $ echo '[hooks]' > .hg/hgrc
1190 1190 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
1191 1191 $ hg id
1192 1192 cb9a9f314b8b
1193 1193 $ hg id --verbose
1194 1194 calling hook pre-identify: hooktests.verbosehook
1195 1195 verbose output from hook
1196 1196 cb9a9f314b8b
1197 1197
1198 1198 Ensure hooks can be prioritized
1199 1199
1200 1200 $ echo '[hooks]' > .hg/hgrc
1201 1201 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
1202 1202 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
1203 1203 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
1204 1204 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
1205 1205 $ hg id --verbose
1206 1206 calling hook pre-identify.b: hooktests.verbosehook
1207 1207 verbose output from hook
1208 1208 calling hook pre-identify.a: hooktests.verbosehook
1209 1209 verbose output from hook
1210 1210 calling hook pre-identify.c: hooktests.verbosehook
1211 1211 verbose output from hook
1212 1212 cb9a9f314b8b
1213 1213
1214 1214 new tags must be visible in pretxncommit (issue3210)
1215 1215
1216 1216 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
1217 1217 $ hg tag -f foo
1218 1218 [a, foo, tip]
1219 1219
1220 1220 post-init hooks must not crash (issue4983)
1221 1221 This also creates the `to` repo for the next test block.
1222 1222
1223 1223 $ cd ..
1224 1224 $ cat << EOF >> hgrc-with-post-init-hook
1225 1225 > [hooks]
1226 1226 > post-init = sh -c "printenv.py --line post-init"
1227 1227 > EOF
1228 1228 $ HGRCPATH=hgrc-with-post-init-hook hg init to
1229 1229 post-init hook: HG_ARGS=init to
1230 1230 HG_HOOKNAME=post-init
1231 1231 HG_HOOKTYPE=post-init
1232 1232 HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''}
1233 1233 HG_PATS=['to']
1234 1234 HG_RESULT=0
1235 1235
1236 1236
1237 1237 new commits must be visible in pretxnchangegroup (issue3428)
1238 1238
1239 1239 $ echo '[hooks]' >> to/.hg/hgrc
1240 1240 $ echo 'prechangegroup = hg --traceback tip' >> to/.hg/hgrc
1241 1241 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
1242 1242 $ echo a >> to/a
1243 1243 $ hg --cwd to ci -Ama
1244 1244 adding a
1245 1245 $ hg clone to from
1246 1246 updating to branch default
1247 1247 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1248 1248 $ echo aa >> from/a
1249 1249 $ hg --cwd from ci -mb
1250 1250 $ hg --cwd from push
1251 1251 pushing to $TESTTMP/to
1252 1252 searching for changes
1253 1253 changeset: 0:cb9a9f314b8b
1254 1254 tag: tip
1255 1255 user: test
1256 1256 date: Thu Jan 01 00:00:00 1970 +0000
1257 1257 summary: a
1258 1258
1259 1259 adding changesets
1260 1260 adding manifests
1261 1261 adding file changes
1262 1262 changeset: 1:9836a07b9b9d
1263 1263 tag: tip
1264 1264 user: test
1265 1265 date: Thu Jan 01 00:00:00 1970 +0000
1266 1266 summary: b
1267 1267
1268 1268 added 1 changesets with 1 changes to 1 files
1269 1269
1270 1270 pretxnclose hook failure should abort the transaction
1271 1271
1272 1272 $ hg init txnfailure
1273 1273 $ cd txnfailure
1274 1274 $ touch a && hg commit -Aqm a
1275 1275 $ cat >> .hg/hgrc <<EOF
1276 1276 > [hooks]
1277 1277 > pretxnclose.error = exit 1
1278 1278 > EOF
1279 1279 $ hg strip -r 0 --config extensions.strip=
1280 1280 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1281 1281 saved backup bundle to * (glob)
1282 1282 transaction abort!
1283 1283 rollback completed
1284 1284 strip failed, backup bundle stored in * (glob)
1285 1285 abort: pretxnclose.error hook exited with status 1
1286 1286 [255]
1287 1287 $ hg recover
1288 1288 no interrupted transaction available
1289 1289 [1]
1290 1290 $ cd ..
1291 1291
1292 1292 check whether HG_PENDING makes pending changes visible to an external
1293 1293 hook only in the related repositories.
1294 1294
1295 1295 (emulate a concurrently running transaction by restoring the saved
1296 1296 .hg/store/00changelog.i.a in the subsequent test)
1297 1297
1298 1298 $ cat > $TESTTMP/savepending.sh <<EOF
1299 1299 > cp .hg/store/00changelog.i.a .hg/store/00changelog.i.a.saved
1300 1300 > exit 1 # to avoid adding new revision for subsequent tests
1301 1301 > EOF
1302 1302 $ cd a
1303 1303 $ hg tip -q
1304 1304 4:539e4b31b6dc
1305 1305 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
1306 1306 transaction abort!
1307 1307 rollback completed
1308 1308 abort: pretxnclose hook exited with status 1
1309 1309 [255]
1310 1310 $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
1311 1311
1312 1312 (check (in)visibility of new changeset while transaction running in
1313 1313 repo)
1314 1314
1315 1315 $ cat > $TESTTMP/checkpending.sh <<EOF
1316 1316 > echo '@a'
1317 1317 > hg -R "$TESTTMP/a" tip -q
1318 1318 > echo '@a/nested'
1319 1319 > hg -R "$TESTTMP/a/nested" tip -q
1320 1320 > exit 1 # to avoid adding new revision for subsequent tests
1321 1321 > EOF
1322 1322 $ hg init nested
1323 1323 $ cd nested
1324 1324 $ echo a > a
1325 1325 $ hg add a
1326 1326 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
1327 1327 @a
1328 1328 4:539e4b31b6dc
1329 1329 @a/nested
1330 1330 0:bf5e395ced2c
1331 1331 transaction abort!
1332 1332 rollback completed
1333 1333 abort: pretxnclose hook exited with status 1
1334 1334 [255]
1335 1335
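(For external hooks the contract is the HG_PENDING variable itself: it names
the root of the one repository whose open transaction should be visible, and
child hg processes started from the hook honour it. A hypothetical standalone
script, a simplified counterpart of checkpending.sh above:)

    # checkpending.py - hypothetical external hook body, wired up as e.g.
    #   pretxnclose.check = python3 /path/to/checkpending.py
    import os
    import subprocess

    root = os.environ.get('HG_PENDING', '')
    if root:
        # the child hg inherits HG_PENDING and therefore sees the
        # not-yet-committed tip of that repository (and only that one)
        subprocess.run(['hg', '-R', root, 'tip', '-q'], check=False)
    raise SystemExit(1)  # nonzero exit keeps the test repo unchanged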
1336 1336 Hooks from an untrusted hgrc are reported as failures
1337 1337 =====================================================
1338 1338
1339 1339 $ cat << EOF > $TESTTMP/untrusted.py
1340 1340 > from mercurial import scmutil, util
1341 1341 > def uisetup(ui):
1342 1342 > class untrustedui(ui.__class__):
1343 1343 > def _trusted(self, fp, f):
1344 1344 > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
1345 1345 > return False
1346 1346 > return super(untrustedui, self)._trusted(fp, f)
1347 1347 > ui.__class__ = untrustedui
1348 1348 > EOF
1349 1349 $ cat << EOF >> $HGRCPATH
1350 1350 > [extensions]
1351 1351 > untrusted=$TESTTMP/untrusted.py
1352 1352 > EOF
1353 1353 $ hg init untrusted
1354 1354 $ cd untrusted
1355 1355
1356 1356 Non-blocking hook
1357 1357 -----------------
1358 1358
1359 1359 $ cat << EOF >> .hg/hgrc
1360 1360 > [hooks]
1361 1361 > txnclose.testing=echo txnclose hook called
1362 1362 > EOF
1363 1363 $ touch a && hg commit -Aqm a
1364 1364 warning: untrusted hook txnclose.testing not executed
1365 1365 $ hg log
1366 1366 changeset: 0:3903775176ed
1367 1367 tag: tip
1368 1368 user: test
1369 1369 date: Thu Jan 01 00:00:00 1970 +0000
1370 1370 summary: a
1371 1371
1372 1372
1373 1373 Blocking hook
1374 1374 -------------
1375 1375
1376 1376 $ cat << EOF >> .hg/hgrc
1377 1377 > [hooks]
1378 1378 > pretxnclose.testing=echo pre-txnclose hook called
1379 1379 > EOF
1380 1380 $ touch b && hg commit -Aqm a
1381 1381 transaction abort!
1382 1382 rollback completed
1383 1383 abort: untrusted hook pretxnclose.testing not executed
1384 1384 (see 'hg help config.trusted')
1385 1385 [255]
1386 1386 $ hg log
1387 1387 changeset: 0:3903775176ed
1388 1388 tag: tip
1389 1389 user: test
1390 1390 date: Thu Jan 01 00:00:00 1970 +0000
1391 1391 summary: a
1392 1392