##// END OF EJS Templates
transaction: no longer explicitly cache phaseroots...
marmoute -
r51088:ab806355 default
parent child Browse files
Show More
@@ -1,4020 +1,4016 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import re
13 import re
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from concurrent import futures
18 from concurrent import futures
19 from typing import (
19 from typing import (
20 Optional,
20 Optional,
21 )
21 )
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullrev,
27 nullrev,
28 sha1nodeconstants,
28 sha1nodeconstants,
29 short,
29 short,
30 )
30 )
31 from .pycompat import (
31 from .pycompat import (
32 delattr,
32 delattr,
33 getattr,
33 getattr,
34 )
34 )
35 from . import (
35 from . import (
36 bookmarks,
36 bookmarks,
37 branchmap,
37 branchmap,
38 bundle2,
38 bundle2,
39 bundlecaches,
39 bundlecaches,
40 changegroup,
40 changegroup,
41 color,
41 color,
42 commit,
42 commit,
43 context,
43 context,
44 dirstate,
44 dirstate,
45 discovery,
45 discovery,
46 encoding,
46 encoding,
47 error,
47 error,
48 exchange,
48 exchange,
49 extensions,
49 extensions,
50 filelog,
50 filelog,
51 hook,
51 hook,
52 lock as lockmod,
52 lock as lockmod,
53 match as matchmod,
53 match as matchmod,
54 mergestate as mergestatemod,
54 mergestate as mergestatemod,
55 mergeutil,
55 mergeutil,
56 namespaces,
56 namespaces,
57 narrowspec,
57 narrowspec,
58 obsolete,
58 obsolete,
59 pathutil,
59 pathutil,
60 phases,
60 phases,
61 pushkey,
61 pushkey,
62 pycompat,
62 pycompat,
63 rcutil,
63 rcutil,
64 repoview,
64 repoview,
65 requirements as requirementsmod,
65 requirements as requirementsmod,
66 revlog,
66 revlog,
67 revset,
67 revset,
68 revsetlang,
68 revsetlang,
69 scmutil,
69 scmutil,
70 sparse,
70 sparse,
71 store as storemod,
71 store as storemod,
72 subrepoutil,
72 subrepoutil,
73 tags as tagsmod,
73 tags as tagsmod,
74 transaction,
74 transaction,
75 txnutil,
75 txnutil,
76 util,
76 util,
77 vfs as vfsmod,
77 vfs as vfsmod,
78 wireprototypes,
78 wireprototypes,
79 )
79 )
80
80
81 from .interfaces import (
81 from .interfaces import (
82 repository,
82 repository,
83 util as interfaceutil,
83 util as interfaceutil,
84 )
84 )
85
85
86 from .utils import (
86 from .utils import (
87 hashutil,
87 hashutil,
88 procutil,
88 procutil,
89 stringutil,
89 stringutil,
90 urlutil,
90 urlutil,
91 )
91 )
92
92
93 from .revlogutils import (
93 from .revlogutils import (
94 concurrency_checker as revlogchecker,
94 concurrency_checker as revlogchecker,
95 constants as revlogconst,
95 constants as revlogconst,
96 sidedata as sidedatamod,
96 sidedata as sidedatamod,
97 )
97 )
98
98
# Convenience aliases re-exported at module level so callers and extensions
# can reach them without importing the underlying modules directly.
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# Dirstate-related files matched by this pattern are skipped during rollback
# handling (the dirstate itself and the narrowspec's dirstate variant).
RE_SKIP_DIRSTATE_ROLLBACK = re.compile(b"^(dirstate|narrowspec.dirstate).*")

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
109
109
110
110
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        # Class-level access returns the descriptor itself, mirroring the
        # usual property protocol.
        if repo is None:
            return self
        # A filtered repo never owns the cached entry; always consult (and
        # let the superclass populate) the unfiltered repo's __dict__.
        unfiltered = repo.unfiltered()
        missing = object()
        cached = unfiltered.__dict__.get(self.sname, missing)
        if cached is not missing:
            return cached
        return super(_basefilecache, self).__get__(unfiltered, type)

    def set(self, repo, value):
        # Writes likewise always target the unfiltered repo.
        return super(_basefilecache, self).set(repo.unfiltered(), value)
127
127
128
128
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # Record every tracked file in the module-wide registry; b'plain'
        # marks the path as relative to the working vfs (.hg/).
        _cachedfiles.update((tracked, b'plain') for tracked in paths)

    def join(self, obj, fname):
        return obj.vfs.join(fname)
139
139
140
140
class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # b'' marks the path as relative to the store vfs (.hg/store/).
        _cachedfiles.update((tracked, b'') for tracked in paths)

    def join(self, obj, fname):
        return obj.sjoin(fname)
151
151
152
152
class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        # Both the changelog index and its persistent-nodemap docket are
        # registered up front, even if the docket may not exist on disk.
        for tracked in (b'00changelog.i', b'00changelog.n'):
            _cachedfiles.add((tracked, b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        # The nodemap docket is only written when persistent-nodemap is on.
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths
166
166
167
167
class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        # Both the manifest index and its persistent-nodemap docket are
        # registered up front, even if the docket may not exist on disk.
        for tracked in (b'00manifest.i', b'00manifest.n'):
            _cachedfiles.add((tracked, b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        # The nodemap docket is only written when persistent-nodemap is on.
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths
181
181
182
182
class mixedrepostorecache(_basefilecache):
    """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        # b'plain' means relative to .hg/, b'' means relative to the store;
        # anything else is a programming error on the caller's side.
        if location == b'plain':
            return obj.vfs.join(fname)
        if location != b'':
            raise error.ProgrammingError(
                b'unexpected location: %s' % location
            )
        return obj.sjoin(fname)
202
202
203
203
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    # Filecache entries only ever live on the unfiltered repo.
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
213
213
214
214
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # Filtered view: delegate to the unfiltered repo's attribute so
            # the value is computed/cached there exactly once.
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
223
223
224
224
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Cache on the instance the property was accessed through (which may
        # be a filtered view), rather than redirecting to the unfiltered repo.
        object.__setattr__(obj, self.name, value)
230
230
231
231
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # A cached property lives directly in the unfiltered repo's __dict__.
    return name in repo.unfiltered().__dict__
235
235
236
236
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    # functools.wraps keeps the original name/docstring on the wrapper.
    @functools.wraps(orig)
    def inner(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return inner
245
245
246
246
# Wire-protocol capabilities advertised by modern local peers.
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
# Legacy peers additionally support the pre-bundle2 changegroupsubset command.
legacycaps = moderncaps.union({b'changegroupsubset'})
256
256
257
257
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    """Command executor running commands directly against a local peer.

    Execution is synchronous: each ``callcommand()`` runs the command
    immediately and returns an already-resolved future.
    """

    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # Enforce the executor state machine: no new commands after
        # sendcommands() or close().
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            # Resolve the future with the exception so callers observe it
            # from result(), matching real asynchronous executors.
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        # Nothing to flush: commands were already executed eagerly.
        self._sent = True

    def close(self):
        self._closed = True
302
302
303
303
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None):
        super(localpeer, self).__init__(repo.ui, path=path)

        if caps is None:
            caps = moderncaps.copy()
        # Expose only the 'served' view so changesets that would be hidden
        # over the wire stay hidden through the local peer API too.
        self._repo = repo.filtered(b'served')

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        # getbundlechunks() returns (info, chunks); only the chunks are
        # needed here.
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # Translate a push race into the wire-level error callers expect.
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
449
449
450
450
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        # ``nodes`` are the roots of the missing set; bundle everything from
        # there up to the current repository heads.
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.
480
480
481
481
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirements.
featuresetupfuncs = set()
490
490
491
491
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    # Fail early with a clear message if the share source vanished.
    if not sharedvfs.exists():
        msg = _(b'.hg/sharedpath points to nonexistent directory %s')
        raise error.RepoError(msg % sharedvfs.base)
    return sharedvfs
515
515
516
516
517 def _readrequires(vfs, allowmissing):
517 def _readrequires(vfs, allowmissing):
518 """reads the require file present at root of this vfs
518 """reads the require file present at root of this vfs
519 and return a set of requirements
519 and return a set of requirements
520
520
521 If allowmissing is True, we suppress FileNotFoundError if raised"""
521 If allowmissing is True, we suppress FileNotFoundError if raised"""
522 # requires file contains a newline-delimited list of
522 # requires file contains a newline-delimited list of
523 # features/capabilities the opener (us) must have in order to use
523 # features/capabilities the opener (us) must have in order to use
524 # the repository. This file was introduced in Mercurial 0.9.2,
524 # the repository. This file was introduced in Mercurial 0.9.2,
525 # which means very old repositories may not have one. We assume
525 # which means very old repositories may not have one. We assume
526 # a missing file translates to no requirements.
526 # a missing file translates to no requirements.
527 read = vfs.tryread if allowmissing else vfs.read
527 read = vfs.tryread if allowmissing else vfs.read
528 return set(read(b'requires').splitlines())
528 return set(read(b'requires').splitlines())
529
529
530
530
531 def makelocalrepository(baseui, path: bytes, intents=None):
531 def makelocalrepository(baseui, path: bytes, intents=None):
532 """Create a local repository object.
532 """Create a local repository object.
533
533
534 Given arguments needed to construct a local repository, this function
534 Given arguments needed to construct a local repository, this function
535 performs various early repository loading functionality (such as
535 performs various early repository loading functionality (such as
536 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
536 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
537 the repository can be opened, derives a type suitable for representing
537 the repository can be opened, derives a type suitable for representing
538 that repository, and returns an instance of it.
538 that repository, and returns an instance of it.
539
539
540 The returned object conforms to the ``repository.completelocalrepository``
540 The returned object conforms to the ``repository.completelocalrepository``
541 interface.
541 interface.
542
542
543 The repository type is derived by calling a series of factory functions
543 The repository type is derived by calling a series of factory functions
544 for each aspect/interface of the final repository. These are defined by
544 for each aspect/interface of the final repository. These are defined by
545 ``REPO_INTERFACES``.
545 ``REPO_INTERFACES``.
546
546
547 Each factory function is called to produce a type implementing a specific
547 Each factory function is called to produce a type implementing a specific
548 interface. The cumulative list of returned types will be combined into a
548 interface. The cumulative list of returned types will be combined into a
549 new type and that type will be instantiated to represent the local
549 new type and that type will be instantiated to represent the local
550 repository.
550 repository.
551
551
552 The factory functions each receive various state that may be consulted
552 The factory functions each receive various state that may be consulted
553 as part of deriving a type.
553 as part of deriving a type.
554
554
555 Extensions should wrap these factory functions to customize repository type
555 Extensions should wrap these factory functions to customize repository type
556 creation. Note that an extension's wrapped function may be called even if
556 creation. Note that an extension's wrapped function may be called even if
557 that extension is not loaded for the repo being constructed. Extensions
557 that extension is not loaded for the repo being constructed. Extensions
558 should check if their ``__name__`` appears in the
558 should check if their ``__name__`` appears in the
559 ``extensionmodulenames`` set passed to the factory function and no-op if
559 ``extensionmodulenames`` set passed to the factory function and no-op if
560 not.
560 not.
561 """
561 """
562 ui = baseui.copy()
562 ui = baseui.copy()
563 # Prevent copying repo configuration.
563 # Prevent copying repo configuration.
564 ui.copy = baseui.copy
564 ui.copy = baseui.copy
565
565
566 # Working directory VFS rooted at repository root.
566 # Working directory VFS rooted at repository root.
567 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
567 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
568
568
569 # Main VFS for .hg/ directory.
569 # Main VFS for .hg/ directory.
570 hgpath = wdirvfs.join(b'.hg')
570 hgpath = wdirvfs.join(b'.hg')
571 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
571 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
572 # Whether this repository is shared one or not
572 # Whether this repository is shared one or not
573 shared = False
573 shared = False
574 # If this repository is shared, vfs pointing to shared repo
574 # If this repository is shared, vfs pointing to shared repo
575 sharedvfs = None
575 sharedvfs = None
576
576
577 # The .hg/ path should exist and should be a directory. All other
577 # The .hg/ path should exist and should be a directory. All other
578 # cases are errors.
578 # cases are errors.
579 if not hgvfs.isdir():
579 if not hgvfs.isdir():
580 try:
580 try:
581 hgvfs.stat()
581 hgvfs.stat()
582 except FileNotFoundError:
582 except FileNotFoundError:
583 pass
583 pass
584 except ValueError as e:
584 except ValueError as e:
585 # Can be raised on Python 3.8 when path is invalid.
585 # Can be raised on Python 3.8 when path is invalid.
586 raise error.Abort(
586 raise error.Abort(
587 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
587 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
588 )
588 )
589
589
590 raise error.RepoError(_(b'repository %s not found') % path)
590 raise error.RepoError(_(b'repository %s not found') % path)
591
591
592 requirements = _readrequires(hgvfs, True)
592 requirements = _readrequires(hgvfs, True)
593 shared = (
593 shared = (
594 requirementsmod.SHARED_REQUIREMENT in requirements
594 requirementsmod.SHARED_REQUIREMENT in requirements
595 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
595 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
596 )
596 )
597 storevfs = None
597 storevfs = None
598 if shared:
598 if shared:
599 # This is a shared repo
599 # This is a shared repo
600 sharedvfs = _getsharedvfs(hgvfs, requirements)
600 sharedvfs = _getsharedvfs(hgvfs, requirements)
601 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
601 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
602 else:
602 else:
603 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
603 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
604
604
605 # if .hg/requires contains the sharesafe requirement, it means
605 # if .hg/requires contains the sharesafe requirement, it means
606 # there exists a `.hg/store/requires` too and we should read it
606 # there exists a `.hg/store/requires` too and we should read it
607 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
607 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
608 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
608 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
609 # is not present, refer checkrequirementscompat() for that
609 # is not present, refer checkrequirementscompat() for that
610 #
610 #
611 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
611 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
612 # repository was shared the old way. We check the share source .hg/requires
612 # repository was shared the old way. We check the share source .hg/requires
613 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
613 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
614 # to be reshared
614 # to be reshared
615 hint = _(b"see `hg help config.format.use-share-safe` for more information")
615 hint = _(b"see `hg help config.format.use-share-safe` for more information")
616 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
616 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
617 if (
617 if (
618 shared
618 shared
619 and requirementsmod.SHARESAFE_REQUIREMENT
619 and requirementsmod.SHARESAFE_REQUIREMENT
620 not in _readrequires(sharedvfs, True)
620 not in _readrequires(sharedvfs, True)
621 ):
621 ):
622 mismatch_warn = ui.configbool(
622 mismatch_warn = ui.configbool(
623 b'share', b'safe-mismatch.source-not-safe.warn'
623 b'share', b'safe-mismatch.source-not-safe.warn'
624 )
624 )
625 mismatch_config = ui.config(
625 mismatch_config = ui.config(
626 b'share', b'safe-mismatch.source-not-safe'
626 b'share', b'safe-mismatch.source-not-safe'
627 )
627 )
628 mismatch_verbose_upgrade = ui.configbool(
628 mismatch_verbose_upgrade = ui.configbool(
629 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
629 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
630 )
630 )
631 if mismatch_config in (
631 if mismatch_config in (
632 b'downgrade-allow',
632 b'downgrade-allow',
633 b'allow',
633 b'allow',
634 b'downgrade-abort',
634 b'downgrade-abort',
635 ):
635 ):
636 # prevent cyclic import localrepo -> upgrade -> localrepo
636 # prevent cyclic import localrepo -> upgrade -> localrepo
637 from . import upgrade
637 from . import upgrade
638
638
639 upgrade.downgrade_share_to_non_safe(
639 upgrade.downgrade_share_to_non_safe(
640 ui,
640 ui,
641 hgvfs,
641 hgvfs,
642 sharedvfs,
642 sharedvfs,
643 requirements,
643 requirements,
644 mismatch_config,
644 mismatch_config,
645 mismatch_warn,
645 mismatch_warn,
646 mismatch_verbose_upgrade,
646 mismatch_verbose_upgrade,
647 )
647 )
648 elif mismatch_config == b'abort':
648 elif mismatch_config == b'abort':
649 raise error.Abort(
649 raise error.Abort(
650 _(b"share source does not support share-safe requirement"),
650 _(b"share source does not support share-safe requirement"),
651 hint=hint,
651 hint=hint,
652 )
652 )
653 else:
653 else:
654 raise error.Abort(
654 raise error.Abort(
655 _(
655 _(
656 b"share-safe mismatch with source.\nUnrecognized"
656 b"share-safe mismatch with source.\nUnrecognized"
657 b" value '%s' of `share.safe-mismatch.source-not-safe`"
657 b" value '%s' of `share.safe-mismatch.source-not-safe`"
658 b" set."
658 b" set."
659 )
659 )
660 % mismatch_config,
660 % mismatch_config,
661 hint=hint,
661 hint=hint,
662 )
662 )
663 else:
663 else:
664 requirements |= _readrequires(storevfs, False)
664 requirements |= _readrequires(storevfs, False)
665 elif shared:
665 elif shared:
666 sourcerequires = _readrequires(sharedvfs, False)
666 sourcerequires = _readrequires(sharedvfs, False)
667 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
667 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
668 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
668 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
669 mismatch_warn = ui.configbool(
669 mismatch_warn = ui.configbool(
670 b'share', b'safe-mismatch.source-safe.warn'
670 b'share', b'safe-mismatch.source-safe.warn'
671 )
671 )
672 mismatch_verbose_upgrade = ui.configbool(
672 mismatch_verbose_upgrade = ui.configbool(
673 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
673 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
674 )
674 )
675 if mismatch_config in (
675 if mismatch_config in (
676 b'upgrade-allow',
676 b'upgrade-allow',
677 b'allow',
677 b'allow',
678 b'upgrade-abort',
678 b'upgrade-abort',
679 ):
679 ):
680 # prevent cyclic import localrepo -> upgrade -> localrepo
680 # prevent cyclic import localrepo -> upgrade -> localrepo
681 from . import upgrade
681 from . import upgrade
682
682
683 upgrade.upgrade_share_to_safe(
683 upgrade.upgrade_share_to_safe(
684 ui,
684 ui,
685 hgvfs,
685 hgvfs,
686 storevfs,
686 storevfs,
687 requirements,
687 requirements,
688 mismatch_config,
688 mismatch_config,
689 mismatch_warn,
689 mismatch_warn,
690 mismatch_verbose_upgrade,
690 mismatch_verbose_upgrade,
691 )
691 )
692 elif mismatch_config == b'abort':
692 elif mismatch_config == b'abort':
693 raise error.Abort(
693 raise error.Abort(
694 _(
694 _(
695 b'version mismatch: source uses share-safe'
695 b'version mismatch: source uses share-safe'
696 b' functionality while the current share does not'
696 b' functionality while the current share does not'
697 ),
697 ),
698 hint=hint,
698 hint=hint,
699 )
699 )
700 else:
700 else:
701 raise error.Abort(
701 raise error.Abort(
702 _(
702 _(
703 b"share-safe mismatch with source.\nUnrecognized"
703 b"share-safe mismatch with source.\nUnrecognized"
704 b" value '%s' of `share.safe-mismatch.source-safe` set."
704 b" value '%s' of `share.safe-mismatch.source-safe` set."
705 )
705 )
706 % mismatch_config,
706 % mismatch_config,
707 hint=hint,
707 hint=hint,
708 )
708 )
709
709
710 # The .hg/hgrc file may load extensions or contain config options
710 # The .hg/hgrc file may load extensions or contain config options
711 # that influence repository construction. Attempt to load it and
711 # that influence repository construction. Attempt to load it and
712 # process any new extensions that it may have pulled in.
712 # process any new extensions that it may have pulled in.
713 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
713 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
714 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
714 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
715 extensions.loadall(ui)
715 extensions.loadall(ui)
716 extensions.populateui(ui)
716 extensions.populateui(ui)
717
717
718 # Set of module names of extensions loaded for this repository.
718 # Set of module names of extensions loaded for this repository.
719 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
719 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
720
720
721 supportedrequirements = gathersupportedrequirements(ui)
721 supportedrequirements = gathersupportedrequirements(ui)
722
722
723 # We first validate the requirements are known.
723 # We first validate the requirements are known.
724 ensurerequirementsrecognized(requirements, supportedrequirements)
724 ensurerequirementsrecognized(requirements, supportedrequirements)
725
725
726 # Then we validate that the known set is reasonable to use together.
726 # Then we validate that the known set is reasonable to use together.
727 ensurerequirementscompatible(ui, requirements)
727 ensurerequirementscompatible(ui, requirements)
728
728
729 # TODO there are unhandled edge cases related to opening repositories with
729 # TODO there are unhandled edge cases related to opening repositories with
730 # shared storage. If storage is shared, we should also test for requirements
730 # shared storage. If storage is shared, we should also test for requirements
731 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
731 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
732 # that repo, as that repo may load extensions needed to open it. This is a
732 # that repo, as that repo may load extensions needed to open it. This is a
733 # bit complicated because we don't want the other hgrc to overwrite settings
733 # bit complicated because we don't want the other hgrc to overwrite settings
734 # in this hgrc.
734 # in this hgrc.
735 #
735 #
736 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
736 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
737 # file when sharing repos. But if a requirement is added after the share is
737 # file when sharing repos. But if a requirement is added after the share is
738 # performed, thereby introducing a new requirement for the opener, we may
738 # performed, thereby introducing a new requirement for the opener, we may
739 # will not see that and could encounter a run-time error interacting with
739 # will not see that and could encounter a run-time error interacting with
740 # that shared store since it has an unknown-to-us requirement.
740 # that shared store since it has an unknown-to-us requirement.
741
741
742 # At this point, we know we should be capable of opening the repository.
742 # At this point, we know we should be capable of opening the repository.
743 # Now get on with doing that.
743 # Now get on with doing that.
744
744
745 features = set()
745 features = set()
746
746
747 # The "store" part of the repository holds versioned data. How it is
747 # The "store" part of the repository holds versioned data. How it is
748 # accessed is determined by various requirements. If `shared` or
748 # accessed is determined by various requirements. If `shared` or
749 # `relshared` requirements are present, this indicates current repository
749 # `relshared` requirements are present, this indicates current repository
750 # is a share and store exists in path mentioned in `.hg/sharedpath`
750 # is a share and store exists in path mentioned in `.hg/sharedpath`
751 if shared:
751 if shared:
752 storebasepath = sharedvfs.base
752 storebasepath = sharedvfs.base
753 cachepath = sharedvfs.join(b'cache')
753 cachepath = sharedvfs.join(b'cache')
754 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
754 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
755 else:
755 else:
756 storebasepath = hgvfs.base
756 storebasepath = hgvfs.base
757 cachepath = hgvfs.join(b'cache')
757 cachepath = hgvfs.join(b'cache')
758 wcachepath = hgvfs.join(b'wcache')
758 wcachepath = hgvfs.join(b'wcache')
759
759
760 # The store has changed over time and the exact layout is dictated by
760 # The store has changed over time and the exact layout is dictated by
761 # requirements. The store interface abstracts differences across all
761 # requirements. The store interface abstracts differences across all
762 # of them.
762 # of them.
763 store = makestore(
763 store = makestore(
764 requirements,
764 requirements,
765 storebasepath,
765 storebasepath,
766 lambda base: vfsmod.vfs(base, cacheaudited=True),
766 lambda base: vfsmod.vfs(base, cacheaudited=True),
767 )
767 )
768 hgvfs.createmode = store.createmode
768 hgvfs.createmode = store.createmode
769
769
770 storevfs = store.vfs
770 storevfs = store.vfs
771 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
771 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
772
772
773 if (
773 if (
774 requirementsmod.REVLOGV2_REQUIREMENT in requirements
774 requirementsmod.REVLOGV2_REQUIREMENT in requirements
775 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
775 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
776 ):
776 ):
777 features.add(repository.REPO_FEATURE_SIDE_DATA)
777 features.add(repository.REPO_FEATURE_SIDE_DATA)
778 # the revlogv2 docket introduced race condition that we need to fix
778 # the revlogv2 docket introduced race condition that we need to fix
779 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
779 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
780
780
781 # The cache vfs is used to manage cache files.
781 # The cache vfs is used to manage cache files.
782 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
782 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
783 cachevfs.createmode = store.createmode
783 cachevfs.createmode = store.createmode
784 # The cache vfs is used to manage cache files related to the working copy
784 # The cache vfs is used to manage cache files related to the working copy
785 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
785 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
786 wcachevfs.createmode = store.createmode
786 wcachevfs.createmode = store.createmode
787
787
788 # Now resolve the type for the repository object. We do this by repeatedly
788 # Now resolve the type for the repository object. We do this by repeatedly
789 # calling a factory function to produces types for specific aspects of the
789 # calling a factory function to produces types for specific aspects of the
790 # repo's operation. The aggregate returned types are used as base classes
790 # repo's operation. The aggregate returned types are used as base classes
791 # for a dynamically-derived type, which will represent our new repository.
791 # for a dynamically-derived type, which will represent our new repository.
792
792
793 bases = []
793 bases = []
794 extrastate = {}
794 extrastate = {}
795
795
796 for iface, fn in REPO_INTERFACES:
796 for iface, fn in REPO_INTERFACES:
797 # We pass all potentially useful state to give extensions tons of
797 # We pass all potentially useful state to give extensions tons of
798 # flexibility.
798 # flexibility.
799 typ = fn()(
799 typ = fn()(
800 ui=ui,
800 ui=ui,
801 intents=intents,
801 intents=intents,
802 requirements=requirements,
802 requirements=requirements,
803 features=features,
803 features=features,
804 wdirvfs=wdirvfs,
804 wdirvfs=wdirvfs,
805 hgvfs=hgvfs,
805 hgvfs=hgvfs,
806 store=store,
806 store=store,
807 storevfs=storevfs,
807 storevfs=storevfs,
808 storeoptions=storevfs.options,
808 storeoptions=storevfs.options,
809 cachevfs=cachevfs,
809 cachevfs=cachevfs,
810 wcachevfs=wcachevfs,
810 wcachevfs=wcachevfs,
811 extensionmodulenames=extensionmodulenames,
811 extensionmodulenames=extensionmodulenames,
812 extrastate=extrastate,
812 extrastate=extrastate,
813 baseclasses=bases,
813 baseclasses=bases,
814 )
814 )
815
815
816 if not isinstance(typ, type):
816 if not isinstance(typ, type):
817 raise error.ProgrammingError(
817 raise error.ProgrammingError(
818 b'unable to construct type for %s' % iface
818 b'unable to construct type for %s' % iface
819 )
819 )
820
820
821 bases.append(typ)
821 bases.append(typ)
822
822
823 # type() allows you to use characters in type names that wouldn't be
823 # type() allows you to use characters in type names that wouldn't be
824 # recognized as Python symbols in source code. We abuse that to add
824 # recognized as Python symbols in source code. We abuse that to add
825 # rich information about our constructed repo.
825 # rich information about our constructed repo.
826 name = pycompat.sysstr(
826 name = pycompat.sysstr(
827 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
827 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
828 )
828 )
829
829
830 cls = type(name, tuple(bases), {})
830 cls = type(name, tuple(bases), {})
831
831
832 return cls(
832 return cls(
833 baseui=baseui,
833 baseui=baseui,
834 ui=ui,
834 ui=ui,
835 origroot=path,
835 origroot=path,
836 wdirvfs=wdirvfs,
836 wdirvfs=wdirvfs,
837 hgvfs=hgvfs,
837 hgvfs=hgvfs,
838 requirements=requirements,
838 requirements=requirements,
839 supportedrequirements=supportedrequirements,
839 supportedrequirements=supportedrequirements,
840 sharedpath=storebasepath,
840 sharedpath=storebasepath,
841 store=store,
841 store=store,
842 cachevfs=cachevfs,
842 cachevfs=cachevfs,
843 wcachevfs=wcachevfs,
843 wcachevfs=wcachevfs,
844 features=features,
844 features=features,
845 intents=intents,
845 intents=intents,
846 )
846 )
847
847
848
848
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    Called during repository opening to load any additional config files
    or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    ``sharedvfs`` is a vfs object pointing at the source repo when the
    current repository is a shared one.
    """
    if not rcutil.use_repo_hgrc():
        return False

    def _readone(vfs, name, root):
        # Missing config files are not an error; just report failure.
        try:
            ui.readconfig(vfs.join(name), root=root)
            return True
        except IOError:
            return False

    loaded = False

    # The share source's config is read first so this repository's own
    # settings (read below) take precedence over it.
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        loaded = _readone(sharedvfs, b'hgrc', sharedvfs.base) or loaded

    loaded = _readone(hgvfs, b'hgrc', wdirvfs.base) or loaded
    loaded = _readone(hgvfs, b'hgrc-not-shared', wdirvfs.base) or loaded

    return loaded
895
895
896
896
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Requirements that imply an extension must be enabled automatically
    # when the requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement in sorted(autoextensions):
        if requirement not in requirements:
            continue
        for extname in autoextensions[requirement]:
            # Respect any explicit user configuration; only fill gaps.
            if not ui.hasconfig(b'extensions', extname):
                ui.setconfig(b'extensions', extname, b'', source=b'autoload')
922
922
923
923
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Seed with every requirement this module itself supports.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` entries, but only those registered by an
    # extension actually loaded in this ui instance.
    loadedmodules = {m.__name__ for n, m in extensions.extensions(ui)}
    for setupfn in featuresetupfuncs:
        if setupfn.__module__ in loadedmodules:
            setupfn(ui, supported)

    # Derive additional requirements from registered compression engines.
    for enginename in util.compengines:
        engine = util.compengines[enginename]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % enginename)
        if engine.name() == b'zstd':
            supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
946
946
947
947
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    unknown = set()

    for req in requirements:
        if req in supported:
            continue

        # An entry that is empty or doesn't begin with an alphanumeric
        # character indicates a mangled .hg/requires file.
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        unknown.add(req)

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
977
977
978
978
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    wantssparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    if wantssparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
1003
1003
1004
1004
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        # No 'store' requirement: use the basic flat store layout.
        return storemod.basicstore(path, vfstype)

    if requirementsmod.FNCACHE_REQUIREMENT in requirements:
        dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
        return storemod.fncachestore(path, vfstype, dotencode)

    return storemod.encodedstore(path, vfstype)
1015
1015
1016
1016
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    cachesize = ui.configint(b'format', b'manifestcachesize')
    if cachesize is not None:
        options[b'manifestcachesize'] = cachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # That format is so old that parsing opener options for it would be
    # pointless, so we simply flag it instead.
    isrevlogv1 = requirementsmod.REVLOGV1_REQUIREMENT in requirements
    isrevlogv2 = requirementsmod.REVLOGV2_REQUIREMENT in requirements
    if isrevlogv1 or isrevlogv2:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            options[b'copies-storage'] = b'extra'

    return options
1054
1054
1055
1055
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    ``ui`` supplies configuration values, ``requirements`` is the set of
    repository requirement strings, and ``features`` is accepted for
    interface compatibility.  Returns a dict of opener options consumed
    by the revlog layer.
    """

    options = {}
    options[b'flagprocessors'] = {}

    # Revlog format selection is driven purely by requirements.
    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    # Delta reuse: only look at the parent-reuse knob when delta reuse
    # itself is enabled; a None value falls back to the generaldelta
    # heuristic.
    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    # Sparse-read tuning knobs.
    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    # sparse-revlog implies generaldelta and bounds the delta chain length.
    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirement to co-exist because
        # strickly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    # Validate compression level ranges before handing them to the engines.
    zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
    options[b'zlib.level'] = zlib_level
    if zlib_level is not None and not (0 <= zlib_level <= 9):
        msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
        raise error.Abort(msg % zlib_level)
    zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
    options[b'zstd.level'] = zstd_level
    if zstd_level is not None and not (0 <= zstd_level <= 22):
        msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
        raise error.Abort(msg % zstd_level)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            # Unknown value: warn and fall back to the declared default.
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            # No fast implementation available: honor the slow-path policy.
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            # Unknown value: warn and fall back to the declared default.
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            # No fast implementation available: honor the slow-path policy.
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
1226
1226
1227
1227
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    Keyword arguments are accepted for factory-interface compatibility
    and are ignored here.
    """
    return localrepository
1231
1231
1232
1232
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        # Normalize away a single leading slash so the filelog is opened
        # with a store-relative path.
        normalized = path[1:] if path.startswith(b'/') else path
        return filelog.filelog(self.svfs, normalized)
1242
1242
1243
1243
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        # Normalize away a single leading slash so the filelog is opened
        # with a store-relative path.
        normalized = path[1:] if path.startswith(b'/') else path
        return filelog.narrowfilelog(
            self.svfs, normalized, self._storenarrowmatch
        )
1253
1253
1254
1254
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    # Revlog-backed storage always advertises these repository features.
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    return revlogfilestorage
1264
1264
1265
1265
# Repository interfaces paired with the factory functions that provide
# them.  ``makelocalrepository()`` walks this list in order to iteratively
# derive the final type for a local repository instance.  Each factory is
# wrapped in a lambda so the module-level function is looked up at call
# time and can therefore be wrapped by extensions.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
1275
1275
1276
1276
1277 @interfaceutil.implementer(repository.ilocalrepositorymain)
1277 @interfaceutil.implementer(repository.ilocalrepositorymain)
1278 class localrepository:
1278 class localrepository:
1279 """Main class for representing local repositories.
1279 """Main class for representing local repositories.
1280
1280
1281 All local repositories are instances of this class.
1281 All local repositories are instances of this class.
1282
1282
1283 Constructed on its own, instances of this class are not usable as
1283 Constructed on its own, instances of this class are not usable as
1284 repository objects. To obtain a usable repository object, call
1284 repository objects. To obtain a usable repository object, call
1285 ``hg.repository()``, ``localrepo.instance()``, or
1285 ``hg.repository()``, ``localrepo.instance()``, or
1286 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1286 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1287 ``instance()`` adds support for creating new repositories.
1287 ``instance()`` adds support for creating new repositories.
1288 ``hg.repository()`` adds more extension integration, including calling
1288 ``hg.repository()`` adds more extension integration, including calling
1289 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1289 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1290 used.
1290 used.
1291 """
1291 """
1292
1292
1293 _basesupported = {
1293 _basesupported = {
1294 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1294 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1295 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1295 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1296 requirementsmod.CHANGELOGV2_REQUIREMENT,
1296 requirementsmod.CHANGELOGV2_REQUIREMENT,
1297 requirementsmod.COPIESSDC_REQUIREMENT,
1297 requirementsmod.COPIESSDC_REQUIREMENT,
1298 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1298 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1299 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1299 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1300 requirementsmod.DOTENCODE_REQUIREMENT,
1300 requirementsmod.DOTENCODE_REQUIREMENT,
1301 requirementsmod.FNCACHE_REQUIREMENT,
1301 requirementsmod.FNCACHE_REQUIREMENT,
1302 requirementsmod.GENERALDELTA_REQUIREMENT,
1302 requirementsmod.GENERALDELTA_REQUIREMENT,
1303 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1303 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1304 requirementsmod.NODEMAP_REQUIREMENT,
1304 requirementsmod.NODEMAP_REQUIREMENT,
1305 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1305 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1306 requirementsmod.REVLOGV1_REQUIREMENT,
1306 requirementsmod.REVLOGV1_REQUIREMENT,
1307 requirementsmod.REVLOGV2_REQUIREMENT,
1307 requirementsmod.REVLOGV2_REQUIREMENT,
1308 requirementsmod.SHARED_REQUIREMENT,
1308 requirementsmod.SHARED_REQUIREMENT,
1309 requirementsmod.SHARESAFE_REQUIREMENT,
1309 requirementsmod.SHARESAFE_REQUIREMENT,
1310 requirementsmod.SPARSE_REQUIREMENT,
1310 requirementsmod.SPARSE_REQUIREMENT,
1311 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1311 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1312 requirementsmod.STORE_REQUIREMENT,
1312 requirementsmod.STORE_REQUIREMENT,
1313 requirementsmod.TREEMANIFEST_REQUIREMENT,
1313 requirementsmod.TREEMANIFEST_REQUIREMENT,
1314 }
1314 }
1315
1315
1316 # list of prefix for file which can be written without 'wlock'
1316 # list of prefix for file which can be written without 'wlock'
1317 # Extensions should extend this list when needed
1317 # Extensions should extend this list when needed
1318 _wlockfreeprefix = {
1318 _wlockfreeprefix = {
1319 # We migh consider requiring 'wlock' for the next
1319 # We migh consider requiring 'wlock' for the next
1320 # two, but pretty much all the existing code assume
1320 # two, but pretty much all the existing code assume
1321 # wlock is not needed so we keep them excluded for
1321 # wlock is not needed so we keep them excluded for
1322 # now.
1322 # now.
1323 b'hgrc',
1323 b'hgrc',
1324 b'requires',
1324 b'requires',
1325 # XXX cache is a complicatged business someone
1325 # XXX cache is a complicatged business someone
1326 # should investigate this in depth at some point
1326 # should investigate this in depth at some point
1327 b'cache/',
1327 b'cache/',
1328 # XXX bisect was still a bit too messy at the time
1328 # XXX bisect was still a bit too messy at the time
1329 # this changeset was introduced. Someone should fix
1329 # this changeset was introduced. Someone should fix
1330 # the remainig bit and drop this line
1330 # the remainig bit and drop this line
1331 b'bisect.state',
1331 b'bisect.state',
1332 }
1332 }
1333
1333
1334 def __init__(
1334 def __init__(
1335 self,
1335 self,
1336 baseui,
1336 baseui,
1337 ui,
1337 ui,
1338 origroot: bytes,
1338 origroot: bytes,
1339 wdirvfs: vfsmod.vfs,
1339 wdirvfs: vfsmod.vfs,
1340 hgvfs: vfsmod.vfs,
1340 hgvfs: vfsmod.vfs,
1341 requirements,
1341 requirements,
1342 supportedrequirements,
1342 supportedrequirements,
1343 sharedpath: bytes,
1343 sharedpath: bytes,
1344 store,
1344 store,
1345 cachevfs: vfsmod.vfs,
1345 cachevfs: vfsmod.vfs,
1346 wcachevfs: vfsmod.vfs,
1346 wcachevfs: vfsmod.vfs,
1347 features,
1347 features,
1348 intents=None,
1348 intents=None,
1349 ):
1349 ):
1350 """Create a new local repository instance.
1350 """Create a new local repository instance.
1351
1351
1352 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1352 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1353 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1353 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1354 object.
1354 object.
1355
1355
1356 Arguments:
1356 Arguments:
1357
1357
1358 baseui
1358 baseui
1359 ``ui.ui`` instance that ``ui`` argument was based off of.
1359 ``ui.ui`` instance that ``ui`` argument was based off of.
1360
1360
1361 ui
1361 ui
1362 ``ui.ui`` instance for use by the repository.
1362 ``ui.ui`` instance for use by the repository.
1363
1363
1364 origroot
1364 origroot
1365 ``bytes`` path to working directory root of this repository.
1365 ``bytes`` path to working directory root of this repository.
1366
1366
1367 wdirvfs
1367 wdirvfs
1368 ``vfs.vfs`` rooted at the working directory.
1368 ``vfs.vfs`` rooted at the working directory.
1369
1369
1370 hgvfs
1370 hgvfs
1371 ``vfs.vfs`` rooted at .hg/
1371 ``vfs.vfs`` rooted at .hg/
1372
1372
1373 requirements
1373 requirements
1374 ``set`` of bytestrings representing repository opening requirements.
1374 ``set`` of bytestrings representing repository opening requirements.
1375
1375
1376 supportedrequirements
1376 supportedrequirements
1377 ``set`` of bytestrings representing repository requirements that we
1377 ``set`` of bytestrings representing repository requirements that we
1378 know how to open. May be a supetset of ``requirements``.
1378 know how to open. May be a supetset of ``requirements``.
1379
1379
1380 sharedpath
1380 sharedpath
1381 ``bytes`` Defining path to storage base directory. Points to a
1381 ``bytes`` Defining path to storage base directory. Points to a
1382 ``.hg/`` directory somewhere.
1382 ``.hg/`` directory somewhere.
1383
1383
1384 store
1384 store
1385 ``store.basicstore`` (or derived) instance providing access to
1385 ``store.basicstore`` (or derived) instance providing access to
1386 versioned storage.
1386 versioned storage.
1387
1387
1388 cachevfs
1388 cachevfs
1389 ``vfs.vfs`` used for cache files.
1389 ``vfs.vfs`` used for cache files.
1390
1390
1391 wcachevfs
1391 wcachevfs
1392 ``vfs.vfs`` used for cache files related to the working copy.
1392 ``vfs.vfs`` used for cache files related to the working copy.
1393
1393
1394 features
1394 features
1395 ``set`` of bytestrings defining features/capabilities of this
1395 ``set`` of bytestrings defining features/capabilities of this
1396 instance.
1396 instance.
1397
1397
1398 intents
1398 intents
1399 ``set`` of system strings indicating what this repo will be used
1399 ``set`` of system strings indicating what this repo will be used
1400 for.
1400 for.
1401 """
1401 """
1402 self.baseui = baseui
1402 self.baseui = baseui
1403 self.ui = ui
1403 self.ui = ui
1404 self.origroot = origroot
1404 self.origroot = origroot
1405 # vfs rooted at working directory.
1405 # vfs rooted at working directory.
1406 self.wvfs = wdirvfs
1406 self.wvfs = wdirvfs
1407 self.root = wdirvfs.base
1407 self.root = wdirvfs.base
1408 # vfs rooted at .hg/. Used to access most non-store paths.
1408 # vfs rooted at .hg/. Used to access most non-store paths.
1409 self.vfs = hgvfs
1409 self.vfs = hgvfs
1410 self.path = hgvfs.base
1410 self.path = hgvfs.base
1411 self.requirements = requirements
1411 self.requirements = requirements
1412 self.nodeconstants = sha1nodeconstants
1412 self.nodeconstants = sha1nodeconstants
1413 self.nullid = self.nodeconstants.nullid
1413 self.nullid = self.nodeconstants.nullid
1414 self.supported = supportedrequirements
1414 self.supported = supportedrequirements
1415 self.sharedpath = sharedpath
1415 self.sharedpath = sharedpath
1416 self.store = store
1416 self.store = store
1417 self.cachevfs = cachevfs
1417 self.cachevfs = cachevfs
1418 self.wcachevfs = wcachevfs
1418 self.wcachevfs = wcachevfs
1419 self.features = features
1419 self.features = features
1420
1420
1421 self.filtername = None
1421 self.filtername = None
1422
1422
1423 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1423 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1424 b'devel', b'check-locks'
1424 b'devel', b'check-locks'
1425 ):
1425 ):
1426 self.vfs.audit = self._getvfsward(self.vfs.audit)
1426 self.vfs.audit = self._getvfsward(self.vfs.audit)
1427 # A list of callback to shape the phase if no data were found.
1427 # A list of callback to shape the phase if no data were found.
1428 # Callback are in the form: func(repo, roots) --> processed root.
1428 # Callback are in the form: func(repo, roots) --> processed root.
1429 # This list it to be filled by extension during repo setup
1429 # This list it to be filled by extension during repo setup
1430 self._phasedefaults = []
1430 self._phasedefaults = []
1431
1431
1432 color.setup(self.ui)
1432 color.setup(self.ui)
1433
1433
1434 self.spath = self.store.path
1434 self.spath = self.store.path
1435 self.svfs = self.store.vfs
1435 self.svfs = self.store.vfs
1436 self.sjoin = self.store.join
1436 self.sjoin = self.store.join
1437 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1437 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1438 b'devel', b'check-locks'
1438 b'devel', b'check-locks'
1439 ):
1439 ):
1440 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1440 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1441 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1441 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1442 else: # standard vfs
1442 else: # standard vfs
1443 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1443 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1444
1444
1445 self._dirstatevalidatewarned = False
1445 self._dirstatevalidatewarned = False
1446
1446
1447 self._branchcaches = branchmap.BranchMapCache()
1447 self._branchcaches = branchmap.BranchMapCache()
1448 self._revbranchcache = None
1448 self._revbranchcache = None
1449 self._filterpats = {}
1449 self._filterpats = {}
1450 self._datafilters = {}
1450 self._datafilters = {}
1451 self._transref = self._lockref = self._wlockref = None
1451 self._transref = self._lockref = self._wlockref = None
1452
1452
1453 # A cache for various files under .hg/ that tracks file changes,
1453 # A cache for various files under .hg/ that tracks file changes,
1454 # (used by the filecache decorator)
1454 # (used by the filecache decorator)
1455 #
1455 #
1456 # Maps a property name to its util.filecacheentry
1456 # Maps a property name to its util.filecacheentry
1457 self._filecache = {}
1457 self._filecache = {}
1458
1458
1459 # hold sets of revision to be filtered
1459 # hold sets of revision to be filtered
1460 # should be cleared when something might have changed the filter value:
1460 # should be cleared when something might have changed the filter value:
1461 # - new changesets,
1461 # - new changesets,
1462 # - phase change,
1462 # - phase change,
1463 # - new obsolescence marker,
1463 # - new obsolescence marker,
1464 # - working directory parent change,
1464 # - working directory parent change,
1465 # - bookmark changes
1465 # - bookmark changes
1466 self.filteredrevcache = {}
1466 self.filteredrevcache = {}
1467
1467
1468 self._dirstate = None
1468 self._dirstate = None
1469 # post-dirstate-status hooks
1469 # post-dirstate-status hooks
1470 self._postdsstatus = []
1470 self._postdsstatus = []
1471
1471
1472 self._pending_narrow_pats = None
1472 self._pending_narrow_pats = None
1473 self._pending_narrow_pats_dirstate = None
1473 self._pending_narrow_pats_dirstate = None
1474
1474
1475 # generic mapping between names and nodes
1475 # generic mapping between names and nodes
1476 self.names = namespaces.namespaces()
1476 self.names = namespaces.namespaces()
1477
1477
1478 # Key to signature value.
1478 # Key to signature value.
1479 self._sparsesignaturecache = {}
1479 self._sparsesignaturecache = {}
1480 # Signature to cached matcher instance.
1480 # Signature to cached matcher instance.
1481 self._sparsematchercache = {}
1481 self._sparsematchercache = {}
1482
1482
1483 self._extrafilterid = repoview.extrafilter(ui)
1483 self._extrafilterid = repoview.extrafilter(ui)
1484
1484
1485 self.filecopiesmode = None
1485 self.filecopiesmode = None
1486 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1486 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1487 self.filecopiesmode = b'changeset-sidedata'
1487 self.filecopiesmode = b'changeset-sidedata'
1488
1488
1489 self._wanted_sidedata = set()
1489 self._wanted_sidedata = set()
1490 self._sidedata_computers = {}
1490 self._sidedata_computers = {}
1491 sidedatamod.set_sidedata_spec_for_repo(self)
1491 sidedatamod.set_sidedata_spec_for_repo(self)
1492
1492
1493 def _getvfsward(self, origfunc):
1493 def _getvfsward(self, origfunc):
1494 """build a ward for self.vfs"""
1494 """build a ward for self.vfs"""
1495 rref = weakref.ref(self)
1495 rref = weakref.ref(self)
1496
1496
1497 def checkvfs(path, mode=None):
1497 def checkvfs(path, mode=None):
1498 ret = origfunc(path, mode=mode)
1498 ret = origfunc(path, mode=mode)
1499 repo = rref()
1499 repo = rref()
1500 if (
1500 if (
1501 repo is None
1501 repo is None
1502 or not util.safehasattr(repo, b'_wlockref')
1502 or not util.safehasattr(repo, b'_wlockref')
1503 or not util.safehasattr(repo, b'_lockref')
1503 or not util.safehasattr(repo, b'_lockref')
1504 ):
1504 ):
1505 return
1505 return
1506 if mode in (None, b'r', b'rb'):
1506 if mode in (None, b'r', b'rb'):
1507 return
1507 return
1508 if path.startswith(repo.path):
1508 if path.startswith(repo.path):
1509 # truncate name relative to the repository (.hg)
1509 # truncate name relative to the repository (.hg)
1510 path = path[len(repo.path) + 1 :]
1510 path = path[len(repo.path) + 1 :]
1511 if path.startswith(b'cache/'):
1511 if path.startswith(b'cache/'):
1512 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1512 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1513 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1513 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1514 # path prefixes covered by 'lock'
1514 # path prefixes covered by 'lock'
1515 vfs_path_prefixes = (
1515 vfs_path_prefixes = (
1516 b'journal.',
1516 b'journal.',
1517 b'undo.',
1517 b'undo.',
1518 b'strip-backup/',
1518 b'strip-backup/',
1519 b'cache/',
1519 b'cache/',
1520 )
1520 )
1521 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1521 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1522 if repo._currentlock(repo._lockref) is None:
1522 if repo._currentlock(repo._lockref) is None:
1523 repo.ui.develwarn(
1523 repo.ui.develwarn(
1524 b'write with no lock: "%s"' % path,
1524 b'write with no lock: "%s"' % path,
1525 stacklevel=3,
1525 stacklevel=3,
1526 config=b'check-locks',
1526 config=b'check-locks',
1527 )
1527 )
1528 elif repo._currentlock(repo._wlockref) is None:
1528 elif repo._currentlock(repo._wlockref) is None:
1529 # rest of vfs files are covered by 'wlock'
1529 # rest of vfs files are covered by 'wlock'
1530 #
1530 #
1531 # exclude special files
1531 # exclude special files
1532 for prefix in self._wlockfreeprefix:
1532 for prefix in self._wlockfreeprefix:
1533 if path.startswith(prefix):
1533 if path.startswith(prefix):
1534 return
1534 return
1535 repo.ui.develwarn(
1535 repo.ui.develwarn(
1536 b'write with no wlock: "%s"' % path,
1536 b'write with no wlock: "%s"' % path,
1537 stacklevel=3,
1537 stacklevel=3,
1538 config=b'check-locks',
1538 config=b'check-locks',
1539 )
1539 )
1540 return ret
1540 return ret
1541
1541
1542 return checkvfs
1542 return checkvfs
1543
1543
1544 def _getsvfsward(self, origfunc):
1544 def _getsvfsward(self, origfunc):
1545 """build a ward for self.svfs"""
1545 """build a ward for self.svfs"""
1546 rref = weakref.ref(self)
1546 rref = weakref.ref(self)
1547
1547
1548 def checksvfs(path, mode=None):
1548 def checksvfs(path, mode=None):
1549 ret = origfunc(path, mode=mode)
1549 ret = origfunc(path, mode=mode)
1550 repo = rref()
1550 repo = rref()
1551 if repo is None or not util.safehasattr(repo, b'_lockref'):
1551 if repo is None or not util.safehasattr(repo, b'_lockref'):
1552 return
1552 return
1553 if mode in (None, b'r', b'rb'):
1553 if mode in (None, b'r', b'rb'):
1554 return
1554 return
1555 if path.startswith(repo.sharedpath):
1555 if path.startswith(repo.sharedpath):
1556 # truncate name relative to the repository (.hg)
1556 # truncate name relative to the repository (.hg)
1557 path = path[len(repo.sharedpath) + 1 :]
1557 path = path[len(repo.sharedpath) + 1 :]
1558 if repo._currentlock(repo._lockref) is None:
1558 if repo._currentlock(repo._lockref) is None:
1559 repo.ui.develwarn(
1559 repo.ui.develwarn(
1560 b'write with no lock: "%s"' % path, stacklevel=4
1560 b'write with no lock: "%s"' % path, stacklevel=4
1561 )
1561 )
1562 return ret
1562 return ret
1563
1563
1564 return checksvfs
1564 return checksvfs
1565
1565
1566 def close(self):
1566 def close(self):
1567 self._writecaches()
1567 self._writecaches()
1568
1568
1569 def _writecaches(self):
1569 def _writecaches(self):
1570 if self._revbranchcache:
1570 if self._revbranchcache:
1571 self._revbranchcache.write()
1571 self._revbranchcache.write()
1572
1572
1573 def _restrictcapabilities(self, caps):
1573 def _restrictcapabilities(self, caps):
1574 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1574 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1575 caps = set(caps)
1575 caps = set(caps)
1576 capsblob = bundle2.encodecaps(
1576 capsblob = bundle2.encodecaps(
1577 bundle2.getrepocaps(self, role=b'client')
1577 bundle2.getrepocaps(self, role=b'client')
1578 )
1578 )
1579 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1579 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1580 if self.ui.configbool(b'experimental', b'narrow'):
1580 if self.ui.configbool(b'experimental', b'narrow'):
1581 caps.add(wireprototypes.NARROWCAP)
1581 caps.add(wireprototypes.NARROWCAP)
1582 return caps
1582 return caps
1583
1583
1584 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1584 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1585 # self -> auditor -> self._checknested -> self
1585 # self -> auditor -> self._checknested -> self
1586
1586
1587 @property
1587 @property
1588 def auditor(self):
1588 def auditor(self):
1589 # This is only used by context.workingctx.match in order to
1589 # This is only used by context.workingctx.match in order to
1590 # detect files in subrepos.
1590 # detect files in subrepos.
1591 return pathutil.pathauditor(self.root, callback=self._checknested)
1591 return pathutil.pathauditor(self.root, callback=self._checknested)
1592
1592
1593 @property
1593 @property
1594 def nofsauditor(self):
1594 def nofsauditor(self):
1595 # This is only used by context.basectx.match in order to detect
1595 # This is only used by context.basectx.match in order to detect
1596 # files in subrepos.
1596 # files in subrepos.
1597 return pathutil.pathauditor(
1597 return pathutil.pathauditor(
1598 self.root, callback=self._checknested, realfs=False, cached=True
1598 self.root, callback=self._checknested, realfs=False, cached=True
1599 )
1599 )
1600
1600
1601 def _checknested(self, path):
1601 def _checknested(self, path):
1602 """Determine if path is a legal nested repository."""
1602 """Determine if path is a legal nested repository."""
1603 if not path.startswith(self.root):
1603 if not path.startswith(self.root):
1604 return False
1604 return False
1605 subpath = path[len(self.root) + 1 :]
1605 subpath = path[len(self.root) + 1 :]
1606 normsubpath = util.pconvert(subpath)
1606 normsubpath = util.pconvert(subpath)
1607
1607
1608 # XXX: Checking against the current working copy is wrong in
1608 # XXX: Checking against the current working copy is wrong in
1609 # the sense that it can reject things like
1609 # the sense that it can reject things like
1610 #
1610 #
1611 # $ hg cat -r 10 sub/x.txt
1611 # $ hg cat -r 10 sub/x.txt
1612 #
1612 #
1613 # if sub/ is no longer a subrepository in the working copy
1613 # if sub/ is no longer a subrepository in the working copy
1614 # parent revision.
1614 # parent revision.
1615 #
1615 #
1616 # However, it can of course also allow things that would have
1616 # However, it can of course also allow things that would have
1617 # been rejected before, such as the above cat command if sub/
1617 # been rejected before, such as the above cat command if sub/
1618 # is a subrepository now, but was a normal directory before.
1618 # is a subrepository now, but was a normal directory before.
1619 # The old path auditor would have rejected by mistake since it
1619 # The old path auditor would have rejected by mistake since it
1620 # panics when it sees sub/.hg/.
1620 # panics when it sees sub/.hg/.
1621 #
1621 #
1622 # All in all, checking against the working copy seems sensible
1622 # All in all, checking against the working copy seems sensible
1623 # since we want to prevent access to nested repositories on
1623 # since we want to prevent access to nested repositories on
1624 # the filesystem *now*.
1624 # the filesystem *now*.
1625 ctx = self[None]
1625 ctx = self[None]
1626 parts = util.splitpath(subpath)
1626 parts = util.splitpath(subpath)
1627 while parts:
1627 while parts:
1628 prefix = b'/'.join(parts)
1628 prefix = b'/'.join(parts)
1629 if prefix in ctx.substate:
1629 if prefix in ctx.substate:
1630 if prefix == normsubpath:
1630 if prefix == normsubpath:
1631 return True
1631 return True
1632 else:
1632 else:
1633 sub = ctx.sub(prefix)
1633 sub = ctx.sub(prefix)
1634 return sub.checknested(subpath[len(prefix) + 1 :])
1634 return sub.checknested(subpath[len(prefix) + 1 :])
1635 else:
1635 else:
1636 parts.pop()
1636 parts.pop()
1637 return False
1637 return False
1638
1638
1639 def peer(self, path=None):
1639 def peer(self, path=None):
1640 return localpeer(self, path=path) # not cached to avoid reference cycle
1640 return localpeer(self, path=path) # not cached to avoid reference cycle
1641
1641
1642 def unfiltered(self):
1642 def unfiltered(self):
1643 """Return unfiltered version of the repository
1643 """Return unfiltered version of the repository
1644
1644
1645 Intended to be overwritten by filtered repo."""
1645 Intended to be overwritten by filtered repo."""
1646 return self
1646 return self
1647
1647
1648 def filtered(self, name, visibilityexceptions=None):
1648 def filtered(self, name, visibilityexceptions=None):
1649 """Return a filtered version of a repository
1649 """Return a filtered version of a repository
1650
1650
1651 The `name` parameter is the identifier of the requested view. This
1651 The `name` parameter is the identifier of the requested view. This
1652 will return a repoview object set "exactly" to the specified view.
1652 will return a repoview object set "exactly" to the specified view.
1653
1653
1654 This function does not apply recursive filtering to a repository. For
1654 This function does not apply recursive filtering to a repository. For
1655 example calling `repo.filtered("served")` will return a repoview using
1655 example calling `repo.filtered("served")` will return a repoview using
1656 the "served" view, regardless of the initial view used by `repo`.
1656 the "served" view, regardless of the initial view used by `repo`.
1657
1657
1658 In other word, there is always only one level of `repoview` "filtering".
1658 In other word, there is always only one level of `repoview` "filtering".
1659 """
1659 """
1660 if self._extrafilterid is not None and b'%' not in name:
1660 if self._extrafilterid is not None and b'%' not in name:
1661 name = name + b'%' + self._extrafilterid
1661 name = name + b'%' + self._extrafilterid
1662
1662
1663 cls = repoview.newtype(self.unfiltered().__class__)
1663 cls = repoview.newtype(self.unfiltered().__class__)
1664 return cls(self, name, visibilityexceptions)
1664 return cls(self, name, visibilityexceptions)
1665
1665
1666 @mixedrepostorecache(
1666 @mixedrepostorecache(
1667 (b'bookmarks', b'plain'),
1667 (b'bookmarks', b'plain'),
1668 (b'bookmarks.current', b'plain'),
1668 (b'bookmarks.current', b'plain'),
1669 (b'bookmarks', b''),
1669 (b'bookmarks', b''),
1670 (b'00changelog.i', b''),
1670 (b'00changelog.i', b''),
1671 )
1671 )
1672 def _bookmarks(self):
1672 def _bookmarks(self):
1673 # Since the multiple files involved in the transaction cannot be
1673 # Since the multiple files involved in the transaction cannot be
1674 # written atomically (with current repository format), there is a race
1674 # written atomically (with current repository format), there is a race
1675 # condition here.
1675 # condition here.
1676 #
1676 #
1677 # 1) changelog content A is read
1677 # 1) changelog content A is read
1678 # 2) outside transaction update changelog to content B
1678 # 2) outside transaction update changelog to content B
1679 # 3) outside transaction update bookmark file referring to content B
1679 # 3) outside transaction update bookmark file referring to content B
1680 # 4) bookmarks file content is read and filtered against changelog-A
1680 # 4) bookmarks file content is read and filtered against changelog-A
1681 #
1681 #
1682 # When this happens, bookmarks against nodes missing from A are dropped.
1682 # When this happens, bookmarks against nodes missing from A are dropped.
1683 #
1683 #
1684 # Having this happening during read is not great, but it become worse
1684 # Having this happening during read is not great, but it become worse
1685 # when this happen during write because the bookmarks to the "unknown"
1685 # when this happen during write because the bookmarks to the "unknown"
1686 # nodes will be dropped for good. However, writes happen within locks.
1686 # nodes will be dropped for good. However, writes happen within locks.
1687 # This locking makes it possible to have a race free consistent read.
1687 # This locking makes it possible to have a race free consistent read.
1688 # For this purpose data read from disc before locking are
1688 # For this purpose data read from disc before locking are
1689 # "invalidated" right after the locks are taken. This invalidations are
1689 # "invalidated" right after the locks are taken. This invalidations are
1690 # "light", the `filecache` mechanism keep the data in memory and will
1690 # "light", the `filecache` mechanism keep the data in memory and will
1691 # reuse them if the underlying files did not changed. Not parsing the
1691 # reuse them if the underlying files did not changed. Not parsing the
1692 # same data multiple times helps performances.
1692 # same data multiple times helps performances.
1693 #
1693 #
1694 # Unfortunately in the case describe above, the files tracked by the
1694 # Unfortunately in the case describe above, the files tracked by the
1695 # bookmarks file cache might not have changed, but the in-memory
1695 # bookmarks file cache might not have changed, but the in-memory
1696 # content is still "wrong" because we used an older changelog content
1696 # content is still "wrong" because we used an older changelog content
1697 # to process the on-disk data. So after locking, the changelog would be
1697 # to process the on-disk data. So after locking, the changelog would be
1698 # refreshed but `_bookmarks` would be preserved.
1698 # refreshed but `_bookmarks` would be preserved.
1699 # Adding `00changelog.i` to the list of tracked file is not
1699 # Adding `00changelog.i` to the list of tracked file is not
1700 # enough, because at the time we build the content for `_bookmarks` in
1700 # enough, because at the time we build the content for `_bookmarks` in
1701 # (4), the changelog file has already diverged from the content used
1701 # (4), the changelog file has already diverged from the content used
1702 # for loading `changelog` in (1)
1702 # for loading `changelog` in (1)
1703 #
1703 #
1704 # To prevent the issue, we force the changelog to be explicitly
1704 # To prevent the issue, we force the changelog to be explicitly
1705 # reloaded while computing `_bookmarks`. The data race can still happen
1705 # reloaded while computing `_bookmarks`. The data race can still happen
1706 # without the lock (with a narrower window), but it would no longer go
1706 # without the lock (with a narrower window), but it would no longer go
1707 # undetected during the lock time refresh.
1707 # undetected during the lock time refresh.
1708 #
1708 #
1709 # The new schedule is as follow
1709 # The new schedule is as follow
1710 #
1710 #
1711 # 1) filecache logic detect that `_bookmarks` needs to be computed
1711 # 1) filecache logic detect that `_bookmarks` needs to be computed
1712 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1712 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1713 # 3) We force `changelog` filecache to be tested
1713 # 3) We force `changelog` filecache to be tested
1714 # 4) cachestat for `changelog` are captured (for changelog)
1714 # 4) cachestat for `changelog` are captured (for changelog)
1715 # 5) `_bookmarks` is computed and cached
1715 # 5) `_bookmarks` is computed and cached
1716 #
1716 #
1717 # The step in (3) ensure we have a changelog at least as recent as the
1717 # The step in (3) ensure we have a changelog at least as recent as the
1718 # cache stat computed in (1). As a result at locking time:
1718 # cache stat computed in (1). As a result at locking time:
1719 # * if the changelog did not changed since (1) -> we can reuse the data
1719 # * if the changelog did not changed since (1) -> we can reuse the data
1720 # * otherwise -> the bookmarks get refreshed.
1720 # * otherwise -> the bookmarks get refreshed.
1721 self._refreshchangelog()
1721 self._refreshchangelog()
1722 return bookmarks.bmstore(self)
1722 return bookmarks.bmstore(self)
1723
1723
1724 def _refreshchangelog(self):
1724 def _refreshchangelog(self):
1725 """make sure the in memory changelog match the on-disk one"""
1725 """make sure the in memory changelog match the on-disk one"""
1726 if 'changelog' in vars(self) and self.currenttransaction() is None:
1726 if 'changelog' in vars(self) and self.currenttransaction() is None:
1727 del self.changelog
1727 del self.changelog
1728
1728
1729 @property
1729 @property
1730 def _activebookmark(self):
1730 def _activebookmark(self):
1731 return self._bookmarks.active
1731 return self._bookmarks.active
1732
1732
1733 # _phasesets depend on changelog. what we need is to call
1733 # _phasesets depend on changelog. what we need is to call
1734 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1734 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1735 # can't be easily expressed in filecache mechanism.
1735 # can't be easily expressed in filecache mechanism.
1736 @storecache(b'phaseroots', b'00changelog.i')
1736 @storecache(b'phaseroots', b'00changelog.i')
1737 def _phasecache(self):
1737 def _phasecache(self):
1738 return phases.phasecache(self, self._phasedefaults)
1738 return phases.phasecache(self, self._phasedefaults)
1739
1739
1740 @storecache(b'obsstore')
1740 @storecache(b'obsstore')
1741 def obsstore(self):
1741 def obsstore(self):
1742 return obsolete.makestore(self.ui, self)
1742 return obsolete.makestore(self.ui, self)
1743
1743
1744 @changelogcache()
1744 @changelogcache()
1745 def changelog(repo):
1745 def changelog(repo):
1746 # load dirstate before changelog to avoid race see issue6303
1746 # load dirstate before changelog to avoid race see issue6303
1747 repo.dirstate.prefetch_parents()
1747 repo.dirstate.prefetch_parents()
1748 return repo.store.changelog(
1748 return repo.store.changelog(
1749 txnutil.mayhavepending(repo.root),
1749 txnutil.mayhavepending(repo.root),
1750 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1750 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1751 )
1751 )
1752
1752
1753 @manifestlogcache()
1753 @manifestlogcache()
1754 def manifestlog(self):
1754 def manifestlog(self):
1755 return self.store.manifestlog(self, self._storenarrowmatch)
1755 return self.store.manifestlog(self, self._storenarrowmatch)
1756
1756
1757 @unfilteredpropertycache
1757 @unfilteredpropertycache
1758 def dirstate(self):
1758 def dirstate(self):
1759 if self._dirstate is None:
1759 if self._dirstate is None:
1760 self._dirstate = self._makedirstate()
1760 self._dirstate = self._makedirstate()
1761 else:
1761 else:
1762 self._dirstate.refresh()
1762 self._dirstate.refresh()
1763 return self._dirstate
1763 return self._dirstate
1764
1764
1765 def _makedirstate(self):
1765 def _makedirstate(self):
1766 """Extension point for wrapping the dirstate per-repo."""
1766 """Extension point for wrapping the dirstate per-repo."""
1767 sparsematchfn = None
1767 sparsematchfn = None
1768 if sparse.use_sparse(self):
1768 if sparse.use_sparse(self):
1769 sparsematchfn = lambda: sparse.matcher(self)
1769 sparsematchfn = lambda: sparse.matcher(self)
1770 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1770 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1771 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1771 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1772 use_dirstate_v2 = v2_req in self.requirements
1772 use_dirstate_v2 = v2_req in self.requirements
1773 use_tracked_hint = th in self.requirements
1773 use_tracked_hint = th in self.requirements
1774
1774
1775 return dirstate.dirstate(
1775 return dirstate.dirstate(
1776 self.vfs,
1776 self.vfs,
1777 self.ui,
1777 self.ui,
1778 self.root,
1778 self.root,
1779 self._dirstatevalidate,
1779 self._dirstatevalidate,
1780 sparsematchfn,
1780 sparsematchfn,
1781 self.nodeconstants,
1781 self.nodeconstants,
1782 use_dirstate_v2,
1782 use_dirstate_v2,
1783 use_tracked_hint=use_tracked_hint,
1783 use_tracked_hint=use_tracked_hint,
1784 )
1784 )
1785
1785
1786 def _dirstatevalidate(self, node):
1786 def _dirstatevalidate(self, node):
1787 try:
1787 try:
1788 self.changelog.rev(node)
1788 self.changelog.rev(node)
1789 return node
1789 return node
1790 except error.LookupError:
1790 except error.LookupError:
1791 if not self._dirstatevalidatewarned:
1791 if not self._dirstatevalidatewarned:
1792 self._dirstatevalidatewarned = True
1792 self._dirstatevalidatewarned = True
1793 self.ui.warn(
1793 self.ui.warn(
1794 _(b"warning: ignoring unknown working parent %s!\n")
1794 _(b"warning: ignoring unknown working parent %s!\n")
1795 % short(node)
1795 % short(node)
1796 )
1796 )
1797 return self.nullid
1797 return self.nullid
1798
1798
1799 @storecache(narrowspec.FILENAME)
1799 @storecache(narrowspec.FILENAME)
1800 def narrowpats(self):
1800 def narrowpats(self):
1801 """matcher patterns for this repository's narrowspec
1801 """matcher patterns for this repository's narrowspec
1802
1802
1803 A tuple of (includes, excludes).
1803 A tuple of (includes, excludes).
1804 """
1804 """
1805 # the narrow management should probably move into its own object
1805 # the narrow management should probably move into its own object
1806 val = self._pending_narrow_pats
1806 val = self._pending_narrow_pats
1807 if val is None:
1807 if val is None:
1808 val = narrowspec.load(self)
1808 val = narrowspec.load(self)
1809 return val
1809 return val
1810
1810
1811 @storecache(narrowspec.FILENAME)
1811 @storecache(narrowspec.FILENAME)
1812 def _storenarrowmatch(self):
1812 def _storenarrowmatch(self):
1813 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1813 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1814 return matchmod.always()
1814 return matchmod.always()
1815 include, exclude = self.narrowpats
1815 include, exclude = self.narrowpats
1816 return narrowspec.match(self.root, include=include, exclude=exclude)
1816 return narrowspec.match(self.root, include=include, exclude=exclude)
1817
1817
1818 @storecache(narrowspec.FILENAME)
1818 @storecache(narrowspec.FILENAME)
1819 def _narrowmatch(self):
1819 def _narrowmatch(self):
1820 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1820 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1821 return matchmod.always()
1821 return matchmod.always()
1822 narrowspec.checkworkingcopynarrowspec(self)
1822 narrowspec.checkworkingcopynarrowspec(self)
1823 include, exclude = self.narrowpats
1823 include, exclude = self.narrowpats
1824 return narrowspec.match(self.root, include=include, exclude=exclude)
1824 return narrowspec.match(self.root, include=include, exclude=exclude)
1825
1825
1826 def narrowmatch(self, match=None, includeexact=False):
1826 def narrowmatch(self, match=None, includeexact=False):
1827 """matcher corresponding the the repo's narrowspec
1827 """matcher corresponding the the repo's narrowspec
1828
1828
1829 If `match` is given, then that will be intersected with the narrow
1829 If `match` is given, then that will be intersected with the narrow
1830 matcher.
1830 matcher.
1831
1831
1832 If `includeexact` is True, then any exact matches from `match` will
1832 If `includeexact` is True, then any exact matches from `match` will
1833 be included even if they're outside the narrowspec.
1833 be included even if they're outside the narrowspec.
1834 """
1834 """
1835 if match:
1835 if match:
1836 if includeexact and not self._narrowmatch.always():
1836 if includeexact and not self._narrowmatch.always():
1837 # do not exclude explicitly-specified paths so that they can
1837 # do not exclude explicitly-specified paths so that they can
1838 # be warned later on
1838 # be warned later on
1839 em = matchmod.exact(match.files())
1839 em = matchmod.exact(match.files())
1840 nm = matchmod.unionmatcher([self._narrowmatch, em])
1840 nm = matchmod.unionmatcher([self._narrowmatch, em])
1841 return matchmod.intersectmatchers(match, nm)
1841 return matchmod.intersectmatchers(match, nm)
1842 return matchmod.intersectmatchers(match, self._narrowmatch)
1842 return matchmod.intersectmatchers(match, self._narrowmatch)
1843 return self._narrowmatch
1843 return self._narrowmatch
1844
1844
1845 def setnarrowpats(self, newincludes, newexcludes):
1845 def setnarrowpats(self, newincludes, newexcludes):
1846 narrowspec.save(self, newincludes, newexcludes)
1846 narrowspec.save(self, newincludes, newexcludes)
1847 self.invalidate(clearfilecache=True)
1847 self.invalidate(clearfilecache=True)
1848
1848
1849 @unfilteredpropertycache
1849 @unfilteredpropertycache
1850 def _quick_access_changeid_null(self):
1850 def _quick_access_changeid_null(self):
1851 return {
1851 return {
1852 b'null': (nullrev, self.nodeconstants.nullid),
1852 b'null': (nullrev, self.nodeconstants.nullid),
1853 nullrev: (nullrev, self.nodeconstants.nullid),
1853 nullrev: (nullrev, self.nodeconstants.nullid),
1854 self.nullid: (nullrev, self.nullid),
1854 self.nullid: (nullrev, self.nullid),
1855 }
1855 }
1856
1856
1857 @unfilteredpropertycache
1857 @unfilteredpropertycache
1858 def _quick_access_changeid_wc(self):
1858 def _quick_access_changeid_wc(self):
1859 # also fast path access to the working copy parents
1859 # also fast path access to the working copy parents
1860 # however, only do it for filter that ensure wc is visible.
1860 # however, only do it for filter that ensure wc is visible.
1861 quick = self._quick_access_changeid_null.copy()
1861 quick = self._quick_access_changeid_null.copy()
1862 cl = self.unfiltered().changelog
1862 cl = self.unfiltered().changelog
1863 for node in self.dirstate.parents():
1863 for node in self.dirstate.parents():
1864 if node == self.nullid:
1864 if node == self.nullid:
1865 continue
1865 continue
1866 rev = cl.index.get_rev(node)
1866 rev = cl.index.get_rev(node)
1867 if rev is None:
1867 if rev is None:
1868 # unknown working copy parent case:
1868 # unknown working copy parent case:
1869 #
1869 #
1870 # skip the fast path and let higher code deal with it
1870 # skip the fast path and let higher code deal with it
1871 continue
1871 continue
1872 pair = (rev, node)
1872 pair = (rev, node)
1873 quick[rev] = pair
1873 quick[rev] = pair
1874 quick[node] = pair
1874 quick[node] = pair
1875 # also add the parents of the parents
1875 # also add the parents of the parents
1876 for r in cl.parentrevs(rev):
1876 for r in cl.parentrevs(rev):
1877 if r == nullrev:
1877 if r == nullrev:
1878 continue
1878 continue
1879 n = cl.node(r)
1879 n = cl.node(r)
1880 pair = (r, n)
1880 pair = (r, n)
1881 quick[r] = pair
1881 quick[r] = pair
1882 quick[n] = pair
1882 quick[n] = pair
1883 p1node = self.dirstate.p1()
1883 p1node = self.dirstate.p1()
1884 if p1node != self.nullid:
1884 if p1node != self.nullid:
1885 quick[b'.'] = quick[p1node]
1885 quick[b'.'] = quick[p1node]
1886 return quick
1886 return quick
1887
1887
1888 @unfilteredmethod
1888 @unfilteredmethod
1889 def _quick_access_changeid_invalidate(self):
1889 def _quick_access_changeid_invalidate(self):
1890 if '_quick_access_changeid_wc' in vars(self):
1890 if '_quick_access_changeid_wc' in vars(self):
1891 del self.__dict__['_quick_access_changeid_wc']
1891 del self.__dict__['_quick_access_changeid_wc']
1892
1892
1893 @property
1893 @property
1894 def _quick_access_changeid(self):
1894 def _quick_access_changeid(self):
1895 """an helper dictionnary for __getitem__ calls
1895 """an helper dictionnary for __getitem__ calls
1896
1896
1897 This contains a list of symbol we can recognise right away without
1897 This contains a list of symbol we can recognise right away without
1898 further processing.
1898 further processing.
1899 """
1899 """
1900 if self.filtername in repoview.filter_has_wc:
1900 if self.filtername in repoview.filter_has_wc:
1901 return self._quick_access_changeid_wc
1901 return self._quick_access_changeid_wc
1902 return self._quick_access_changeid_null
1902 return self._quick_access_changeid_null
1903
1903
1904 def __getitem__(self, changeid):
1904 def __getitem__(self, changeid):
1905 # dealing with special cases
1905 # dealing with special cases
1906 if changeid is None:
1906 if changeid is None:
1907 return context.workingctx(self)
1907 return context.workingctx(self)
1908 if isinstance(changeid, context.basectx):
1908 if isinstance(changeid, context.basectx):
1909 return changeid
1909 return changeid
1910
1910
1911 # dealing with multiple revisions
1911 # dealing with multiple revisions
1912 if isinstance(changeid, slice):
1912 if isinstance(changeid, slice):
1913 # wdirrev isn't contiguous so the slice shouldn't include it
1913 # wdirrev isn't contiguous so the slice shouldn't include it
1914 return [
1914 return [
1915 self[i]
1915 self[i]
1916 for i in range(*changeid.indices(len(self)))
1916 for i in range(*changeid.indices(len(self)))
1917 if i not in self.changelog.filteredrevs
1917 if i not in self.changelog.filteredrevs
1918 ]
1918 ]
1919
1919
1920 # dealing with some special values
1920 # dealing with some special values
1921 quick_access = self._quick_access_changeid.get(changeid)
1921 quick_access = self._quick_access_changeid.get(changeid)
1922 if quick_access is not None:
1922 if quick_access is not None:
1923 rev, node = quick_access
1923 rev, node = quick_access
1924 return context.changectx(self, rev, node, maybe_filtered=False)
1924 return context.changectx(self, rev, node, maybe_filtered=False)
1925 if changeid == b'tip':
1925 if changeid == b'tip':
1926 node = self.changelog.tip()
1926 node = self.changelog.tip()
1927 rev = self.changelog.rev(node)
1927 rev = self.changelog.rev(node)
1928 return context.changectx(self, rev, node)
1928 return context.changectx(self, rev, node)
1929
1929
1930 # dealing with arbitrary values
1930 # dealing with arbitrary values
1931 try:
1931 try:
1932 if isinstance(changeid, int):
1932 if isinstance(changeid, int):
1933 node = self.changelog.node(changeid)
1933 node = self.changelog.node(changeid)
1934 rev = changeid
1934 rev = changeid
1935 elif changeid == b'.':
1935 elif changeid == b'.':
1936 # this is a hack to delay/avoid loading obsmarkers
1936 # this is a hack to delay/avoid loading obsmarkers
1937 # when we know that '.' won't be hidden
1937 # when we know that '.' won't be hidden
1938 node = self.dirstate.p1()
1938 node = self.dirstate.p1()
1939 rev = self.unfiltered().changelog.rev(node)
1939 rev = self.unfiltered().changelog.rev(node)
1940 elif len(changeid) == self.nodeconstants.nodelen:
1940 elif len(changeid) == self.nodeconstants.nodelen:
1941 try:
1941 try:
1942 node = changeid
1942 node = changeid
1943 rev = self.changelog.rev(changeid)
1943 rev = self.changelog.rev(changeid)
1944 except error.FilteredLookupError:
1944 except error.FilteredLookupError:
1945 changeid = hex(changeid) # for the error message
1945 changeid = hex(changeid) # for the error message
1946 raise
1946 raise
1947 except LookupError:
1947 except LookupError:
1948 # check if it might have come from damaged dirstate
1948 # check if it might have come from damaged dirstate
1949 #
1949 #
1950 # XXX we could avoid the unfiltered if we had a recognizable
1950 # XXX we could avoid the unfiltered if we had a recognizable
1951 # exception for filtered changeset access
1951 # exception for filtered changeset access
1952 if (
1952 if (
1953 self.local()
1953 self.local()
1954 and changeid in self.unfiltered().dirstate.parents()
1954 and changeid in self.unfiltered().dirstate.parents()
1955 ):
1955 ):
1956 msg = _(b"working directory has unknown parent '%s'!")
1956 msg = _(b"working directory has unknown parent '%s'!")
1957 raise error.Abort(msg % short(changeid))
1957 raise error.Abort(msg % short(changeid))
1958 changeid = hex(changeid) # for the error message
1958 changeid = hex(changeid) # for the error message
1959 raise
1959 raise
1960
1960
1961 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1961 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1962 node = bin(changeid)
1962 node = bin(changeid)
1963 rev = self.changelog.rev(node)
1963 rev = self.changelog.rev(node)
1964 else:
1964 else:
1965 raise error.ProgrammingError(
1965 raise error.ProgrammingError(
1966 b"unsupported changeid '%s' of type %s"
1966 b"unsupported changeid '%s' of type %s"
1967 % (changeid, pycompat.bytestr(type(changeid)))
1967 % (changeid, pycompat.bytestr(type(changeid)))
1968 )
1968 )
1969
1969
1970 return context.changectx(self, rev, node)
1970 return context.changectx(self, rev, node)
1971
1971
1972 except (error.FilteredIndexError, error.FilteredLookupError):
1972 except (error.FilteredIndexError, error.FilteredLookupError):
1973 raise error.FilteredRepoLookupError(
1973 raise error.FilteredRepoLookupError(
1974 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1974 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1975 )
1975 )
1976 except (IndexError, LookupError):
1976 except (IndexError, LookupError):
1977 raise error.RepoLookupError(
1977 raise error.RepoLookupError(
1978 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1978 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1979 )
1979 )
1980 except error.WdirUnsupported:
1980 except error.WdirUnsupported:
1981 return context.workingctx(self)
1981 return context.workingctx(self)
1982
1982
def __contains__(self, changeid):
    """True if the given changeid exists.

    Any value accepted by ``repo[changeid]`` may be passed; only a
    failed lookup (RepoLookupError) is treated as "absent" — other
    errors propagate to the caller.
    """
    try:
        self[changeid]
        return True
    except error.RepoLookupError:
        return False
1990
1990
def __nonzero__(self):
    # A repository object is always truthy, even when it has no
    # revisions; emptiness must be queried through len(repo) instead.
    return True

__bool__ = __nonzero__
1995
1995
def __len__(self):
    """Return the number of revisions in the unfiltered repository."""
    # no need to pay the cost of repoview.changelog; the unfiltered
    # changelog length is the same and much cheaper to obtain
    unfi = self.unfiltered()
    return len(unfi.changelog)
2000
2000
def __iter__(self):
    """Iterate over the (possibly filtered) changelog revision numbers."""
    return iter(self.changelog)
2003
2003
def revs(self, expr: bytes, *args):
    """Find revisions matching a revset.

    The revset is specified as a string ``expr`` that may contain
    %-formatting to escape certain types. See ``revsetlang.formatspec``.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()`` or
    ``repo.anyrevs([expr], user=True)``.

    Returns a smartset.abstractsmartset, which is a list-like interface
    that contains integer revisions.
    """
    tree = revsetlang.spectree(expr, *args)
    return revset.makematcher(tree)(self)
2019
2019
def set(self, expr: bytes, *args):
    """Find revisions matching a revset and emit changectx instances.

    This is a convenience wrapper around ``revs()`` that iterates the
    result and is a generator of changectx instances.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()``.
    """
    for r in self.revs(expr, *args):
        yield self[r]
2031
2031
def anyrevs(self, specs: bytes, user=False, localalias=None):
    """Find revisions matching one of the given revsets.

    Revset aliases from the configuration are not expanded by default. To
    expand user aliases, specify ``user=True``. To provide some local
    definitions overriding user aliases, set ``localalias`` to
    ``{name: definitionstring}``.
    """
    # Fast paths for the two most common spec lists, bypassing the full
    # revset parsing machinery.
    if specs == [b'null']:
        return revset.baseset([nullrev])
    if specs == [b'.']:
        quick_data = self._quick_access_changeid.get(b'.')
        if quick_data is not None:
            return revset.baseset([quick_data[0]])
    if user:
        m = revset.matchany(
            self.ui,
            specs,
            lookup=revset.lookupfn(self),
            localalias=localalias,
        )
    else:
        m = revset.matchany(None, specs, localalias=localalias)
    return m(self)
2056
2056
def url(self) -> bytes:
    """Return the location of this repository as a ``file:`` URL."""
    return b'file:' + self.root
2059
2059
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    This a convenience method to aid invoking hooks. Extensions likely
    won't call this unless they have registered a custom hook or are
    replacing code that is expected to call a hook.
    """
    return hook.hook(self.ui, self, name, throw, **args)
2068
2068
@filteredpropertycache
def _tagscache(self):
    """Returns a tagscache object that contains various tags related
    caches."""

    # This simplifies its cache management by having one decorated
    # function (this one) and the rest simply fetch things from it.
    class tagscache:
        def __init__(self):
            # These two define the set of tags for this repository. tags
            # maps tag name to node; tagtypes maps tag name to 'global' or
            # 'local'. (Global tags are defined by .hgtags across all
            # heads, and local tags are defined in .hg/localtags.)
            # They constitute the in-memory cache of tags.
            self.tags = self.tagtypes = None

            # Filled lazily by tagslist() and nodetags() respectively.
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()

    return cache
2091
2091
def tags(self):
    '''return a mapping of tag to node'''
    t = {}
    if self.changelog.filteredrevs:
        # The cached result is computed on the unfiltered repository and
        # may reference hidden changesets; recompute when filtering is
        # active so hidden nodes can be dropped below.
        tags, tt = self._findtags()
    else:
        tags = self._tagscache.tags
    rev = self.changelog.rev
    for k, v in tags.items():
        try:
            # ignore tags to unknown nodes
            rev(v)
            t[k] = v
        except (error.LookupError, ValueError):
            pass
    return t
2108
2108
def _findtags(self):
    """Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object."""

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?

    # map tag name to (node, hist)
    alltags = tagsmod.findglobaltags(self.ui, self)
    # map tag name to tag type
    tagtypes = {tag: b'global' for tag in alltags}

    # local tags (from .hg/localtags) override global ones
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for name, (node, hist) in alltags.items():
        if node != self.nullid:
            tags[encoding.tolocal(name)] = node
    tags[b'tip'] = self.changelog.tip()
    tagtypes = {
        encoding.tolocal(name): value for (name, value) in tagtypes.items()
    }
    return (tags, tagtypes)
2143
2143
def tagtype(self, tagname):
    """
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    """

    return self._tagscache.tagtypes.get(tagname)
2154
2154
def tagslist(self):
    '''return a list of tags ordered by revision'''
    if not self._tagscache.tagslist:
        # Sort (rev, tag, node) triples so tags come out in topological
        # (revision) order, then drop the rev from the cached result.
        l = []
        for t, n in self.tags().items():
            l.append((self.changelog.rev(n), t, n))
        self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

    return self._tagscache.tagslist
2164
2164
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self._tagscache.nodetagscache:
        # Invert the tag->node mapping once and cache it; each node's
        # tag list is kept sorted for deterministic output.
        nodetagscache = {}
        for t, n in self._tagscache.tags.items():
            nodetagscache.setdefault(n, []).append(t)
        for tags in nodetagscache.values():
            tags.sort()
        self._tagscache.nodetagscache = nodetagscache
    return self._tagscache.nodetagscache.get(node, [])
2175
2175
def nodebookmarks(self, node):
    """return the list of bookmarks pointing to the specified node"""
    return self._bookmarks.names(node)
2179
2179
def branchmap(self):
    """returns a dictionary {branch: [branchheads]} with branchheads
    ordered by increasing revision number"""
    # one cache entry per filter level; keyed by this (possibly
    # filtered) repo view
    return self._branchcaches[self]
2184
2184
@unfilteredmethod
def revbranchcache(self):
    """Return the rev-to-branch cache, creating it on first use."""
    if not self._revbranchcache:
        self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
    return self._revbranchcache
2190
2190
def register_changeset(self, rev, changelogrevision):
    """Feed a newly-seen changeset into the rev-branch cache."""
    self.revbranchcache().setdata(rev, changelogrevision)
2193
2193
def branchtip(self, branch, ignoremissing=False):
    """return the tip node for a given branch

    If ignoremissing is True, then this method will not raise an error.
    This is helpful for callers that only expect None for a missing branch
    (e.g. namespace).

    """
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        if not ignoremissing:
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
        else:
            # fall through and implicitly return None
            pass
2209
2209
def lookup(self, key):
    """Resolve a revision symbol to a node, raising on failure."""
    node = scmutil.revsymbol(self, key).node()
    if node is None:
        raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
    return node
2215
2215
def lookupbranch(self, key):
    """Return the branch named ``key`` if it exists, otherwise the
    branch of the revision ``key`` resolves to."""
    if self.branchmap().hasbranch(key):
        return key

    return scmutil.revsymbol(self, key).branch()
2221
2221
def known(self, nodes):
    """Return a list of booleans, one per node, telling whether each
    node is known and visible (not filtered) in this repository."""
    cl = self.changelog
    # hoist lookups out of the loop
    get_rev = cl.index.get_rev
    filtered = cl.filteredrevs
    result = []
    for n in nodes:
        r = get_rev(n)
        # unknown (r is None) or hidden revisions count as not known
        resp = not (r is None or r in filtered)
        result.append(resp)
    return result
2232
2232
def local(self):
    """Return self: this is a local repository (peers return None/False)."""
    return self
2235
2235
def publishing(self):
    # it's safe (and desirable) to trust the publish flag unconditionally
    # so that we don't finalize changes shared between users via ssh or nfs
    return self.ui.configbool(b'phases', b'publish', untrusted=True)
2240
2240
def cancopy(self):
    """Return True if this repository may be cloned by copying files."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    if not self.publishing():
        return True
    # if publishing we can't copy if there is filtered content
    return not self.filtered(b'visible').changelog.filteredrevs
2249
2249
def shared(self):
    '''the type of shared repository (None if not shared)'''
    # a shared store is detected by the store path differing from .hg
    if self.sharedpath != self.path:
        return b'store'
    return None
2255
2255
def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
    """Join path component(s) onto the working-directory root."""
    return self.vfs.reljoin(self.root, f, *insidef)
2258
2258
def setparents(self, p1, p2=None):
    """Set the working-directory parents, defaulting p2 to the null node.

    Also drops the quick-access changeid cache, since '.' now resolves
    to a different changeset.
    """
    if p2 is None:
        p2 = self.nullid
    self[None].setparents(p1, p2)
    self._quick_access_changeid_invalidate()
2264
2264
def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
    """changeid must be a changeset revision, if specified.
    fileid can be a file revision or node."""
    return context.filectx(
        self, path, changeid, fileid, changectx=changectx
    )
2271
2271
def getcwd(self) -> bytes:
    """Return the current working directory as seen by the dirstate."""
    return self.dirstate.getcwd()
2274
2274
2275 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2275 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2276 return self.dirstate.pathto(f, cwd)
2276 return self.dirstate.pathto(f, cwd)
2277
2277
def _loadfilter(self, filter):
    """Build (and cache) the list of (matcher, fn, params) triples for
    the config section ``filter`` (``encode`` or ``decode``)."""
    if filter not in self._filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == b'!':
                # '!' disables filtering for this pattern
                continue
            mf = matchmod.match(self.root, b'', [pat])
            fn = None
            params = cmd
            # registered data filters take precedence over shell commands
            for name, filterfn in self._datafilters.items():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name) :].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: procutil.filter(s, c)
                fn.__name__ = 'commandfilter'
            # Wrap old filters not supporting keyword arguments
            if not pycompat.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                fn.__name__ = 'compat-' + oldfn.__name__
            l.append((mf, fn, params))
        self._filterpats[filter] = l
    return self._filterpats[filter]
2303
2303
def _filter(self, filterpats, filename, data):
    """Run ``data`` through the first filter in ``filterpats`` whose
    matcher accepts ``filename``; return the (possibly transformed)
    data. At most one filter is applied."""
    for mf, fn, cmd in filterpats:
        if mf(filename):
            self.ui.debug(
                b"filtering %s through %s\n"
                % (filename, cmd or pycompat.sysbytes(fn.__name__))
            )
            data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
            break

    return data
2315
2315
@unfilteredpropertycache
def _encodefilterpats(self):
    # filters applied when reading files from the working directory
    return self._loadfilter(b'encode')
2319
2319
@unfilteredpropertycache
def _decodefilterpats(self):
    # filters applied when writing files to the working directory
    return self._loadfilter(b'decode')
2323
2323
def adddatafilter(self, name, filter):
    """Register a named data filter usable from encode/decode config."""
    self._datafilters[name] = filter
2326
2326
def wread(self, filename: bytes) -> bytes:
    """Read ``filename`` from the working directory, applying encode
    filters. Symlinks are read as their target path."""
    if self.wvfs.islink(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
2333
2333
def wwrite(
    self,
    filename: bytes,
    data: bytes,
    flags: bytes,
    backgroundclose=False,
    **kwargs
) -> int:
    """write ``data`` into ``filename`` in the working directory

    ``flags`` may contain ``l`` (write a symlink) and/or ``x`` (set the
    executable bit). This returns length of written (maybe decoded) data.
    """
    data = self._filter(self._decodefilterpats, filename, data)
    if b'l' in flags:
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(
            filename, data, backgroundclose=backgroundclose, **kwargs
        )
        if b'x' in flags:
            self.wvfs.setflags(filename, False, True)
        else:
            self.wvfs.setflags(filename, False, False)
    return len(data)
2358
2358
def wwritedata(self, filename: bytes, data: bytes) -> bytes:
    """Return ``data`` as it would be written to the working directory
    (decode filters applied), without touching the filesystem."""
    return self._filter(self._decodefilterpats, filename, data)
2361
2361
def currenttransaction(self):
    """return the current transaction or None if non exists"""
    # _transref is a weakref; dereference it before use
    if self._transref:
        tr = self._transref()
    else:
        tr = None

    # a dead (finished/aborted) transaction does not count as current
    if tr and tr.running():
        return tr
    return None
2372
2372
2373 def transaction(self, desc, report=None):
2373 def transaction(self, desc, report=None):
2374 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2374 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2375 b'devel', b'check-locks'
2375 b'devel', b'check-locks'
2376 ):
2376 ):
2377 if self._currentlock(self._lockref) is None:
2377 if self._currentlock(self._lockref) is None:
2378 raise error.ProgrammingError(b'transaction requires locking')
2378 raise error.ProgrammingError(b'transaction requires locking')
2379 tr = self.currenttransaction()
2379 tr = self.currenttransaction()
2380 if tr is not None:
2380 if tr is not None:
2381 return tr.nest(name=desc)
2381 return tr.nest(name=desc)
2382
2382
2383 # abort here if the journal already exists
2383 # abort here if the journal already exists
2384 if self.svfs.exists(b"journal"):
2384 if self.svfs.exists(b"journal"):
2385 raise error.RepoError(
2385 raise error.RepoError(
2386 _(b"abandoned transaction found"),
2386 _(b"abandoned transaction found"),
2387 hint=_(b"run 'hg recover' to clean up transaction"),
2387 hint=_(b"run 'hg recover' to clean up transaction"),
2388 )
2388 )
2389
2389
2390 # At that point your dirstate should be clean:
2390 # At that point your dirstate should be clean:
2391 #
2391 #
2392 # - If you don't have the wlock, why would you still have a dirty
2392 # - If you don't have the wlock, why would you still have a dirty
2393 # dirstate ?
2393 # dirstate ?
2394 #
2394 #
2395 # - If you hold the wlock, you should not be opening a transaction in
2395 # - If you hold the wlock, you should not be opening a transaction in
2396 # the middle of a `distate.changing_*` block. The transaction needs to
2396 # the middle of a `distate.changing_*` block. The transaction needs to
2397 # be open before that and wrap the change-context.
2397 # be open before that and wrap the change-context.
2398 #
2398 #
2399 # - If you are not within a `dirstate.changing_*` context, why is our
2399 # - If you are not within a `dirstate.changing_*` context, why is our
2400 # dirstate dirty?
2400 # dirstate dirty?
2401 if self.dirstate._dirty:
2401 if self.dirstate._dirty:
2402 m = "cannot open a transaction with a dirty dirstate"
2402 m = "cannot open a transaction with a dirty dirstate"
2403 raise error.ProgrammingError(m)
2403 raise error.ProgrammingError(m)
2404
2404
2405 idbase = b"%.40f#%f" % (random.random(), time.time())
2405 idbase = b"%.40f#%f" % (random.random(), time.time())
2406 ha = hex(hashutil.sha1(idbase).digest())
2406 ha = hex(hashutil.sha1(idbase).digest())
2407 txnid = b'TXN:' + ha
2407 txnid = b'TXN:' + ha
2408 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2408 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2409
2409
2410 self._writejournal(desc)
2410 self._writejournal(desc)
2411 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2411 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2412 if report:
2412 if report:
2413 rp = report
2413 rp = report
2414 else:
2414 else:
2415 rp = self.ui.warn
2415 rp = self.ui.warn
2416 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2416 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2417 # we must avoid cyclic reference between repo and transaction.
2417 # we must avoid cyclic reference between repo and transaction.
2418 reporef = weakref.ref(self)
2418 reporef = weakref.ref(self)
2419 # Code to track tag movement
2419 # Code to track tag movement
2420 #
2420 #
2421 # Since tags are all handled as file content, it is actually quite hard
2421 # Since tags are all handled as file content, it is actually quite hard
2422 # to track these movement from a code perspective. So we fallback to a
2422 # to track these movement from a code perspective. So we fallback to a
2423 # tracking at the repository level. One could envision to track changes
2423 # tracking at the repository level. One could envision to track changes
2424 # to the '.hgtags' file through changegroup apply but that fails to
2424 # to the '.hgtags' file through changegroup apply but that fails to
2425 # cope with case where transaction expose new heads without changegroup
2425 # cope with case where transaction expose new heads without changegroup
2426 # being involved (eg: phase movement).
2426 # being involved (eg: phase movement).
2427 #
2427 #
2428 # For now, We gate the feature behind a flag since this likely comes
2428 # For now, We gate the feature behind a flag since this likely comes
2429 # with performance impacts. The current code run more often than needed
2429 # with performance impacts. The current code run more often than needed
2430 # and do not use caches as much as it could. The current focus is on
2430 # and do not use caches as much as it could. The current focus is on
2431 # the behavior of the feature so we disable it by default. The flag
2431 # the behavior of the feature so we disable it by default. The flag
2432 # will be removed when we are happy with the performance impact.
2432 # will be removed when we are happy with the performance impact.
2433 #
2433 #
2434 # Once this feature is no longer experimental move the following
2434 # Once this feature is no longer experimental move the following
2435 # documentation to the appropriate help section:
2435 # documentation to the appropriate help section:
2436 #
2436 #
2437 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2437 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2438 # tags (new or changed or deleted tags). In addition the details of
2438 # tags (new or changed or deleted tags). In addition the details of
2439 # these changes are made available in a file at:
2439 # these changes are made available in a file at:
2440 # ``REPOROOT/.hg/changes/tags.changes``.
2440 # ``REPOROOT/.hg/changes/tags.changes``.
2441 # Make sure you check for HG_TAG_MOVED before reading that file as it
2441 # Make sure you check for HG_TAG_MOVED before reading that file as it
2442 # might exist from a previous transaction even if no tag were touched
2442 # might exist from a previous transaction even if no tag were touched
2443 # in this one. Changes are recorded in a line base format::
2443 # in this one. Changes are recorded in a line base format::
2444 #
2444 #
2445 # <action> <hex-node> <tag-name>\n
2445 # <action> <hex-node> <tag-name>\n
2446 #
2446 #
2447 # Actions are defined as follow:
2447 # Actions are defined as follow:
2448 # "-R": tag is removed,
2448 # "-R": tag is removed,
2449 # "+A": tag is added,
2449 # "+A": tag is added,
2450 # "-M": tag is moved (old value),
2450 # "-M": tag is moved (old value),
2451 # "+M": tag is moved (new value),
2451 # "+M": tag is moved (new value),
2452 tracktags = lambda x: None
2452 tracktags = lambda x: None
2453 # experimental config: experimental.hook-track-tags
2453 # experimental config: experimental.hook-track-tags
2454 shouldtracktags = self.ui.configbool(
2454 shouldtracktags = self.ui.configbool(
2455 b'experimental', b'hook-track-tags'
2455 b'experimental', b'hook-track-tags'
2456 )
2456 )
2457 if desc != b'strip' and shouldtracktags:
2457 if desc != b'strip' and shouldtracktags:
2458 oldheads = self.changelog.headrevs()
2458 oldheads = self.changelog.headrevs()
2459
2459
2460 def tracktags(tr2):
2460 def tracktags(tr2):
2461 repo = reporef()
2461 repo = reporef()
2462 assert repo is not None # help pytype
2462 assert repo is not None # help pytype
2463 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2463 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2464 newheads = repo.changelog.headrevs()
2464 newheads = repo.changelog.headrevs()
2465 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2465 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2466 # notes: we compare lists here.
2466 # notes: we compare lists here.
2467 # As we do it only once buiding set would not be cheaper
2467 # As we do it only once buiding set would not be cheaper
2468 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2468 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2469 if changes:
2469 if changes:
2470 tr2.hookargs[b'tag_moved'] = b'1'
2470 tr2.hookargs[b'tag_moved'] = b'1'
2471 with repo.vfs(
2471 with repo.vfs(
2472 b'changes/tags.changes', b'w', atomictemp=True
2472 b'changes/tags.changes', b'w', atomictemp=True
2473 ) as changesfile:
2473 ) as changesfile:
2474 # note: we do not register the file to the transaction
2474 # note: we do not register the file to the transaction
2475 # because we needs it to still exist on the transaction
2475 # because we needs it to still exist on the transaction
2476 # is close (for txnclose hooks)
2476 # is close (for txnclose hooks)
2477 tagsmod.writediff(changesfile, changes)
2477 tagsmod.writediff(changesfile, changes)
2478
2478
2479 def validate(tr2):
2479 def validate(tr2):
2480 """will run pre-closing hooks"""
2480 """will run pre-closing hooks"""
2481 # XXX the transaction API is a bit lacking here so we take a hacky
2481 # XXX the transaction API is a bit lacking here so we take a hacky
2482 # path for now
2482 # path for now
2483 #
2483 #
2484 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2484 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2485 # dict is copied before these run. In addition we needs the data
2485 # dict is copied before these run. In addition we needs the data
2486 # available to in memory hooks too.
2486 # available to in memory hooks too.
2487 #
2487 #
2488 # Moreover, we also need to make sure this runs before txnclose
2488 # Moreover, we also need to make sure this runs before txnclose
2489 # hooks and there is no "pending" mechanism that would execute
2489 # hooks and there is no "pending" mechanism that would execute
2490 # logic only if hooks are about to run.
2490 # logic only if hooks are about to run.
2491 #
2491 #
2492 # Fixing this limitation of the transaction is also needed to track
2492 # Fixing this limitation of the transaction is also needed to track
2493 # other families of changes (bookmarks, phases, obsolescence).
2493 # other families of changes (bookmarks, phases, obsolescence).
2494 #
2494 #
2495 # This will have to be fixed before we remove the experimental
2495 # This will have to be fixed before we remove the experimental
2496 # gating.
2496 # gating.
2497 tracktags(tr2)
2497 tracktags(tr2)
2498 repo = reporef()
2498 repo = reporef()
2499 assert repo is not None # help pytype
2499 assert repo is not None # help pytype
2500
2500
2501 singleheadopt = (b'experimental', b'single-head-per-branch')
2501 singleheadopt = (b'experimental', b'single-head-per-branch')
2502 singlehead = repo.ui.configbool(*singleheadopt)
2502 singlehead = repo.ui.configbool(*singleheadopt)
2503 if singlehead:
2503 if singlehead:
2504 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2504 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2505 accountclosed = singleheadsub.get(
2505 accountclosed = singleheadsub.get(
2506 b"account-closed-heads", False
2506 b"account-closed-heads", False
2507 )
2507 )
2508 if singleheadsub.get(b"public-changes-only", False):
2508 if singleheadsub.get(b"public-changes-only", False):
2509 filtername = b"immutable"
2509 filtername = b"immutable"
2510 else:
2510 else:
2511 filtername = b"visible"
2511 filtername = b"visible"
2512 scmutil.enforcesinglehead(
2512 scmutil.enforcesinglehead(
2513 repo, tr2, desc, accountclosed, filtername
2513 repo, tr2, desc, accountclosed, filtername
2514 )
2514 )
2515 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2515 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2516 for name, (old, new) in sorted(
2516 for name, (old, new) in sorted(
2517 tr.changes[b'bookmarks'].items()
2517 tr.changes[b'bookmarks'].items()
2518 ):
2518 ):
2519 args = tr.hookargs.copy()
2519 args = tr.hookargs.copy()
2520 args.update(bookmarks.preparehookargs(name, old, new))
2520 args.update(bookmarks.preparehookargs(name, old, new))
2521 repo.hook(
2521 repo.hook(
2522 b'pretxnclose-bookmark',
2522 b'pretxnclose-bookmark',
2523 throw=True,
2523 throw=True,
2524 **pycompat.strkwargs(args)
2524 **pycompat.strkwargs(args)
2525 )
2525 )
2526 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2526 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2527 cl = repo.unfiltered().changelog
2527 cl = repo.unfiltered().changelog
2528 for revs, (old, new) in tr.changes[b'phases']:
2528 for revs, (old, new) in tr.changes[b'phases']:
2529 for rev in revs:
2529 for rev in revs:
2530 args = tr.hookargs.copy()
2530 args = tr.hookargs.copy()
2531 node = hex(cl.node(rev))
2531 node = hex(cl.node(rev))
2532 args.update(phases.preparehookargs(node, old, new))
2532 args.update(phases.preparehookargs(node, old, new))
2533 repo.hook(
2533 repo.hook(
2534 b'pretxnclose-phase',
2534 b'pretxnclose-phase',
2535 throw=True,
2535 throw=True,
2536 **pycompat.strkwargs(args)
2536 **pycompat.strkwargs(args)
2537 )
2537 )
2538
2538
2539 repo.hook(
2539 repo.hook(
2540 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2540 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2541 )
2541 )
2542
2542
2543 def releasefn(tr, success):
2543 def releasefn(tr, success):
2544 repo = reporef()
2544 repo = reporef()
2545 if repo is None:
2545 if repo is None:
2546 # If the repo has been GC'd (and this release function is being
2546 # If the repo has been GC'd (and this release function is being
2547 # called from transaction.__del__), there's not much we can do,
2547 # called from transaction.__del__), there's not much we can do,
2548 # so just leave the unfinished transaction there and let the
2548 # so just leave the unfinished transaction there and let the
2549 # user run `hg recover`.
2549 # user run `hg recover`.
2550 return
2550 return
2551 if success:
2551 if success:
2552 # this should be explicitly invoked here, because
2552 # this should be explicitly invoked here, because
2553 # in-memory changes aren't written out at closing
2553 # in-memory changes aren't written out at closing
2554 # transaction, if tr.addfilegenerator (via
2554 # transaction, if tr.addfilegenerator (via
2555 # dirstate.write or so) isn't invoked while
2555 # dirstate.write or so) isn't invoked while
2556 # transaction running
2556 # transaction running
2557 repo.dirstate.write(None)
2557 repo.dirstate.write(None)
2558 else:
2558 else:
2559 # discard all changes (including ones already written
2559 # discard all changes (including ones already written
2560 # out) in this transaction
2560 # out) in this transaction
2561 repo.invalidate(clearfilecache=True)
2561 repo.invalidate(clearfilecache=True)
2562
2562
2563 tr = transaction.transaction(
2563 tr = transaction.transaction(
2564 rp,
2564 rp,
2565 self.svfs,
2565 self.svfs,
2566 vfsmap,
2566 vfsmap,
2567 b"journal",
2567 b"journal",
2568 b"undo",
2568 b"undo",
2569 aftertrans(renames),
2569 aftertrans(renames),
2570 self.store.createmode,
2570 self.store.createmode,
2571 validator=validate,
2571 validator=validate,
2572 releasefn=releasefn,
2572 releasefn=releasefn,
2573 checkambigfiles=_cachedfiles,
2573 checkambigfiles=_cachedfiles,
2574 name=desc,
2574 name=desc,
2575 )
2575 )
2576 tr.changes[b'origrepolen'] = len(self)
2576 tr.changes[b'origrepolen'] = len(self)
2577 tr.changes[b'obsmarkers'] = set()
2577 tr.changes[b'obsmarkers'] = set()
2578 tr.changes[b'phases'] = []
2578 tr.changes[b'phases'] = []
2579 tr.changes[b'bookmarks'] = {}
2579 tr.changes[b'bookmarks'] = {}
2580
2580
2581 tr.hookargs[b'txnid'] = txnid
2581 tr.hookargs[b'txnid'] = txnid
2582 tr.hookargs[b'txnname'] = desc
2582 tr.hookargs[b'txnname'] = desc
2583 tr.hookargs[b'changes'] = tr.changes
2583 tr.hookargs[b'changes'] = tr.changes
2584 # note: writing the fncache only during finalize mean that the file is
2584 # note: writing the fncache only during finalize mean that the file is
2585 # outdated when running hooks. As fncache is used for streaming clone,
2585 # outdated when running hooks. As fncache is used for streaming clone,
2586 # this is not expected to break anything that happen during the hooks.
2586 # this is not expected to break anything that happen during the hooks.
2587 tr.addfinalize(b'flush-fncache', self.store.write)
2587 tr.addfinalize(b'flush-fncache', self.store.write)
2588
2588
2589 def txnclosehook(tr2):
2589 def txnclosehook(tr2):
2590 """To be run if transaction is successful, will schedule a hook run"""
2590 """To be run if transaction is successful, will schedule a hook run"""
2591 # Don't reference tr2 in hook() so we don't hold a reference.
2591 # Don't reference tr2 in hook() so we don't hold a reference.
2592 # This reduces memory consumption when there are multiple
2592 # This reduces memory consumption when there are multiple
2593 # transactions per lock. This can likely go away if issue5045
2593 # transactions per lock. This can likely go away if issue5045
2594 # fixes the function accumulation.
2594 # fixes the function accumulation.
2595 hookargs = tr2.hookargs
2595 hookargs = tr2.hookargs
2596
2596
2597 def hookfunc(unused_success):
2597 def hookfunc(unused_success):
2598 repo = reporef()
2598 repo = reporef()
2599 assert repo is not None # help pytype
2599 assert repo is not None # help pytype
2600
2600
2601 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2601 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2602 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2602 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2603 for name, (old, new) in bmchanges:
2603 for name, (old, new) in bmchanges:
2604 args = tr.hookargs.copy()
2604 args = tr.hookargs.copy()
2605 args.update(bookmarks.preparehookargs(name, old, new))
2605 args.update(bookmarks.preparehookargs(name, old, new))
2606 repo.hook(
2606 repo.hook(
2607 b'txnclose-bookmark',
2607 b'txnclose-bookmark',
2608 throw=False,
2608 throw=False,
2609 **pycompat.strkwargs(args)
2609 **pycompat.strkwargs(args)
2610 )
2610 )
2611
2611
2612 if hook.hashook(repo.ui, b'txnclose-phase'):
2612 if hook.hashook(repo.ui, b'txnclose-phase'):
2613 cl = repo.unfiltered().changelog
2613 cl = repo.unfiltered().changelog
2614 phasemv = sorted(
2614 phasemv = sorted(
2615 tr.changes[b'phases'], key=lambda r: r[0][0]
2615 tr.changes[b'phases'], key=lambda r: r[0][0]
2616 )
2616 )
2617 for revs, (old, new) in phasemv:
2617 for revs, (old, new) in phasemv:
2618 for rev in revs:
2618 for rev in revs:
2619 args = tr.hookargs.copy()
2619 args = tr.hookargs.copy()
2620 node = hex(cl.node(rev))
2620 node = hex(cl.node(rev))
2621 args.update(phases.preparehookargs(node, old, new))
2621 args.update(phases.preparehookargs(node, old, new))
2622 repo.hook(
2622 repo.hook(
2623 b'txnclose-phase',
2623 b'txnclose-phase',
2624 throw=False,
2624 throw=False,
2625 **pycompat.strkwargs(args)
2625 **pycompat.strkwargs(args)
2626 )
2626 )
2627
2627
2628 repo.hook(
2628 repo.hook(
2629 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2629 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2630 )
2630 )
2631
2631
2632 repo = reporef()
2632 repo = reporef()
2633 assert repo is not None # help pytype
2633 assert repo is not None # help pytype
2634 repo._afterlock(hookfunc)
2634 repo._afterlock(hookfunc)
2635
2635
2636 tr.addfinalize(b'txnclose-hook', txnclosehook)
2636 tr.addfinalize(b'txnclose-hook', txnclosehook)
2637 # Include a leading "-" to make it happen before the transaction summary
2637 # Include a leading "-" to make it happen before the transaction summary
2638 # reports registered via scmutil.registersummarycallback() whose names
2638 # reports registered via scmutil.registersummarycallback() whose names
2639 # are 00-txnreport etc. That way, the caches will be warm when the
2639 # are 00-txnreport etc. That way, the caches will be warm when the
2640 # callbacks run.
2640 # callbacks run.
2641 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2641 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2642
2642
2643 def txnaborthook(tr2):
2643 def txnaborthook(tr2):
2644 """To be run if transaction is aborted"""
2644 """To be run if transaction is aborted"""
2645 repo = reporef()
2645 repo = reporef()
2646 assert repo is not None # help pytype
2646 assert repo is not None # help pytype
2647 repo.hook(
2647 repo.hook(
2648 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2648 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2649 )
2649 )
2650
2650
2651 tr.addabort(b'txnabort-hook', txnaborthook)
2651 tr.addabort(b'txnabort-hook', txnaborthook)
2652 # avoid eager cache invalidation. in-memory data should be identical
2652 # avoid eager cache invalidation. in-memory data should be identical
2653 # to stored data if transaction has no error.
2653 # to stored data if transaction has no error.
2654 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2654 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2655 self._transref = weakref.ref(tr)
2655 self._transref = weakref.ref(tr)
2656 scmutil.registersummarycallback(self, tr, desc)
2656 scmutil.registersummarycallback(self, tr, desc)
2657 # This only exist to deal with the need of rollback to have viable
2657 # This only exist to deal with the need of rollback to have viable
2658 # parents at the end of the operation. So backup viable parents at the
2658 # parents at the end of the operation. So backup viable parents at the
2659 # time of this operation.
2659 # time of this operation.
2660 #
2660 #
2661 # We only do it when the `wlock` is taken, otherwise other might be
2661 # We only do it when the `wlock` is taken, otherwise other might be
2662 # altering the dirstate under us.
2662 # altering the dirstate under us.
2663 #
2663 #
2664 # This is really not a great way to do this (first, because we cannot
2664 # This is really not a great way to do this (first, because we cannot
2665 # always do it). There are more viable alternative that exists
2665 # always do it). There are more viable alternative that exists
2666 #
2666 #
2667 # - backing only the working copy parent in a dedicated files and doing
2667 # - backing only the working copy parent in a dedicated files and doing
2668 # a clean "keep-update" to them on `hg rollback`.
2668 # a clean "keep-update" to them on `hg rollback`.
2669 #
2669 #
2670 # - slightly changing the behavior an applying a logic similar to "hg
2670 # - slightly changing the behavior an applying a logic similar to "hg
2671 # strip" to pick a working copy destination on `hg rollback`
2671 # strip" to pick a working copy destination on `hg rollback`
2672 if self.currentwlock() is not None:
2672 if self.currentwlock() is not None:
2673 ds = self.dirstate
2673 ds = self.dirstate
2674
2674
2675 def backup_dirstate(tr):
2675 def backup_dirstate(tr):
2676 for f in ds.all_file_names():
2676 for f in ds.all_file_names():
2677 # hardlink backup is okay because `dirstate` is always
2677 # hardlink backup is okay because `dirstate` is always
2678 # atomically written and possible data file are append only
2678 # atomically written and possible data file are append only
2679 # and resistant to trailing data.
2679 # and resistant to trailing data.
2680 tr.addbackup(f, hardlink=True, location=b'plain')
2680 tr.addbackup(f, hardlink=True, location=b'plain')
2681
2681
2682 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2682 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2683 return tr
2683 return tr
2684
2684
2685 def _journalfiles(self):
2685 def _journalfiles(self):
2686 return (
2686 return (
2687 (self.svfs, b'journal'),
2687 (self.svfs, b'journal'),
2688 (self.vfs, b'journal.branch'),
2688 (self.vfs, b'journal.branch'),
2689 (self.vfs, b'journal.desc'),
2689 (self.vfs, b'journal.desc'),
2690 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2690 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2691 (self.svfs, b'journal.phaseroots'),
2692 )
2691 )
2693
2692
2694 def undofiles(self):
2693 def undofiles(self):
2695 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2694 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2696
2695
2697 @unfilteredmethod
2696 @unfilteredmethod
2698 def _writejournal(self, desc):
2697 def _writejournal(self, desc):
2699 self.vfs.write(
2698 self.vfs.write(
2700 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2699 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2701 )
2700 )
2702 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2701 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2703 bookmarksvfs = bookmarks.bookmarksvfs(self)
2702 bookmarksvfs = bookmarks.bookmarksvfs(self)
2704 bookmarksvfs.write(
2703 bookmarksvfs.write(
2705 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2704 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2706 )
2705 )
2707 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2708
2706
2709 def recover(self):
2707 def recover(self):
2710 with self.lock():
2708 with self.lock():
2711 if self.svfs.exists(b"journal"):
2709 if self.svfs.exists(b"journal"):
2712 self.ui.status(_(b"rolling back interrupted transaction\n"))
2710 self.ui.status(_(b"rolling back interrupted transaction\n"))
2713 vfsmap = {
2711 vfsmap = {
2714 b'': self.svfs,
2712 b'': self.svfs,
2715 b'plain': self.vfs,
2713 b'plain': self.vfs,
2716 }
2714 }
2717 transaction.rollback(
2715 transaction.rollback(
2718 self.svfs,
2716 self.svfs,
2719 vfsmap,
2717 vfsmap,
2720 b"journal",
2718 b"journal",
2721 self.ui.warn,
2719 self.ui.warn,
2722 checkambigfiles=_cachedfiles,
2720 checkambigfiles=_cachedfiles,
2723 )
2721 )
2724 self.invalidate()
2722 self.invalidate()
2725 return True
2723 return True
2726 else:
2724 else:
2727 self.ui.warn(_(b"no interrupted transaction available\n"))
2725 self.ui.warn(_(b"no interrupted transaction available\n"))
2728 return False
2726 return False
2729
2727
2730 def rollback(self, dryrun=False, force=False):
2728 def rollback(self, dryrun=False, force=False):
2731 wlock = lock = None
2729 wlock = lock = None
2732 try:
2730 try:
2733 wlock = self.wlock()
2731 wlock = self.wlock()
2734 lock = self.lock()
2732 lock = self.lock()
2735 if self.svfs.exists(b"undo"):
2733 if self.svfs.exists(b"undo"):
2736 return self._rollback(dryrun, force)
2734 return self._rollback(dryrun, force)
2737 else:
2735 else:
2738 self.ui.warn(_(b"no rollback information available\n"))
2736 self.ui.warn(_(b"no rollback information available\n"))
2739 return 1
2737 return 1
2740 finally:
2738 finally:
2741 release(lock, wlock)
2739 release(lock, wlock)
2742
2740
2743 @unfilteredmethod # Until we get smarter cache management
2741 @unfilteredmethod # Until we get smarter cache management
2744 def _rollback(self, dryrun, force):
2742 def _rollback(self, dryrun, force):
2745 ui = self.ui
2743 ui = self.ui
2746
2744
2747 parents = self.dirstate.parents()
2745 parents = self.dirstate.parents()
2748 try:
2746 try:
2749 args = self.vfs.read(b'undo.desc').splitlines()
2747 args = self.vfs.read(b'undo.desc').splitlines()
2750 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2748 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2751 if len(args) >= 3:
2749 if len(args) >= 3:
2752 detail = args[2]
2750 detail = args[2]
2753 oldtip = oldlen - 1
2751 oldtip = oldlen - 1
2754
2752
2755 if detail and ui.verbose:
2753 if detail and ui.verbose:
2756 msg = _(
2754 msg = _(
2757 b'repository tip rolled back to revision %d'
2755 b'repository tip rolled back to revision %d'
2758 b' (undo %s: %s)\n'
2756 b' (undo %s: %s)\n'
2759 ) % (oldtip, desc, detail)
2757 ) % (oldtip, desc, detail)
2760 else:
2758 else:
2761 msg = _(
2759 msg = _(
2762 b'repository tip rolled back to revision %d (undo %s)\n'
2760 b'repository tip rolled back to revision %d (undo %s)\n'
2763 ) % (oldtip, desc)
2761 ) % (oldtip, desc)
2764 parentgone = any(self[p].rev() > oldtip for p in parents)
2762 parentgone = any(self[p].rev() > oldtip for p in parents)
2765 except IOError:
2763 except IOError:
2766 msg = _(b'rolling back unknown transaction\n')
2764 msg = _(b'rolling back unknown transaction\n')
2767 desc = None
2765 desc = None
2768 parentgone = True
2766 parentgone = True
2769
2767
2770 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2768 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2771 raise error.Abort(
2769 raise error.Abort(
2772 _(
2770 _(
2773 b'rollback of last commit while not checked out '
2771 b'rollback of last commit while not checked out '
2774 b'may lose data'
2772 b'may lose data'
2775 ),
2773 ),
2776 hint=_(b'use -f to force'),
2774 hint=_(b'use -f to force'),
2777 )
2775 )
2778
2776
2779 ui.status(msg)
2777 ui.status(msg)
2780 if dryrun:
2778 if dryrun:
2781 return 0
2779 return 0
2782
2780
2783 self.destroying()
2781 self.destroying()
2784 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2782 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2785 skip_journal_pattern = None
2783 skip_journal_pattern = None
2786 if not parentgone:
2784 if not parentgone:
2787 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2785 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2788 transaction.rollback(
2786 transaction.rollback(
2789 self.svfs,
2787 self.svfs,
2790 vfsmap,
2788 vfsmap,
2791 b'undo',
2789 b'undo',
2792 ui.warn,
2790 ui.warn,
2793 checkambigfiles=_cachedfiles,
2791 checkambigfiles=_cachedfiles,
2794 skip_journal_pattern=skip_journal_pattern,
2792 skip_journal_pattern=skip_journal_pattern,
2795 )
2793 )
2796 bookmarksvfs = bookmarks.bookmarksvfs(self)
2794 bookmarksvfs = bookmarks.bookmarksvfs(self)
2797 if bookmarksvfs.exists(b'undo.bookmarks'):
2795 if bookmarksvfs.exists(b'undo.bookmarks'):
2798 bookmarksvfs.rename(
2796 bookmarksvfs.rename(
2799 b'undo.bookmarks', b'bookmarks', checkambig=True
2797 b'undo.bookmarks', b'bookmarks', checkambig=True
2800 )
2798 )
2801 if self.svfs.exists(b'undo.phaseroots'):
2802 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2803 self.invalidate()
2799 self.invalidate()
2804 self.dirstate.invalidate()
2800 self.dirstate.invalidate()
2805
2801
2806 if parentgone:
2802 if parentgone:
2807 # replace this with some explicit parent update in the future.
2803 # replace this with some explicit parent update in the future.
2808 has_node = self.changelog.index.has_node
2804 has_node = self.changelog.index.has_node
2809 if not all(has_node(p) for p in self.dirstate._pl):
2805 if not all(has_node(p) for p in self.dirstate._pl):
2810 # There was no dirstate to backup initially, we need to drop
2806 # There was no dirstate to backup initially, we need to drop
2811 # the existing one.
2807 # the existing one.
2812 with self.dirstate.changing_parents(self):
2808 with self.dirstate.changing_parents(self):
2813 self.dirstate.setparents(self.nullid)
2809 self.dirstate.setparents(self.nullid)
2814 self.dirstate.clear()
2810 self.dirstate.clear()
2815
2811
2816 try:
2812 try:
2817 branch = self.vfs.read(b'undo.branch')
2813 branch = self.vfs.read(b'undo.branch')
2818 self.dirstate.setbranch(encoding.tolocal(branch))
2814 self.dirstate.setbranch(encoding.tolocal(branch))
2819 except IOError:
2815 except IOError:
2820 ui.warn(
2816 ui.warn(
2821 _(
2817 _(
2822 b'named branch could not be reset: '
2818 b'named branch could not be reset: '
2823 b'current branch is still \'%s\'\n'
2819 b'current branch is still \'%s\'\n'
2824 )
2820 )
2825 % self.dirstate.branch()
2821 % self.dirstate.branch()
2826 )
2822 )
2827
2823
2828 parents = tuple([p.rev() for p in self[None].parents()])
2824 parents = tuple([p.rev() for p in self[None].parents()])
2829 if len(parents) > 1:
2825 if len(parents) > 1:
2830 ui.status(
2826 ui.status(
2831 _(
2827 _(
2832 b'working directory now based on '
2828 b'working directory now based on '
2833 b'revisions %d and %d\n'
2829 b'revisions %d and %d\n'
2834 )
2830 )
2835 % parents
2831 % parents
2836 )
2832 )
2837 else:
2833 else:
2838 ui.status(
2834 ui.status(
2839 _(b'working directory now based on revision %d\n') % parents
2835 _(b'working directory now based on revision %d\n') % parents
2840 )
2836 )
2841 mergestatemod.mergestate.clean(self)
2837 mergestatemod.mergestate.clean(self)
2842
2838
2843 # TODO: if we know which new heads may result from this rollback, pass
2839 # TODO: if we know which new heads may result from this rollback, pass
2844 # them to destroy(), which will prevent the branchhead cache from being
2840 # them to destroy(), which will prevent the branchhead cache from being
2845 # invalidated.
2841 # invalidated.
2846 self.destroyed()
2842 self.destroyed()
2847 return 0
2843 return 0
2848
2844
2849 def _buildcacheupdater(self, newtransaction):
2845 def _buildcacheupdater(self, newtransaction):
2850 """called during transaction to build the callback updating cache
2846 """called during transaction to build the callback updating cache
2851
2847
2852 Lives on the repository to help extension who might want to augment
2848 Lives on the repository to help extension who might want to augment
2853 this logic. For this purpose, the created transaction is passed to the
2849 this logic. For this purpose, the created transaction is passed to the
2854 method.
2850 method.
2855 """
2851 """
2856 # we must avoid cyclic reference between repo and transaction.
2852 # we must avoid cyclic reference between repo and transaction.
2857 reporef = weakref.ref(self)
2853 reporef = weakref.ref(self)
2858
2854
2859 def updater(tr):
2855 def updater(tr):
2860 repo = reporef()
2856 repo = reporef()
2861 assert repo is not None # help pytype
2857 assert repo is not None # help pytype
2862 repo.updatecaches(tr)
2858 repo.updatecaches(tr)
2863
2859
2864 return updater
2860 return updater
2865
2861
2866 @unfilteredmethod
2862 @unfilteredmethod
2867 def updatecaches(self, tr=None, full=False, caches=None):
2863 def updatecaches(self, tr=None, full=False, caches=None):
2868 """warm appropriate caches
2864 """warm appropriate caches
2869
2865
2870 If this function is called after a transaction closed. The transaction
2866 If this function is called after a transaction closed. The transaction
2871 will be available in the 'tr' argument. This can be used to selectively
2867 will be available in the 'tr' argument. This can be used to selectively
2872 update caches relevant to the changes in that transaction.
2868 update caches relevant to the changes in that transaction.
2873
2869
2874 If 'full' is set, make sure all caches the function knows about have
2870 If 'full' is set, make sure all caches the function knows about have
2875 up-to-date data. Even the ones usually loaded more lazily.
2871 up-to-date data. Even the ones usually loaded more lazily.
2876
2872
2877 The `full` argument can take a special "post-clone" value. In this case
2873 The `full` argument can take a special "post-clone" value. In this case
2878 the cache warming is made after a clone and of the slower cache might
2874 the cache warming is made after a clone and of the slower cache might
2879 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2875 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2880 as we plan for a cleaner way to deal with this for 5.9.
2876 as we plan for a cleaner way to deal with this for 5.9.
2881 """
2877 """
2882 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2878 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2883 # During strip, many caches are invalid but
2879 # During strip, many caches are invalid but
2884 # later call to `destroyed` will refresh them.
2880 # later call to `destroyed` will refresh them.
2885 return
2881 return
2886
2882
2887 unfi = self.unfiltered()
2883 unfi = self.unfiltered()
2888
2884
2889 if full:
2885 if full:
2890 msg = (
2886 msg = (
2891 "`full` argument for `repo.updatecaches` is deprecated\n"
2887 "`full` argument for `repo.updatecaches` is deprecated\n"
2892 "(use `caches=repository.CACHE_ALL` instead)"
2888 "(use `caches=repository.CACHE_ALL` instead)"
2893 )
2889 )
2894 self.ui.deprecwarn(msg, b"5.9")
2890 self.ui.deprecwarn(msg, b"5.9")
2895 caches = repository.CACHES_ALL
2891 caches = repository.CACHES_ALL
2896 if full == b"post-clone":
2892 if full == b"post-clone":
2897 caches = repository.CACHES_POST_CLONE
2893 caches = repository.CACHES_POST_CLONE
2898 caches = repository.CACHES_ALL
2894 caches = repository.CACHES_ALL
2899 elif caches is None:
2895 elif caches is None:
2900 caches = repository.CACHES_DEFAULT
2896 caches = repository.CACHES_DEFAULT
2901
2897
2902 if repository.CACHE_BRANCHMAP_SERVED in caches:
2898 if repository.CACHE_BRANCHMAP_SERVED in caches:
2903 if tr is None or tr.changes[b'origrepolen'] < len(self):
2899 if tr is None or tr.changes[b'origrepolen'] < len(self):
2904 # accessing the 'served' branchmap should refresh all the others,
2900 # accessing the 'served' branchmap should refresh all the others,
2905 self.ui.debug(b'updating the branch cache\n')
2901 self.ui.debug(b'updating the branch cache\n')
2906 self.filtered(b'served').branchmap()
2902 self.filtered(b'served').branchmap()
2907 self.filtered(b'served.hidden').branchmap()
2903 self.filtered(b'served.hidden').branchmap()
2908 # flush all possibly delayed write.
2904 # flush all possibly delayed write.
2909 self._branchcaches.write_delayed(self)
2905 self._branchcaches.write_delayed(self)
2910
2906
2911 if repository.CACHE_CHANGELOG_CACHE in caches:
2907 if repository.CACHE_CHANGELOG_CACHE in caches:
2912 self.changelog.update_caches(transaction=tr)
2908 self.changelog.update_caches(transaction=tr)
2913
2909
2914 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2910 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2915 self.manifestlog.update_caches(transaction=tr)
2911 self.manifestlog.update_caches(transaction=tr)
2916
2912
2917 if repository.CACHE_REV_BRANCH in caches:
2913 if repository.CACHE_REV_BRANCH in caches:
2918 rbc = unfi.revbranchcache()
2914 rbc = unfi.revbranchcache()
2919 for r in unfi.changelog:
2915 for r in unfi.changelog:
2920 rbc.branchinfo(r)
2916 rbc.branchinfo(r)
2921 rbc.write()
2917 rbc.write()
2922
2918
2923 if repository.CACHE_FULL_MANIFEST in caches:
2919 if repository.CACHE_FULL_MANIFEST in caches:
2924 # ensure the working copy parents are in the manifestfulltextcache
2920 # ensure the working copy parents are in the manifestfulltextcache
2925 for ctx in self[b'.'].parents():
2921 for ctx in self[b'.'].parents():
2926 ctx.manifest() # accessing the manifest is enough
2922 ctx.manifest() # accessing the manifest is enough
2927
2923
2928 if repository.CACHE_FILE_NODE_TAGS in caches:
2924 if repository.CACHE_FILE_NODE_TAGS in caches:
2929 # accessing fnode cache warms the cache
2925 # accessing fnode cache warms the cache
2930 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2926 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2931
2927
2932 if repository.CACHE_TAGS_DEFAULT in caches:
2928 if repository.CACHE_TAGS_DEFAULT in caches:
2933 # accessing tags warm the cache
2929 # accessing tags warm the cache
2934 self.tags()
2930 self.tags()
2935 if repository.CACHE_TAGS_SERVED in caches:
2931 if repository.CACHE_TAGS_SERVED in caches:
2936 self.filtered(b'served').tags()
2932 self.filtered(b'served').tags()
2937
2933
2938 if repository.CACHE_BRANCHMAP_ALL in caches:
2934 if repository.CACHE_BRANCHMAP_ALL in caches:
2939 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2935 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2940 # so we're forcing a write to cause these caches to be warmed up
2936 # so we're forcing a write to cause these caches to be warmed up
2941 # even if they haven't explicitly been requested yet (if they've
2937 # even if they haven't explicitly been requested yet (if they've
2942 # never been used by hg, they won't ever have been written, even if
2938 # never been used by hg, they won't ever have been written, even if
2943 # they're a subset of another kind of cache that *has* been used).
2939 # they're a subset of another kind of cache that *has* been used).
2944 for filt in repoview.filtertable.keys():
2940 for filt in repoview.filtertable.keys():
2945 filtered = self.filtered(filt)
2941 filtered = self.filtered(filt)
2946 filtered.branchmap().write(filtered)
2942 filtered.branchmap().write(filtered)
2947
2943
2948 def invalidatecaches(self):
2944 def invalidatecaches(self):
2949 if '_tagscache' in vars(self):
2945 if '_tagscache' in vars(self):
2950 # can't use delattr on proxy
2946 # can't use delattr on proxy
2951 del self.__dict__['_tagscache']
2947 del self.__dict__['_tagscache']
2952
2948
2953 self._branchcaches.clear()
2949 self._branchcaches.clear()
2954 self.invalidatevolatilesets()
2950 self.invalidatevolatilesets()
2955 self._sparsesignaturecache.clear()
2951 self._sparsesignaturecache.clear()
2956
2952
2957 def invalidatevolatilesets(self):
2953 def invalidatevolatilesets(self):
2958 self.filteredrevcache.clear()
2954 self.filteredrevcache.clear()
2959 obsolete.clearobscaches(self)
2955 obsolete.clearobscaches(self)
2960 self._quick_access_changeid_invalidate()
2956 self._quick_access_changeid_invalidate()
2961
2957
2962 def invalidatedirstate(self):
2958 def invalidatedirstate(self):
2963 """Invalidates the dirstate, causing the next call to dirstate
2959 """Invalidates the dirstate, causing the next call to dirstate
2964 to check if it was modified since the last time it was read,
2960 to check if it was modified since the last time it was read,
2965 rereading it if it has.
2961 rereading it if it has.
2966
2962
2967 This is different to dirstate.invalidate() that it doesn't always
2963 This is different to dirstate.invalidate() that it doesn't always
2968 rereads the dirstate. Use dirstate.invalidate() if you want to
2964 rereads the dirstate. Use dirstate.invalidate() if you want to
2969 explicitly read the dirstate again (i.e. restoring it to a previous
2965 explicitly read the dirstate again (i.e. restoring it to a previous
2970 known good state)."""
2966 known good state)."""
2971 unfi = self.unfiltered()
2967 unfi = self.unfiltered()
2972 if 'dirstate' in unfi.__dict__:
2968 if 'dirstate' in unfi.__dict__:
2973 del unfi.__dict__['dirstate']
2969 del unfi.__dict__['dirstate']
2974
2970
2975 def invalidate(self, clearfilecache=False):
2971 def invalidate(self, clearfilecache=False):
2976 """Invalidates both store and non-store parts other than dirstate
2972 """Invalidates both store and non-store parts other than dirstate
2977
2973
2978 If a transaction is running, invalidation of store is omitted,
2974 If a transaction is running, invalidation of store is omitted,
2979 because discarding in-memory changes might cause inconsistency
2975 because discarding in-memory changes might cause inconsistency
2980 (e.g. incomplete fncache causes unintentional failure, but
2976 (e.g. incomplete fncache causes unintentional failure, but
2981 redundant one doesn't).
2977 redundant one doesn't).
2982 """
2978 """
2983 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2979 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2984 for k in list(self._filecache.keys()):
2980 for k in list(self._filecache.keys()):
2985 if (
2981 if (
2986 k == b'changelog'
2982 k == b'changelog'
2987 and self.currenttransaction()
2983 and self.currenttransaction()
2988 and self.changelog._delayed
2984 and self.changelog._delayed
2989 ):
2985 ):
2990 # The changelog object may store unwritten revisions. We don't
2986 # The changelog object may store unwritten revisions. We don't
2991 # want to lose them.
2987 # want to lose them.
2992 # TODO: Solve the problem instead of working around it.
2988 # TODO: Solve the problem instead of working around it.
2993 continue
2989 continue
2994
2990
2995 if clearfilecache:
2991 if clearfilecache:
2996 del self._filecache[k]
2992 del self._filecache[k]
2997 try:
2993 try:
2998 delattr(unfiltered, k)
2994 delattr(unfiltered, k)
2999 except AttributeError:
2995 except AttributeError:
3000 pass
2996 pass
3001 self.invalidatecaches()
2997 self.invalidatecaches()
3002 if not self.currenttransaction():
2998 if not self.currenttransaction():
3003 # TODO: Changing contents of store outside transaction
2999 # TODO: Changing contents of store outside transaction
3004 # causes inconsistency. We should make in-memory store
3000 # causes inconsistency. We should make in-memory store
3005 # changes detectable, and abort if changed.
3001 # changes detectable, and abort if changed.
3006 self.store.invalidatecaches()
3002 self.store.invalidatecaches()
3007
3003
3008 def invalidateall(self):
3004 def invalidateall(self):
3009 """Fully invalidates both store and non-store parts, causing the
3005 """Fully invalidates both store and non-store parts, causing the
3010 subsequent operation to reread any outside changes."""
3006 subsequent operation to reread any outside changes."""
3011 # extension should hook this to invalidate its caches
3007 # extension should hook this to invalidate its caches
3012 self.invalidate()
3008 self.invalidate()
3013 self.invalidatedirstate()
3009 self.invalidatedirstate()
3014
3010
3015 @unfilteredmethod
3011 @unfilteredmethod
3016 def _refreshfilecachestats(self, tr):
3012 def _refreshfilecachestats(self, tr):
3017 """Reload stats of cached files so that they are flagged as valid"""
3013 """Reload stats of cached files so that they are flagged as valid"""
3018 for k, ce in self._filecache.items():
3014 for k, ce in self._filecache.items():
3019 k = pycompat.sysstr(k)
3015 k = pycompat.sysstr(k)
3020 if k == 'dirstate' or k not in self.__dict__:
3016 if k == 'dirstate' or k not in self.__dict__:
3021 continue
3017 continue
3022 ce.refresh()
3018 ce.refresh()
3023
3019
3024 def _lock(
3020 def _lock(
3025 self,
3021 self,
3026 vfs,
3022 vfs,
3027 lockname,
3023 lockname,
3028 wait,
3024 wait,
3029 releasefn,
3025 releasefn,
3030 acquirefn,
3026 acquirefn,
3031 desc,
3027 desc,
3032 ):
3028 ):
3033 timeout = 0
3029 timeout = 0
3034 warntimeout = 0
3030 warntimeout = 0
3035 if wait:
3031 if wait:
3036 timeout = self.ui.configint(b"ui", b"timeout")
3032 timeout = self.ui.configint(b"ui", b"timeout")
3037 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3033 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3038 # internal config: ui.signal-safe-lock
3034 # internal config: ui.signal-safe-lock
3039 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3035 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3040
3036
3041 l = lockmod.trylock(
3037 l = lockmod.trylock(
3042 self.ui,
3038 self.ui,
3043 vfs,
3039 vfs,
3044 lockname,
3040 lockname,
3045 timeout,
3041 timeout,
3046 warntimeout,
3042 warntimeout,
3047 releasefn=releasefn,
3043 releasefn=releasefn,
3048 acquirefn=acquirefn,
3044 acquirefn=acquirefn,
3049 desc=desc,
3045 desc=desc,
3050 signalsafe=signalsafe,
3046 signalsafe=signalsafe,
3051 )
3047 )
3052 return l
3048 return l
3053
3049
3054 def _afterlock(self, callback):
3050 def _afterlock(self, callback):
3055 """add a callback to be run when the repository is fully unlocked
3051 """add a callback to be run when the repository is fully unlocked
3056
3052
3057 The callback will be executed when the outermost lock is released
3053 The callback will be executed when the outermost lock is released
3058 (with wlock being higher level than 'lock')."""
3054 (with wlock being higher level than 'lock')."""
3059 for ref in (self._wlockref, self._lockref):
3055 for ref in (self._wlockref, self._lockref):
3060 l = ref and ref()
3056 l = ref and ref()
3061 if l and l.held:
3057 if l and l.held:
3062 l.postrelease.append(callback)
3058 l.postrelease.append(callback)
3063 break
3059 break
3064 else: # no lock have been found.
3060 else: # no lock have been found.
3065 callback(True)
3061 callback(True)
3066
3062
3067 def lock(self, wait=True):
3063 def lock(self, wait=True):
3068 """Lock the repository store (.hg/store) and return a weak reference
3064 """Lock the repository store (.hg/store) and return a weak reference
3069 to the lock. Use this before modifying the store (e.g. committing or
3065 to the lock. Use this before modifying the store (e.g. committing or
3070 stripping). If you are opening a transaction, get a lock as well.)
3066 stripping). If you are opening a transaction, get a lock as well.)
3071
3067
3072 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3068 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3073 'wlock' first to avoid a dead-lock hazard."""
3069 'wlock' first to avoid a dead-lock hazard."""
3074 l = self._currentlock(self._lockref)
3070 l = self._currentlock(self._lockref)
3075 if l is not None:
3071 if l is not None:
3076 l.lock()
3072 l.lock()
3077 return l
3073 return l
3078
3074
3079 l = self._lock(
3075 l = self._lock(
3080 vfs=self.svfs,
3076 vfs=self.svfs,
3081 lockname=b"lock",
3077 lockname=b"lock",
3082 wait=wait,
3078 wait=wait,
3083 releasefn=None,
3079 releasefn=None,
3084 acquirefn=self.invalidate,
3080 acquirefn=self.invalidate,
3085 desc=_(b'repository %s') % self.origroot,
3081 desc=_(b'repository %s') % self.origroot,
3086 )
3082 )
3087 self._lockref = weakref.ref(l)
3083 self._lockref = weakref.ref(l)
3088 return l
3084 return l
3089
3085
3090 def wlock(self, wait=True):
3086 def wlock(self, wait=True):
3091 """Lock the non-store parts of the repository (everything under
3087 """Lock the non-store parts of the repository (everything under
3092 .hg except .hg/store) and return a weak reference to the lock.
3088 .hg except .hg/store) and return a weak reference to the lock.
3093
3089
3094 Use this before modifying files in .hg.
3090 Use this before modifying files in .hg.
3095
3091
3096 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3092 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3097 'wlock' first to avoid a dead-lock hazard."""
3093 'wlock' first to avoid a dead-lock hazard."""
3098 l = self._wlockref() if self._wlockref else None
3094 l = self._wlockref() if self._wlockref else None
3099 if l is not None and l.held:
3095 if l is not None and l.held:
3100 l.lock()
3096 l.lock()
3101 return l
3097 return l
3102
3098
3103 # We do not need to check for non-waiting lock acquisition. Such
3099 # We do not need to check for non-waiting lock acquisition. Such
3104 # acquisition would not cause dead-lock as they would just fail.
3100 # acquisition would not cause dead-lock as they would just fail.
3105 if wait and (
3101 if wait and (
3106 self.ui.configbool(b'devel', b'all-warnings')
3102 self.ui.configbool(b'devel', b'all-warnings')
3107 or self.ui.configbool(b'devel', b'check-locks')
3103 or self.ui.configbool(b'devel', b'check-locks')
3108 ):
3104 ):
3109 if self._currentlock(self._lockref) is not None:
3105 if self._currentlock(self._lockref) is not None:
3110 self.ui.develwarn(b'"wlock" acquired after "lock"')
3106 self.ui.develwarn(b'"wlock" acquired after "lock"')
3111
3107
3112 def unlock():
3108 def unlock():
3113 if self.dirstate.is_changing_any:
3109 if self.dirstate.is_changing_any:
3114 msg = b"wlock release in the middle of a changing parents"
3110 msg = b"wlock release in the middle of a changing parents"
3115 self.ui.develwarn(msg)
3111 self.ui.develwarn(msg)
3116 self.dirstate.invalidate()
3112 self.dirstate.invalidate()
3117 else:
3113 else:
3118 if self.dirstate._dirty:
3114 if self.dirstate._dirty:
3119 msg = b"dirty dirstate on wlock release"
3115 msg = b"dirty dirstate on wlock release"
3120 self.ui.develwarn(msg)
3116 self.ui.develwarn(msg)
3121 self.dirstate.write(None)
3117 self.dirstate.write(None)
3122
3118
3123 unfi = self.unfiltered()
3119 unfi = self.unfiltered()
3124 if 'dirstate' in unfi.__dict__:
3120 if 'dirstate' in unfi.__dict__:
3125 del unfi.__dict__['dirstate']
3121 del unfi.__dict__['dirstate']
3126
3122
3127 l = self._lock(
3123 l = self._lock(
3128 self.vfs,
3124 self.vfs,
3129 b"wlock",
3125 b"wlock",
3130 wait,
3126 wait,
3131 unlock,
3127 unlock,
3132 self.invalidatedirstate,
3128 self.invalidatedirstate,
3133 _(b'working directory of %s') % self.origroot,
3129 _(b'working directory of %s') % self.origroot,
3134 )
3130 )
3135 self._wlockref = weakref.ref(l)
3131 self._wlockref = weakref.ref(l)
3136 return l
3132 return l
3137
3133
3138 def _currentlock(self, lockref):
3134 def _currentlock(self, lockref):
3139 """Returns the lock if it's held, or None if it's not."""
3135 """Returns the lock if it's held, or None if it's not."""
3140 if lockref is None:
3136 if lockref is None:
3141 return None
3137 return None
3142 l = lockref()
3138 l = lockref()
3143 if l is None or not l.held:
3139 if l is None or not l.held:
3144 return None
3140 return None
3145 return l
3141 return l
3146
3142
3147 def currentwlock(self):
3143 def currentwlock(self):
3148 """Returns the wlock if it's held, or None if it's not."""
3144 """Returns the wlock if it's held, or None if it's not."""
3149 return self._currentlock(self._wlockref)
3145 return self._currentlock(self._wlockref)
3150
3146
3151 def checkcommitpatterns(self, wctx, match, status, fail):
3147 def checkcommitpatterns(self, wctx, match, status, fail):
3152 """check for commit arguments that aren't committable"""
3148 """check for commit arguments that aren't committable"""
3153 if match.isexact() or match.prefix():
3149 if match.isexact() or match.prefix():
3154 matched = set(status.modified + status.added + status.removed)
3150 matched = set(status.modified + status.added + status.removed)
3155
3151
3156 for f in match.files():
3152 for f in match.files():
3157 f = self.dirstate.normalize(f)
3153 f = self.dirstate.normalize(f)
3158 if f == b'.' or f in matched or f in wctx.substate:
3154 if f == b'.' or f in matched or f in wctx.substate:
3159 continue
3155 continue
3160 if f in status.deleted:
3156 if f in status.deleted:
3161 fail(f, _(b'file not found!'))
3157 fail(f, _(b'file not found!'))
3162 # Is it a directory that exists or used to exist?
3158 # Is it a directory that exists or used to exist?
3163 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3159 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3164 d = f + b'/'
3160 d = f + b'/'
3165 for mf in matched:
3161 for mf in matched:
3166 if mf.startswith(d):
3162 if mf.startswith(d):
3167 break
3163 break
3168 else:
3164 else:
3169 fail(f, _(b"no match under directory!"))
3165 fail(f, _(b"no match under directory!"))
3170 elif f not in self.dirstate:
3166 elif f not in self.dirstate:
3171 fail(f, _(b"file not tracked!"))
3167 fail(f, _(b"file not tracked!"))
3172
3168
    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there was
        nothing to commit. Raises error.Abort for partial merge commits or
        merges with missing files, and error.InputError for bad patterns.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            # used as the bad-file callback on the matcher and by
            # checkcommitpatterns below
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                # point the user at the saved message before propagating
                # whatever aborted the commit (hook failure, ctrl-c, ...)
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        # the 'commit' hook only runs once all locks are released
        self._afterlock(commithook)
        return ret
3306
3302
3307 @unfilteredmethod
3303 @unfilteredmethod
3308 def commitctx(self, ctx, error=False, origctx=None):
3304 def commitctx(self, ctx, error=False, origctx=None):
3309 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3305 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3310
3306
3311 @unfilteredmethod
3307 @unfilteredmethod
3312 def destroying(self):
3308 def destroying(self):
3313 """Inform the repository that nodes are about to be destroyed.
3309 """Inform the repository that nodes are about to be destroyed.
3314 Intended for use by strip and rollback, so there's a common
3310 Intended for use by strip and rollback, so there's a common
3315 place for anything that has to be done before destroying history.
3311 place for anything that has to be done before destroying history.
3316
3312
3317 This is mostly useful for saving state that is in memory and waiting
3313 This is mostly useful for saving state that is in memory and waiting
3318 to be flushed when the current lock is released. Because a call to
3314 to be flushed when the current lock is released. Because a call to
3319 destroyed is imminent, the repo will be invalidated causing those
3315 destroyed is imminent, the repo will be invalidated causing those
3320 changes to stay in memory (waiting for the next unlock), or vanish
3316 changes to stay in memory (waiting for the next unlock), or vanish
3321 completely.
3317 completely.
3322 """
3318 """
3323 # When using the same lock to commit and strip, the phasecache is left
3319 # When using the same lock to commit and strip, the phasecache is left
3324 # dirty after committing. Then when we strip, the repo is invalidated,
3320 # dirty after committing. Then when we strip, the repo is invalidated,
3325 # causing those changes to disappear.
3321 # causing those changes to disappear.
3326 if '_phasecache' in vars(self):
3322 if '_phasecache' in vars(self):
3327 self._phasecache.write()
3323 self._phasecache.write()
3328
3324
    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Rewrites the phase cache, refreshes repository caches and finally
        invalidates the in-memory state so subsequent reads see the
        post-destruction history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
3360
3356
3361 def status(
3357 def status(
3362 self,
3358 self,
3363 node1=b'.',
3359 node1=b'.',
3364 node2=None,
3360 node2=None,
3365 match=None,
3361 match=None,
3366 ignored=False,
3362 ignored=False,
3367 clean=False,
3363 clean=False,
3368 unknown=False,
3364 unknown=False,
3369 listsubrepos=False,
3365 listsubrepos=False,
3370 ):
3366 ):
3371 '''a convenience method that calls node1.status(node2)'''
3367 '''a convenience method that calls node1.status(node2)'''
3372 return self[node1].status(
3368 return self[node1].status(
3373 node2, match, ignored, clean, unknown, listsubrepos
3369 node2, match, ignored, clean, unknown, listsubrepos
3374 )
3370 )
3375
3371
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
3397
3393
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        # returns the live list; callers may mutate it (see clearpostdsstatus)
        return self._postdsstatus
3401
3397
    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        # slice-delete empties the list in place, preserving the identity of
        # the object handed out by postdsstatus()
        del self._postdsstatus[:]
3405
3401
3406 def heads(self, start=None):
3402 def heads(self, start=None):
3407 if start is None:
3403 if start is None:
3408 cl = self.changelog
3404 cl = self.changelog
3409 headrevs = reversed(cl.headrevs())
3405 headrevs = reversed(cl.headrevs())
3410 return [cl.node(rev) for rev in headrevs]
3406 return [cl.node(rev) for rev in headrevs]
3411
3407
3412 heads = self.changelog.heads(start)
3408 heads = self.changelog.heads(start)
3413 # sort the output in rev descending order
3409 # sort the output in rev descending order
3414 return sorted(heads, key=self.changelog.rev, reverse=True)
3410 return sorted(heads, key=self.changelog.rev, reverse=True)
3415
3411
3416 def branchheads(self, branch=None, start=None, closed=False):
3412 def branchheads(self, branch=None, start=None, closed=False):
3417 """return a (possibly filtered) list of heads for the given branch
3413 """return a (possibly filtered) list of heads for the given branch
3418
3414
3419 Heads are returned in topological order, from newest to oldest.
3415 Heads are returned in topological order, from newest to oldest.
3420 If branch is None, use the dirstate branch.
3416 If branch is None, use the dirstate branch.
3421 If start is not None, return only heads reachable from start.
3417 If start is not None, return only heads reachable from start.
3422 If closed is True, return heads that are marked as closed as well.
3418 If closed is True, return heads that are marked as closed as well.
3423 """
3419 """
3424 if branch is None:
3420 if branch is None:
3425 branch = self[None].branch()
3421 branch = self[None].branch()
3426 branches = self.branchmap()
3422 branches = self.branchmap()
3427 if not branches.hasbranch(branch):
3423 if not branches.hasbranch(branch):
3428 return []
3424 return []
3429 # the cache returns heads ordered lowest to highest
3425 # the cache returns heads ordered lowest to highest
3430 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3426 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3431 if start is not None:
3427 if start is not None:
3432 # filter out the heads that cannot be reached from startrev
3428 # filter out the heads that cannot be reached from startrev
3433 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3429 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3434 bheads = [h for h in bheads if h in fbheads]
3430 bheads = [h for h in bheads if h in fbheads]
3435 return bheads
3431 return bheads
3436
3432
    def branches(self, nodes):
        # For each starting node, follow first parents down a linear run and
        # emit a (top, bottom, p1, p2) tuple when the walk hits a merge
        # (second parent set) or a root (first parent is nullid).
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n  # top of the current linear run
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
3450
3446
    def between(self, pairs):
        # For each (top, bottom) pair, walk first parents from ``top``
        # towards ``bottom``, collecting the nodes whose distance from top
        # is a power of two (1, 2, 4, ...).  Returns one such sample list
        # per input pair.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # next distance at which to sample a node

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2  # double the sampling interval
                n = p
                i += 1

            r.append(l)

        return r
3469
3465
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # deliberately a no-op: this exists purely as an extension hook point
3475
3471
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # NOTE(review): presumably memoized on the unfiltered repo by the
        # decorator, so all callers share one hooks container -- confirm
        # against unfilteredpropertycache's definition
        return util.hooks()
3482
3478
    def pushkey(self, namespace, key, old, new):
        """Update ``key`` in ``namespace`` from ``old`` to ``new``.

        Fires the ``prepushkey`` hook first; a HookAbort vetoes the push
        (returning False instead of propagating).  On success, schedules the
        ``pushkey`` hook via _afterlock and returns pushkey.push's result.
        """
        try:
            # include the current transaction's hook args, if a transaction
            # is in progress
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            # a vetoing hook turns into a user-visible message, not a crash
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        # defer the post-update hook; _afterlock presumably runs it once
        # locks are released -- confirm against its definition
        self._afterlock(runhook)
        return ret
3515
3511
3516 def listkeys(self, namespace):
3512 def listkeys(self, namespace):
3517 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3513 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3518 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3514 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3519 values = pushkey.list(self, namespace)
3515 values = pushkey.list(self, namespace)
3520 self.hook(b'listkeys', namespace=namespace, values=values)
3516 self.hook(b'listkeys', namespace=namespace, values=values)
3521 return values
3517 return values
3522
3518
3523 def debugwireargs(self, one, two, three=None, four=None, five=None):
3519 def debugwireargs(self, one, two, three=None, four=None, five=None):
3524 '''used to test argument passing over the wire'''
3520 '''used to test argument passing over the wire'''
3525 return b"%s %s %s %s %s" % (
3521 return b"%s %s %s %s %s" % (
3526 one,
3522 one,
3527 two,
3523 two,
3528 pycompat.bytestr(three),
3524 pycompat.bytestr(three),
3529 pycompat.bytestr(four),
3525 pycompat.bytestr(four),
3530 pycompat.bytestr(five),
3526 pycompat.bytestr(five),
3531 )
3527 )
3532
3528
3533 def savecommitmessage(self, text):
3529 def savecommitmessage(self, text):
3534 fp = self.vfs(b'last-message.txt', b'wb')
3530 fp = self.vfs(b'last-message.txt', b'wb')
3535 try:
3531 try:
3536 fp.write(text)
3532 fp.write(text)
3537 finally:
3533 finally:
3538 fp.close()
3534 fp.close()
3539 return self.pathto(fp.name[len(self.root) + 1 :])
3535 return self.pathto(fp.name[len(self.root) + 1 :])
3540
3536
3541 def register_wanted_sidedata(self, category):
3537 def register_wanted_sidedata(self, category):
3542 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3538 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3543 # Only revlogv2 repos can want sidedata.
3539 # Only revlogv2 repos can want sidedata.
3544 return
3540 return
3545 self._wanted_sidedata.add(pycompat.bytestr(category))
3541 self._wanted_sidedata.add(pycompat.bytestr(category))
3546
3542
    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        """Register a sidedata ``computer`` for ``category`` on ``kind`` revlogs.

        ``keys``, ``computer`` and ``flags`` are stored together per
        (kind, category).  Registering an existing category requires
        ``replace=True``; replacing a missing one is an error.
        """
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        # create the per-kind mapping lazily, then store the computer tuple
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
3568
3564
3569
3565
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each ``(vfs, src, dest)`` in ``files``.

    The triples are snapshotted as tuples up front so later mutation of
    ``files`` cannot affect the callback.
    """
    pending = [tuple(entry) for entry in files]

    def a():
        for vfs, src, dest in pending:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a

3586
3582
3587
3583
def undoname(fn: bytes) -> bytes:
    """Map a ``journal*`` file path to its ``undo*`` counterpart."""
    directory, basename = os.path.split(fn)
    assert basename.startswith(b'journal')
    # only the leading 'journal' prefix is rewritten
    undobase = basename.replace(b'journal', b'undo', 1)
    return os.path.join(directory, undobase)
3592
3588
3593
3589
def instance(ui, path: bytes, create, intents=None, createopts=None):
    """Open (and optionally create) the local repository at ``path``.

    The repository may be transparently re-opened by the auto-upgrade
    machinery, which is why construction goes through a factory callable.
    """
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    # the factory is passed along so upgrade code can re-open the repo;
    # see upgrade.may_auto_upgrade for details
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo
3608
3604
3609
3605
def islocal(path: bytes) -> bool:
    """Whether this repository class is local -- trivially True here."""
    return True
3612
3608
3613
3609
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    opts = dict(createopts or {})

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3627
3623
3628
3624
def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depends on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    # working-directory requirements come from the local configuration ...
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    # ... while everything else is copied verbatim from the source repo
    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements
3649
3645
3650
3646
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    # fncache only makes sense with a store, dotencode only with fncache
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    # pick the first configured compression engine that is installed and
    # revlog-capable; abort when none qualifies
    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        # revlogv2 supersedes revlogv1
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirement
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version"
        )
        if version != 1:
            # unknown versions are ignored with a warning, not an abort
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
3798
3794
3799
3795
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which needs to be dropped because dependend
    requirements are not enabled. Also warns users about it"""

    dropped = set()

    # everything below only matters when the store itself is disabled
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            # NOTE(review): 'beacuse' typo in the user-facing message kept
            # byte-identical on purpose
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'beacuse it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            # sharing is impossible without a store: hard error
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            # only warn when the user explicitly asked for share-safe
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
3841
3837
3842
3838
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    recognized = frozenset(
        (
            b'backend',
            b'lfs',
            b'narrowfiles',
            b'sharedrepo',
            b'sharedrelative',
            b'shareditems',
            b'shallowfilestore',
        )
    )

    unknown = {}
    for key, value in createopts.items():
        if key not in recognized:
            unknown[key] = value
    return unknown
3868
3864
3869
3865
3870 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3866 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3871 """Create a new repository in a vfs.
3867 """Create a new repository in a vfs.
3872
3868
3873 ``path`` path to the new repo's working directory.
3869 ``path`` path to the new repo's working directory.
3874 ``createopts`` options for the new repository.
3870 ``createopts`` options for the new repository.
3875 ``requirement`` predefined set of requirements.
3871 ``requirement`` predefined set of requirements.
3876 (incompatible with ``createopts``)
3872 (incompatible with ``createopts``)
3877
3873
3878 The following keys for ``createopts`` are recognized:
3874 The following keys for ``createopts`` are recognized:
3879
3875
3880 backend
3876 backend
3881 The storage backend to use.
3877 The storage backend to use.
3882 lfs
3878 lfs
3883 Repository will be created with ``lfs`` requirement. The lfs extension
3879 Repository will be created with ``lfs`` requirement. The lfs extension
3884 will automatically be loaded when the repository is accessed.
3880 will automatically be loaded when the repository is accessed.
3885 narrowfiles
3881 narrowfiles
3886 Set up repository to support narrow file storage.
3882 Set up repository to support narrow file storage.
3887 sharedrepo
3883 sharedrepo
3888 Repository object from which storage should be shared.
3884 Repository object from which storage should be shared.
3889 sharedrelative
3885 sharedrelative
3890 Boolean indicating if the path to the shared repo should be
3886 Boolean indicating if the path to the shared repo should be
3891 stored as relative. By default, the pointer to the "parent" repo
3887 stored as relative. By default, the pointer to the "parent" repo
3892 is stored as an absolute path.
3888 is stored as an absolute path.
3893 shareditems
3889 shareditems
3894 Set of items to share to the new repository (in addition to storage).
3890 Set of items to share to the new repository (in addition to storage).
3895 shallowfilestore
3891 shallowfilestore
3896 Indicates that storage for files should be shallow (not all ancestor
3892 Indicates that storage for files should be shallow (not all ancestor
3897 revisions are known).
3893 revisions are known).
3898 """
3894 """
3899
3895
3900 if requirements is not None:
3896 if requirements is not None:
3901 if createopts is not None:
3897 if createopts is not None:
3902 msg = b'cannot specify both createopts and requirements'
3898 msg = b'cannot specify both createopts and requirements'
3903 raise error.ProgrammingError(msg)
3899 raise error.ProgrammingError(msg)
3904 createopts = {}
3900 createopts = {}
3905 else:
3901 else:
3906 createopts = defaultcreateopts(ui, createopts=createopts)
3902 createopts = defaultcreateopts(ui, createopts=createopts)
3907
3903
3908 unknownopts = filterknowncreateopts(ui, createopts)
3904 unknownopts = filterknowncreateopts(ui, createopts)
3909
3905
3910 if not isinstance(unknownopts, dict):
3906 if not isinstance(unknownopts, dict):
3911 raise error.ProgrammingError(
3907 raise error.ProgrammingError(
3912 b'filterknowncreateopts() did not return a dict'
3908 b'filterknowncreateopts() did not return a dict'
3913 )
3909 )
3914
3910
3915 if unknownopts:
3911 if unknownopts:
3916 raise error.Abort(
3912 raise error.Abort(
3917 _(
3913 _(
3918 b'unable to create repository because of unknown '
3914 b'unable to create repository because of unknown '
3919 b'creation option: %s'
3915 b'creation option: %s'
3920 )
3916 )
3921 % b', '.join(sorted(unknownopts)),
3917 % b', '.join(sorted(unknownopts)),
3922 hint=_(b'is a required extension not loaded?'),
3918 hint=_(b'is a required extension not loaded?'),
3923 )
3919 )
3924
3920
3925 requirements = newreporequirements(ui, createopts=createopts)
3921 requirements = newreporequirements(ui, createopts=createopts)
3926 requirements -= checkrequirementscompat(ui, requirements)
3922 requirements -= checkrequirementscompat(ui, requirements)
3927
3923
3928 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3924 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3929
3925
3930 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3926 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3931 if hgvfs.exists():
3927 if hgvfs.exists():
3932 raise error.RepoError(_(b'repository %s already exists') % path)
3928 raise error.RepoError(_(b'repository %s already exists') % path)
3933
3929
3934 if b'sharedrepo' in createopts:
3930 if b'sharedrepo' in createopts:
3935 sharedpath = createopts[b'sharedrepo'].sharedpath
3931 sharedpath = createopts[b'sharedrepo'].sharedpath
3936
3932
3937 if createopts.get(b'sharedrelative'):
3933 if createopts.get(b'sharedrelative'):
3938 try:
3934 try:
3939 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3935 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3940 sharedpath = util.pconvert(sharedpath)
3936 sharedpath = util.pconvert(sharedpath)
3941 except (IOError, ValueError) as e:
3937 except (IOError, ValueError) as e:
3942 # ValueError is raised on Windows if the drive letters differ
3938 # ValueError is raised on Windows if the drive letters differ
3943 # on each path.
3939 # on each path.
3944 raise error.Abort(
3940 raise error.Abort(
3945 _(b'cannot calculate relative path'),
3941 _(b'cannot calculate relative path'),
3946 hint=stringutil.forcebytestr(e),
3942 hint=stringutil.forcebytestr(e),
3947 )
3943 )
3948
3944
3949 if not wdirvfs.exists():
3945 if not wdirvfs.exists():
3950 wdirvfs.makedirs()
3946 wdirvfs.makedirs()
3951
3947
3952 hgvfs.makedir(notindexed=True)
3948 hgvfs.makedir(notindexed=True)
3953 if b'sharedrepo' not in createopts:
3949 if b'sharedrepo' not in createopts:
3954 hgvfs.mkdir(b'cache')
3950 hgvfs.mkdir(b'cache')
3955 hgvfs.mkdir(b'wcache')
3951 hgvfs.mkdir(b'wcache')
3956
3952
3957 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3953 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3958 if has_store and b'sharedrepo' not in createopts:
3954 if has_store and b'sharedrepo' not in createopts:
3959 hgvfs.mkdir(b'store')
3955 hgvfs.mkdir(b'store')
3960
3956
3961 # We create an invalid changelog outside the store so very old
3957 # We create an invalid changelog outside the store so very old
3962 # Mercurial versions (which didn't know about the requirements
3958 # Mercurial versions (which didn't know about the requirements
3963 # file) encounter an error on reading the changelog. This
3959 # file) encounter an error on reading the changelog. This
3964 # effectively locks out old clients and prevents them from
3960 # effectively locks out old clients and prevents them from
3965 # mucking with a repo in an unknown format.
3961 # mucking with a repo in an unknown format.
3966 #
3962 #
3967 # The revlog header has version 65535, which won't be recognized by
3963 # The revlog header has version 65535, which won't be recognized by
3968 # such old clients.
3964 # such old clients.
3969 hgvfs.append(
3965 hgvfs.append(
3970 b'00changelog.i',
3966 b'00changelog.i',
3971 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3967 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3972 b'layout',
3968 b'layout',
3973 )
3969 )
3974
3970
3975 # Filter the requirements into working copy and store ones
3971 # Filter the requirements into working copy and store ones
3976 wcreq, storereq = scmutil.filterrequirements(requirements)
3972 wcreq, storereq = scmutil.filterrequirements(requirements)
3977 # write working copy ones
3973 # write working copy ones
3978 scmutil.writerequires(hgvfs, wcreq)
3974 scmutil.writerequires(hgvfs, wcreq)
3979 # If there are store requirements and the current repository
3975 # If there are store requirements and the current repository
3980 # is not a shared one, write stored requirements
3976 # is not a shared one, write stored requirements
3981 # For new shared repository, we don't need to write the store
3977 # For new shared repository, we don't need to write the store
3982 # requirements as they are already present in store requires
3978 # requirements as they are already present in store requires
3983 if storereq and b'sharedrepo' not in createopts:
3979 if storereq and b'sharedrepo' not in createopts:
3984 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3980 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3985 scmutil.writerequires(storevfs, storereq)
3981 scmutil.writerequires(storevfs, storereq)
3986
3982
3987 # Write out file telling readers where to find the shared store.
3983 # Write out file telling readers where to find the shared store.
3988 if b'sharedrepo' in createopts:
3984 if b'sharedrepo' in createopts:
3989 hgvfs.write(b'sharedpath', sharedpath)
3985 hgvfs.write(b'sharedpath', sharedpath)
3990
3986
3991 if createopts.get(b'shareditems'):
3987 if createopts.get(b'shareditems'):
3992 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3988 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3993 hgvfs.write(b'shared', shared)
3989 hgvfs.write(b'shared', shared)
3994
3990
3995
3991
3996 def poisonrepository(repo):
3992 def poisonrepository(repo):
3997 """Poison a repository instance so it can no longer be used."""
3993 """Poison a repository instance so it can no longer be used."""
3998 # Perform any cleanup on the instance.
3994 # Perform any cleanup on the instance.
3999 repo.close()
3995 repo.close()
4000
3996
4001 # Our strategy is to replace the type of the object with one that
3997 # Our strategy is to replace the type of the object with one that
4002 # has all attribute lookups result in error.
3998 # has all attribute lookups result in error.
4003 #
3999 #
4004 # But we have to allow the close() method because some constructors
4000 # But we have to allow the close() method because some constructors
4005 # of repos call close() on repo references.
4001 # of repos call close() on repo references.
4006 class poisonedrepository:
4002 class poisonedrepository:
4007 def __getattribute__(self, item):
4003 def __getattribute__(self, item):
4008 if item == 'close':
4004 if item == 'close':
4009 return object.__getattribute__(self, item)
4005 return object.__getattribute__(self, item)
4010
4006
4011 raise error.ProgrammingError(
4007 raise error.ProgrammingError(
4012 b'repo instances should not be used after unshare'
4008 b'repo instances should not be used after unshare'
4013 )
4009 )
4014
4010
4015 def close(self):
4011 def close(self):
4016 pass
4012 pass
4017
4013
4018 # We may have a repoview, which intercepts __setattr__. So be sure
4014 # We may have a repoview, which intercepts __setattr__. So be sure
4019 # we operate at the lowest level possible.
4015 # we operate at the lowest level possible.
4020 object.__setattr__(repo, '__class__', poisonedrepository)
4016 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,533 +1,531 b''
1 #require repofncache
1 #require repofncache
2
2
3 An extension which will set fncache chunksize to 1 byte to make sure that logic
3 An extension which will set fncache chunksize to 1 byte to make sure that logic
4 does not break
4 does not break
5
5
6 $ cat > chunksize.py <<EOF
6 $ cat > chunksize.py <<EOF
7 > from mercurial import store
7 > from mercurial import store
8 > store.fncache_chunksize = 1
8 > store.fncache_chunksize = 1
9 > EOF
9 > EOF
10
10
11 $ cat >> $HGRCPATH <<EOF
11 $ cat >> $HGRCPATH <<EOF
12 > [extensions]
12 > [extensions]
13 > chunksize = $TESTTMP/chunksize.py
13 > chunksize = $TESTTMP/chunksize.py
14 > EOF
14 > EOF
15
15
16 Init repo1:
16 Init repo1:
17
17
18 $ hg init repo1
18 $ hg init repo1
19 $ cd repo1
19 $ cd repo1
20 $ echo "some text" > a
20 $ echo "some text" > a
21 $ hg add
21 $ hg add
22 adding a
22 adding a
23 $ hg ci -m first
23 $ hg ci -m first
24 $ cat .hg/store/fncache | sort
24 $ cat .hg/store/fncache | sort
25 data/a.i
25 data/a.i
26
26
27 Testing a.i/b:
27 Testing a.i/b:
28
28
29 $ mkdir a.i
29 $ mkdir a.i
30 $ echo "some other text" > a.i/b
30 $ echo "some other text" > a.i/b
31 $ hg add
31 $ hg add
32 adding a.i/b
32 adding a.i/b
33 $ hg ci -m second
33 $ hg ci -m second
34 $ cat .hg/store/fncache | sort
34 $ cat .hg/store/fncache | sort
35 data/a.i
35 data/a.i
36 data/a.i.hg/b.i
36 data/a.i.hg/b.i
37
37
38 Testing a.i.hg/c:
38 Testing a.i.hg/c:
39
39
40 $ mkdir a.i.hg
40 $ mkdir a.i.hg
41 $ echo "yet another text" > a.i.hg/c
41 $ echo "yet another text" > a.i.hg/c
42 $ hg add
42 $ hg add
43 adding a.i.hg/c
43 adding a.i.hg/c
44 $ hg ci -m third
44 $ hg ci -m third
45 $ cat .hg/store/fncache | sort
45 $ cat .hg/store/fncache | sort
46 data/a.i
46 data/a.i
47 data/a.i.hg.hg/c.i
47 data/a.i.hg.hg/c.i
48 data/a.i.hg/b.i
48 data/a.i.hg/b.i
49
49
50 Testing verify:
50 Testing verify:
51
51
52 $ hg verify -q
52 $ hg verify -q
53
53
54 $ rm .hg/store/fncache
54 $ rm .hg/store/fncache
55
55
56 $ hg verify
56 $ hg verify
57 checking changesets
57 checking changesets
58 checking manifests
58 checking manifests
59 crosschecking files in changesets and manifests
59 crosschecking files in changesets and manifests
60 checking files
60 checking files
61 warning: revlog 'data/a.i' not in fncache!
61 warning: revlog 'data/a.i' not in fncache!
62 warning: revlog 'data/a.i.hg/c.i' not in fncache!
62 warning: revlog 'data/a.i.hg/c.i' not in fncache!
63 warning: revlog 'data/a.i/b.i' not in fncache!
63 warning: revlog 'data/a.i/b.i' not in fncache!
64 checking dirstate
64 checking dirstate
65 checked 3 changesets with 3 changes to 3 files
65 checked 3 changesets with 3 changes to 3 files
66 3 warnings encountered!
66 3 warnings encountered!
67 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
67 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
68
68
69 Follow the hint to make sure it works
69 Follow the hint to make sure it works
70
70
71 $ hg debugrebuildfncache
71 $ hg debugrebuildfncache
72 adding data/a.i
72 adding data/a.i
73 adding data/a.i.hg/c.i
73 adding data/a.i.hg/c.i
74 adding data/a.i/b.i
74 adding data/a.i/b.i
75 3 items added, 0 removed from fncache
75 3 items added, 0 removed from fncache
76
76
77 $ hg verify -q
77 $ hg verify -q
78
78
79 $ cd ..
79 $ cd ..
80
80
81 Non store repo:
81 Non store repo:
82
82
83 $ hg --config format.usestore=False init foo
83 $ hg --config format.usestore=False init foo
84 $ cd foo
84 $ cd foo
85 $ mkdir tst.d
85 $ mkdir tst.d
86 $ echo foo > tst.d/foo
86 $ echo foo > tst.d/foo
87 $ hg ci -Amfoo
87 $ hg ci -Amfoo
88 adding tst.d/foo
88 adding tst.d/foo
89 $ find .hg | sort
89 $ find .hg | sort
90 .hg
90 .hg
91 .hg/00changelog.i
91 .hg/00changelog.i
92 .hg/00manifest.i
92 .hg/00manifest.i
93 .hg/cache
93 .hg/cache
94 .hg/cache/branch2-served
94 .hg/cache/branch2-served
95 .hg/cache/rbc-names-v1
95 .hg/cache/rbc-names-v1
96 .hg/cache/rbc-revs-v1
96 .hg/cache/rbc-revs-v1
97 .hg/data
97 .hg/data
98 .hg/data/tst.d.hg
98 .hg/data/tst.d.hg
99 .hg/data/tst.d.hg/foo.i
99 .hg/data/tst.d.hg/foo.i
100 .hg/dirstate
100 .hg/dirstate
101 .hg/fsmonitor.state (fsmonitor !)
101 .hg/fsmonitor.state (fsmonitor !)
102 .hg/last-message.txt
102 .hg/last-message.txt
103 .hg/phaseroots
103 .hg/phaseroots
104 .hg/requires
104 .hg/requires
105 .hg/undo
105 .hg/undo
106 .hg/undo.backupfiles
106 .hg/undo.backupfiles
107 .hg/undo.bookmarks
107 .hg/undo.bookmarks
108 .hg/undo.branch
108 .hg/undo.branch
109 .hg/undo.desc
109 .hg/undo.desc
110 .hg/undo.phaseroots
111 .hg/wcache
110 .hg/wcache
112 .hg/wcache/checkisexec (execbit !)
111 .hg/wcache/checkisexec (execbit !)
113 .hg/wcache/checklink (symlink !)
112 .hg/wcache/checklink (symlink !)
114 .hg/wcache/checklink-target (symlink !)
113 .hg/wcache/checklink-target (symlink !)
115 .hg/wcache/manifestfulltextcache (reporevlogstore !)
114 .hg/wcache/manifestfulltextcache (reporevlogstore !)
116 $ cd ..
115 $ cd ..
117
116
118 Non fncache repo:
117 Non fncache repo:
119
118
120 $ hg --config format.usefncache=False init bar
119 $ hg --config format.usefncache=False init bar
121 $ cd bar
120 $ cd bar
122 $ mkdir tst.d
121 $ mkdir tst.d
123 $ echo foo > tst.d/Foo
122 $ echo foo > tst.d/Foo
124 $ hg ci -Amfoo
123 $ hg ci -Amfoo
125 adding tst.d/Foo
124 adding tst.d/Foo
126 $ find .hg | sort
125 $ find .hg | sort
127 .hg
126 .hg
128 .hg/00changelog.i
127 .hg/00changelog.i
129 .hg/cache
128 .hg/cache
130 .hg/cache/branch2-served
129 .hg/cache/branch2-served
131 .hg/cache/rbc-names-v1
130 .hg/cache/rbc-names-v1
132 .hg/cache/rbc-revs-v1
131 .hg/cache/rbc-revs-v1
133 .hg/dirstate
132 .hg/dirstate
134 .hg/fsmonitor.state (fsmonitor !)
133 .hg/fsmonitor.state (fsmonitor !)
135 .hg/last-message.txt
134 .hg/last-message.txt
136 .hg/requires
135 .hg/requires
137 .hg/store
136 .hg/store
138 .hg/store/00changelog.i
137 .hg/store/00changelog.i
139 .hg/store/00manifest.i
138 .hg/store/00manifest.i
140 .hg/store/data
139 .hg/store/data
141 .hg/store/data/tst.d.hg
140 .hg/store/data/tst.d.hg
142 .hg/store/data/tst.d.hg/_foo.i
141 .hg/store/data/tst.d.hg/_foo.i
143 .hg/store/phaseroots
142 .hg/store/phaseroots
144 .hg/store/requires
143 .hg/store/requires
145 .hg/store/undo
144 .hg/store/undo
146 .hg/store/undo.backupfiles
145 .hg/store/undo.backupfiles
147 .hg/store/undo.phaseroots
148 .hg/undo.bookmarks
146 .hg/undo.bookmarks
149 .hg/undo.branch
147 .hg/undo.branch
150 .hg/undo.desc
148 .hg/undo.desc
151 .hg/wcache
149 .hg/wcache
152 .hg/wcache/checkisexec (execbit !)
150 .hg/wcache/checkisexec (execbit !)
153 .hg/wcache/checklink (symlink !)
151 .hg/wcache/checklink (symlink !)
154 .hg/wcache/checklink-target (symlink !)
152 .hg/wcache/checklink-target (symlink !)
155 .hg/wcache/manifestfulltextcache (reporevlogstore !)
153 .hg/wcache/manifestfulltextcache (reporevlogstore !)
156 $ cd ..
154 $ cd ..
157
155
158 Encoding of reserved / long paths in the store
156 Encoding of reserved / long paths in the store
159
157
160 $ hg init r2
158 $ hg init r2
161 $ cd r2
159 $ cd r2
162 $ cat <<EOF > .hg/hgrc
160 $ cat <<EOF > .hg/hgrc
163 > [ui]
161 > [ui]
164 > portablefilenames = ignore
162 > portablefilenames = ignore
165 > EOF
163 > EOF
166
164
167 $ hg import -q --bypass - <<EOF
165 $ hg import -q --bypass - <<EOF
168 > # HG changeset patch
166 > # HG changeset patch
169 > # User test
167 > # User test
170 > # Date 0 0
168 > # Date 0 0
171 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
169 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
172 > # Parent 0000000000000000000000000000000000000000
170 > # Parent 0000000000000000000000000000000000000000
173 > 1
171 > 1
174 >
172 >
175 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
173 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
176 > new file mode 100644
174 > new file mode 100644
177 > --- /dev/null
175 > --- /dev/null
178 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
176 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
179 > @@ -0,0 +1,1 @@
177 > @@ -0,0 +1,1 @@
180 > +foo
178 > +foo
181 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
179 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
182 > new file mode 100644
180 > new file mode 100644
183 > --- /dev/null
181 > --- /dev/null
184 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
182 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
185 > @@ -0,0 +1,1 @@
183 > @@ -0,0 +1,1 @@
186 > +foo
184 > +foo
187 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
185 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
188 > new file mode 100644
186 > new file mode 100644
189 > --- /dev/null
187 > --- /dev/null
190 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
188 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
191 > @@ -0,0 +1,1 @@
189 > @@ -0,0 +1,1 @@
192 > +foo
190 > +foo
193 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
191 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
194 > new file mode 100644
192 > new file mode 100644
195 > --- /dev/null
193 > --- /dev/null
196 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
194 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
197 > @@ -0,0 +1,1 @@
195 > @@ -0,0 +1,1 @@
198 > +foo
196 > +foo
199 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
197 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
200 > new file mode 100644
198 > new file mode 100644
201 > --- /dev/null
199 > --- /dev/null
202 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
200 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
203 > @@ -0,0 +1,1 @@
201 > @@ -0,0 +1,1 @@
204 > +foo
202 > +foo
205 > EOF
203 > EOF
206
204
207 $ find .hg/store -name *.i | sort
205 $ find .hg/store -name *.i | sort
208 .hg/store/00changelog.i
206 .hg/store/00changelog.i
209 .hg/store/00manifest.i
207 .hg/store/00manifest.i
210 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
208 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
211 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
209 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
212 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
210 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
213 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
211 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
214 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
212 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
215
213
216 $ cd ..
214 $ cd ..
217
215
218 Aborting lock does not prevent fncache writes
216 Aborting lock does not prevent fncache writes
219
217
220 $ cat > exceptionext.py <<EOF
218 $ cat > exceptionext.py <<EOF
221 > import os
219 > import os
222 > from mercurial import commands, error, extensions
220 > from mercurial import commands, error, extensions
223 >
221 >
224 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
222 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
225 > def releasewrap():
223 > def releasewrap():
226 > l.held = False # ensure __del__ is a noop
224 > l.held = False # ensure __del__ is a noop
227 > raise error.Abort(b"forced lock failure")
225 > raise error.Abort(b"forced lock failure")
228 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
226 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
229 > return l
227 > return l
230 >
228 >
231 > def reposetup(ui, repo):
229 > def reposetup(ui, repo):
232 > extensions.wrapfunction(repo, '_lock', lockexception)
230 > extensions.wrapfunction(repo, '_lock', lockexception)
233 >
231 >
234 > cmdtable = {}
232 > cmdtable = {}
235 >
233 >
236 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
234 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
237 > # at the end of dispatching (for intentional "forced lcok failure")
235 > # at the end of dispatching (for intentional "forced lcok failure")
238 > def commitwrap(orig, ui, repo, *pats, **opts):
236 > def commitwrap(orig, ui, repo, *pats, **opts):
239 > repo = repo.unfiltered() # to use replaced repo._lock certainly
237 > repo = repo.unfiltered() # to use replaced repo._lock certainly
240 > wlock = repo.wlock()
238 > wlock = repo.wlock()
241 > try:
239 > try:
242 > return orig(ui, repo, *pats, **opts)
240 > return orig(ui, repo, *pats, **opts)
243 > finally:
241 > finally:
244 > # multiple 'relase()' is needed for complete releasing wlock,
242 > # multiple 'relase()' is needed for complete releasing wlock,
245 > # because "forced" abort at last releasing store lock
243 > # because "forced" abort at last releasing store lock
246 > # prevents wlock from being released at same 'lockmod.release()'
244 > # prevents wlock from being released at same 'lockmod.release()'
247 > for i in range(wlock.held):
245 > for i in range(wlock.held):
248 > wlock.release()
246 > wlock.release()
249 >
247 >
250 > def extsetup(ui):
248 > def extsetup(ui):
251 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
249 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
252 > EOF
250 > EOF
253 $ extpath=`pwd`/exceptionext.py
251 $ extpath=`pwd`/exceptionext.py
254 $ hg init fncachetxn
252 $ hg init fncachetxn
255 $ cd fncachetxn
253 $ cd fncachetxn
256 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
254 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
257 $ touch y
255 $ touch y
258 $ hg ci -qAm y
256 $ hg ci -qAm y
259 abort: forced lock failure
257 abort: forced lock failure
260 [255]
258 [255]
261 $ cat .hg/store/fncache
259 $ cat .hg/store/fncache
262 data/y.i
260 data/y.i
263
261
264 Aborting transaction prevents fncache change
262 Aborting transaction prevents fncache change
265
263
266 $ cat > ../exceptionext.py <<EOF
264 $ cat > ../exceptionext.py <<EOF
267 > import os
265 > import os
268 > from mercurial import commands, error, extensions, localrepo
266 > from mercurial import commands, error, extensions, localrepo
269 >
267 >
270 > def wrapper(orig, self, *args, **kwargs):
268 > def wrapper(orig, self, *args, **kwargs):
271 > tr = orig(self, *args, **kwargs)
269 > tr = orig(self, *args, **kwargs)
272 > def fail(tr):
270 > def fail(tr):
273 > raise error.Abort(b"forced transaction failure")
271 > raise error.Abort(b"forced transaction failure")
274 > # zzz prefix to ensure it sorted after store.write
272 > # zzz prefix to ensure it sorted after store.write
275 > tr.addfinalize(b'zzz-forcefails', fail)
273 > tr.addfinalize(b'zzz-forcefails', fail)
276 > return tr
274 > return tr
277 >
275 >
278 > def uisetup(ui):
276 > def uisetup(ui):
279 > extensions.wrapfunction(
277 > extensions.wrapfunction(
280 > localrepo.localrepository, b'transaction', wrapper)
278 > localrepo.localrepository, b'transaction', wrapper)
281 >
279 >
282 > cmdtable = {}
280 > cmdtable = {}
283 >
281 >
284 > EOF
282 > EOF
285
283
286 Clean cached version
284 Clean cached version
287 $ rm -f "${extpath}c"
285 $ rm -f "${extpath}c"
288 $ rm -Rf "`dirname $extpath`/__pycache__"
286 $ rm -Rf "`dirname $extpath`/__pycache__"
289
287
290 $ touch z
288 $ touch z
291 $ hg ci -qAm z
289 $ hg ci -qAm z
292 transaction abort!
290 transaction abort!
293 rollback completed
291 rollback completed
294 abort: forced transaction failure
292 abort: forced transaction failure
295 [255]
293 [255]
296 $ cat .hg/store/fncache
294 $ cat .hg/store/fncache
297 data/y.i
295 data/y.i
298
296
299 Aborted transactions can be recovered later
297 Aborted transactions can be recovered later
300
298
301 $ cat > ../exceptionext.py <<EOF
299 $ cat > ../exceptionext.py <<EOF
302 > import os
300 > import os
303 > import signal
301 > import signal
304 > from mercurial import (
302 > from mercurial import (
305 > commands,
303 > commands,
306 > error,
304 > error,
307 > extensions,
305 > extensions,
308 > localrepo,
306 > localrepo,
309 > transaction,
307 > transaction,
310 > )
308 > )
311 >
309 >
312 > def trwrapper(orig, self, *args, **kwargs):
310 > def trwrapper(orig, self, *args, **kwargs):
313 > tr = orig(self, *args, **kwargs)
311 > tr = orig(self, *args, **kwargs)
314 > def fail(tr):
312 > def fail(tr):
315 > os.kill(os.getpid(), signal.SIGKILL)
313 > os.kill(os.getpid(), signal.SIGKILL)
316 > # zzz prefix to ensure it sorted after store.write
314 > # zzz prefix to ensure it sorted after store.write
317 > tr.addfinalize(b'zzz-forcefails', fail)
315 > tr.addfinalize(b'zzz-forcefails', fail)
318 > return tr
316 > return tr
319 >
317 >
320 > def uisetup(ui):
318 > def uisetup(ui):
321 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
319 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
322 > trwrapper)
320 > trwrapper)
323 >
321 >
324 > cmdtable = {}
322 > cmdtable = {}
325 >
323 >
326 > EOF
324 > EOF
327
325
328 Clean cached versions
326 Clean cached versions
329 $ rm -f "${extpath}c"
327 $ rm -f "${extpath}c"
330 $ rm -Rf "`dirname $extpath`/__pycache__"
328 $ rm -Rf "`dirname $extpath`/__pycache__"
331
329
332 $ hg up -q 1
330 $ hg up -q 1
333 $ touch z
331 $ touch z
334 # Cannot rely on the return code value as chg use a different one.
332 # Cannot rely on the return code value as chg use a different one.
335 # So we use a `|| echo` trick
333 # So we use a `|| echo` trick
336 # XXX-CHG fixing chg behavior would be nice here.
334 # XXX-CHG fixing chg behavior would be nice here.
337 $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
335 $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
338 *Killed* (glob) (?)
336 *Killed* (glob) (?)
339 He's Dead, Jim.
337 He's Dead, Jim.
340 $ cat .hg/store/fncache | sort
338 $ cat .hg/store/fncache | sort
341 data/y.i
339 data/y.i
342 data/z.i
340 data/z.i
343 $ hg recover --verify
341 $ hg recover --verify
344 rolling back interrupted transaction
342 rolling back interrupted transaction
345 checking changesets
343 checking changesets
346 checking manifests
344 checking manifests
347 crosschecking files in changesets and manifests
345 crosschecking files in changesets and manifests
348 checking files
346 checking files
349 checking dirstate
347 checking dirstate
350 checked 1 changesets with 1 changes to 1 files
348 checked 1 changesets with 1 changes to 1 files
351 $ cat .hg/store/fncache
349 $ cat .hg/store/fncache
352 data/y.i
350 data/y.i
353
351
354 $ cd ..
352 $ cd ..
355
353
356 debugrebuildfncache does nothing unless repo has fncache requirement
354 debugrebuildfncache does nothing unless repo has fncache requirement
357
355
358 $ hg --config format.usefncache=false init nofncache
356 $ hg --config format.usefncache=false init nofncache
359 $ cd nofncache
357 $ cd nofncache
360 $ hg debugrebuildfncache
358 $ hg debugrebuildfncache
361 (not rebuilding fncache because repository does not support fncache)
359 (not rebuilding fncache because repository does not support fncache)
362
360
363 $ cd ..
361 $ cd ..
364
362
365 debugrebuildfncache works on empty repository
363 debugrebuildfncache works on empty repository
366
364
367 $ hg init empty
365 $ hg init empty
368 $ cd empty
366 $ cd empty
369 $ hg debugrebuildfncache
367 $ hg debugrebuildfncache
370 fncache already up to date
368 fncache already up to date
371 $ cd ..
369 $ cd ..
372
370
373 debugrebuildfncache on an up to date repository no-ops
371 debugrebuildfncache on an up to date repository no-ops
374
372
375 $ hg init repo
373 $ hg init repo
376 $ cd repo
374 $ cd repo
377 $ echo initial > foo
375 $ echo initial > foo
378 $ echo initial > .bar
376 $ echo initial > .bar
379 $ hg commit -A -m initial
377 $ hg commit -A -m initial
380 adding .bar
378 adding .bar
381 adding foo
379 adding foo
382
380
383 $ cat .hg/store/fncache | sort
381 $ cat .hg/store/fncache | sort
384 data/.bar.i
382 data/.bar.i
385 data/foo.i
383 data/foo.i
386
384
387 $ hg debugrebuildfncache
385 $ hg debugrebuildfncache
388 fncache already up to date
386 fncache already up to date
389
387
390 debugrebuildfncache restores deleted fncache file
388 debugrebuildfncache restores deleted fncache file
391
389
392 $ rm -f .hg/store/fncache
390 $ rm -f .hg/store/fncache
393 $ hg debugrebuildfncache
391 $ hg debugrebuildfncache
394 adding data/.bar.i
392 adding data/.bar.i
395 adding data/foo.i
393 adding data/foo.i
396 2 items added, 0 removed from fncache
394 2 items added, 0 removed from fncache
397
395
398 $ cat .hg/store/fncache | sort
396 $ cat .hg/store/fncache | sort
399 data/.bar.i
397 data/.bar.i
400 data/foo.i
398 data/foo.i
401
399
402 Rebuild after rebuild should no-op
400 Rebuild after rebuild should no-op
403
401
404 $ hg debugrebuildfncache
402 $ hg debugrebuildfncache
405 fncache already up to date
403 fncache already up to date
406
404
407 A single missing file should get restored, an extra file should be removed
405 A single missing file should get restored, an extra file should be removed
408
406
409 $ cat > .hg/store/fncache << EOF
407 $ cat > .hg/store/fncache << EOF
410 > data/foo.i
408 > data/foo.i
411 > data/bad-entry.i
409 > data/bad-entry.i
412 > EOF
410 > EOF
413
411
414 $ hg debugrebuildfncache
412 $ hg debugrebuildfncache
415 removing data/bad-entry.i
413 removing data/bad-entry.i
416 adding data/.bar.i
414 adding data/.bar.i
417 1 items added, 1 removed from fncache
415 1 items added, 1 removed from fncache
418
416
419 $ cat .hg/store/fncache | sort
417 $ cat .hg/store/fncache | sort
420 data/.bar.i
418 data/.bar.i
421 data/foo.i
419 data/foo.i
422
420
423 debugrebuildfncache recovers from truncated line in fncache
421 debugrebuildfncache recovers from truncated line in fncache
424
422
425 $ printf a > .hg/store/fncache
423 $ printf a > .hg/store/fncache
426 $ hg debugrebuildfncache
424 $ hg debugrebuildfncache
427 fncache does not ends with a newline
425 fncache does not ends with a newline
428 adding data/.bar.i
426 adding data/.bar.i
429 adding data/foo.i
427 adding data/foo.i
430 2 items added, 0 removed from fncache
428 2 items added, 0 removed from fncache
431
429
432 $ cat .hg/store/fncache | sort
430 $ cat .hg/store/fncache | sort
433 data/.bar.i
431 data/.bar.i
434 data/foo.i
432 data/foo.i
435
433
436 $ cd ..
434 $ cd ..
437
435
438 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
436 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
439
437
440 $ hg --config format.dotencode=false init nodotencode
438 $ hg --config format.dotencode=false init nodotencode
441 $ cd nodotencode
439 $ cd nodotencode
442 $ echo initial > foo
440 $ echo initial > foo
443 $ echo initial > .bar
441 $ echo initial > .bar
444 $ hg commit -A -m initial
442 $ hg commit -A -m initial
445 adding .bar
443 adding .bar
446 adding foo
444 adding foo
447
445
448 $ cat .hg/store/fncache | sort
446 $ cat .hg/store/fncache | sort
449 data/.bar.i
447 data/.bar.i
450 data/foo.i
448 data/foo.i
451
449
452 $ rm .hg/store/fncache
450 $ rm .hg/store/fncache
453 $ hg debugrebuildfncache
451 $ hg debugrebuildfncache
454 adding data/.bar.i
452 adding data/.bar.i
455 adding data/foo.i
453 adding data/foo.i
456 2 items added, 0 removed from fncache
454 2 items added, 0 removed from fncache
457
455
458 $ cat .hg/store/fncache | sort
456 $ cat .hg/store/fncache | sort
459 data/.bar.i
457 data/.bar.i
460 data/foo.i
458 data/foo.i
461
459
462 $ cd ..
460 $ cd ..
463
461
464 In repositories that have accumulated a large number of files over time, the
462 In repositories that have accumulated a large number of files over time, the
465 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
463 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
466 The cache should not loaded when committing changes to existing files, or when unbundling
464 The cache should not loaded when committing changes to existing files, or when unbundling
467 changesets that only contain changes to existing files:
465 changesets that only contain changes to existing files:
468
466
469 $ cat > fncacheloadwarn.py << EOF
467 $ cat > fncacheloadwarn.py << EOF
470 > from mercurial import extensions, localrepo
468 > from mercurial import extensions, localrepo
471 >
469 >
472 > def extsetup(ui):
470 > def extsetup(ui):
473 > def wrapstore(orig, requirements, *args):
471 > def wrapstore(orig, requirements, *args):
474 > store = orig(requirements, *args)
472 > store = orig(requirements, *args)
475 > if b'store' in requirements and b'fncache' in requirements:
473 > if b'store' in requirements and b'fncache' in requirements:
476 > instrumentfncachestore(store, ui)
474 > instrumentfncachestore(store, ui)
477 > return store
475 > return store
478 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
476 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
479 >
477 >
480 > def instrumentfncachestore(fncachestore, ui):
478 > def instrumentfncachestore(fncachestore, ui):
481 > class instrumentedfncache(type(fncachestore.fncache)):
479 > class instrumentedfncache(type(fncachestore.fncache)):
482 > def _load(self):
480 > def _load(self):
483 > ui.warn(b'fncache load triggered!\n')
481 > ui.warn(b'fncache load triggered!\n')
484 > super(instrumentedfncache, self)._load()
482 > super(instrumentedfncache, self)._load()
485 > fncachestore.fncache.__class__ = instrumentedfncache
483 > fncachestore.fncache.__class__ = instrumentedfncache
486 > EOF
484 > EOF
487
485
488 $ fncachextpath=`pwd`/fncacheloadwarn.py
486 $ fncachextpath=`pwd`/fncacheloadwarn.py
489 $ hg init nofncacheload
487 $ hg init nofncacheload
490 $ cd nofncacheload
488 $ cd nofncacheload
491 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
489 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
492
490
493 A new file should trigger a load, as we'd want to update the fncache set in that case:
491 A new file should trigger a load, as we'd want to update the fncache set in that case:
494
492
495 $ touch foo
493 $ touch foo
496 $ hg ci -qAm foo
494 $ hg ci -qAm foo
497 fncache load triggered!
495 fncache load triggered!
498
496
499 But modifying that file should not:
497 But modifying that file should not:
500
498
501 $ echo bar >> foo
499 $ echo bar >> foo
502 $ hg ci -qm foo
500 $ hg ci -qm foo
503
501
504 If a transaction has been aborted, the zero-size truncated index file will
502 If a transaction has been aborted, the zero-size truncated index file will
505 not prevent the fncache from being loaded; rather than actually abort
503 not prevent the fncache from being loaded; rather than actually abort
506 a transaction, we simulate the situation by creating a zero-size index file:
504 a transaction, we simulate the situation by creating a zero-size index file:
507
505
508 $ touch .hg/store/data/bar.i
506 $ touch .hg/store/data/bar.i
509 $ touch bar
507 $ touch bar
510 $ hg ci -qAm bar
508 $ hg ci -qAm bar
511 fncache load triggered!
509 fncache load triggered!
512
510
513 Unbundling should follow the same rules; existing files should not cause a load:
511 Unbundling should follow the same rules; existing files should not cause a load:
514
512
515 (loading during the clone is expected)
513 (loading during the clone is expected)
516 $ hg clone -q . tobundle
514 $ hg clone -q . tobundle
517 fncache load triggered!
515 fncache load triggered!
518 fncache load triggered!
516 fncache load triggered!
519
517
520 $ echo 'new line' > tobundle/bar
518 $ echo 'new line' > tobundle/bar
521 $ hg -R tobundle ci -qm bar
519 $ hg -R tobundle ci -qm bar
522 $ hg -R tobundle bundle -q barupdated.hg
520 $ hg -R tobundle bundle -q barupdated.hg
523 $ hg unbundle -q barupdated.hg
521 $ hg unbundle -q barupdated.hg
524
522
525 but adding new files should:
523 but adding new files should:
526
524
527 $ touch tobundle/newfile
525 $ touch tobundle/newfile
528 $ hg -R tobundle ci -qAm newfile
526 $ hg -R tobundle ci -qAm newfile
529 $ hg -R tobundle bundle -q newfile.hg
527 $ hg -R tobundle bundle -q newfile.hg
530 $ hg unbundle -q newfile.hg
528 $ hg unbundle -q newfile.hg
531 fncache load triggered!
529 fncache load triggered!
532
530
533 $ cd ..
531 $ cd ..
@@ -1,439 +1,433 b''
1 #require hardlink reporevlogstore
1 #require hardlink reporevlogstore
2
2
3 $ cat > nlinks.py <<EOF
3 $ cat > nlinks.py <<EOF
4 > import sys
4 > import sys
5 > from mercurial import pycompat, util
5 > from mercurial import pycompat, util
6 > for f in sorted(sys.stdin.readlines()):
6 > for f in sorted(sys.stdin.readlines()):
7 > f = f[:-1]
7 > f = f[:-1]
8 > print(util.nlinks(pycompat.fsencode(f)), f)
8 > print(util.nlinks(pycompat.fsencode(f)), f)
9 > EOF
9 > EOF
10
10
11 $ nlinksdir()
11 $ nlinksdir()
12 > {
12 > {
13 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
13 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
14 > }
14 > }
15
15
16 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
16 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
17
17
18 $ cat > linkcp.py <<EOF
18 $ cat > linkcp.py <<EOF
19 > import sys
19 > import sys
20 > from mercurial import pycompat, util
20 > from mercurial import pycompat, util
21 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
21 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
22 > pycompat.fsencode(sys.argv[2]), hardlink=True)
22 > pycompat.fsencode(sys.argv[2]), hardlink=True)
23 > EOF
23 > EOF
24
24
25 $ linkcp()
25 $ linkcp()
26 > {
26 > {
27 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
27 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
28 > }
28 > }
29
29
30 Prepare repo r1:
30 Prepare repo r1:
31
31
32 $ hg init r1
32 $ hg init r1
33 $ cd r1
33 $ cd r1
34
34
35 $ echo c1 > f1
35 $ echo c1 > f1
36 $ hg add f1
36 $ hg add f1
37 $ hg ci -m0
37 $ hg ci -m0
38
38
39 $ mkdir d1
39 $ mkdir d1
40 $ cd d1
40 $ cd d1
41 $ echo c2 > f2
41 $ echo c2 > f2
42 $ hg add f2
42 $ hg add f2
43 $ hg ci -m1
43 $ hg ci -m1
44 $ cd ../..
44 $ cd ../..
45
45
46 $ nlinksdir r1/.hg/store
46 $ nlinksdir r1/.hg/store
47 1 r1/.hg/store/00changelog.i
47 1 r1/.hg/store/00changelog.i
48 1 r1/.hg/store/00manifest.i
48 1 r1/.hg/store/00manifest.i
49 1 r1/.hg/store/data/d1/f2.i
49 1 r1/.hg/store/data/d1/f2.i
50 1 r1/.hg/store/data/f1.i
50 1 r1/.hg/store/data/f1.i
51 1 r1/.hg/store/fncache (repofncache !)
51 1 r1/.hg/store/fncache (repofncache !)
52 1 r1/.hg/store/phaseroots
52 1 r1/.hg/store/phaseroots
53 1 r1/.hg/store/requires
53 1 r1/.hg/store/requires
54 1 r1/.hg/store/undo
54 1 r1/.hg/store/undo
55 1 r1/.hg/store/undo.backup.fncache (repofncache !)
55 1 r1/.hg/store/undo.backup.fncache (repofncache !)
56 1 r1/.hg/store/undo.backupfiles
56 1 r1/.hg/store/undo.backupfiles
57 1 r1/.hg/store/undo.phaseroots
58
57
59
58
60 Create hardlinked clone r2:
59 Create hardlinked clone r2:
61
60
62 $ hg clone -U --debug r1 r2 --config progress.debug=true
61 $ hg clone -U --debug r1 r2 --config progress.debug=true
63 linking: 1/7 files (14.29%)
62 linking: 1/7 files (14.29%)
64 linking: 2/7 files (28.57%)
63 linking: 2/7 files (28.57%)
65 linking: 3/7 files (42.86%)
64 linking: 3/7 files (42.86%)
66 linking: 4/7 files (57.14%)
65 linking: 4/7 files (57.14%)
67 linking: 5/7 files (71.43%)
66 linking: 5/7 files (71.43%)
68 linking: 6/7 files (85.71%)
67 linking: 6/7 files (85.71%)
69 linking: 7/7 files (100.00%)
68 linking: 7/7 files (100.00%)
70 linked 7 files
69 linked 7 files
71 updating the branch cache
70 updating the branch cache
72
71
73 Create non-hardlinked clone r3:
72 Create non-hardlinked clone r3:
74
73
75 $ hg clone --pull r1 r3
74 $ hg clone --pull r1 r3
76 requesting all changes
75 requesting all changes
77 adding changesets
76 adding changesets
78 adding manifests
77 adding manifests
79 adding file changes
78 adding file changes
80 added 2 changesets with 2 changes to 2 files
79 added 2 changesets with 2 changes to 2 files
81 new changesets 40d85e9847f2:7069c422939c
80 new changesets 40d85e9847f2:7069c422939c
82 updating to branch default
81 updating to branch default
83 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
82 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
84
83
85
84
86 Repos r1 and r2 should now contain hardlinked files:
85 Repos r1 and r2 should now contain hardlinked files:
87
86
88 $ nlinksdir r1/.hg/store
87 $ nlinksdir r1/.hg/store
89 2 r1/.hg/store/00changelog.i
88 2 r1/.hg/store/00changelog.i
90 2 r1/.hg/store/00manifest.i
89 2 r1/.hg/store/00manifest.i
91 2 r1/.hg/store/data/d1/f2.i
90 2 r1/.hg/store/data/d1/f2.i
92 2 r1/.hg/store/data/f1.i
91 2 r1/.hg/store/data/f1.i
93 1 r1/.hg/store/fncache (repofncache !)
92 1 r1/.hg/store/fncache (repofncache !)
94 1 r1/.hg/store/phaseroots
93 1 r1/.hg/store/phaseroots
95 1 r1/.hg/store/requires
94 1 r1/.hg/store/requires
96 1 r1/.hg/store/undo
95 1 r1/.hg/store/undo
97 1 r1/.hg/store/undo.backup.fncache (repofncache !)
96 1 r1/.hg/store/undo.backup.fncache (repofncache !)
98 1 r1/.hg/store/undo.backupfiles
97 1 r1/.hg/store/undo.backupfiles
99 1 r1/.hg/store/undo.phaseroots
100
98
101 $ nlinksdir r2/.hg/store
99 $ nlinksdir r2/.hg/store
102 2 r2/.hg/store/00changelog.i
100 2 r2/.hg/store/00changelog.i
103 2 r2/.hg/store/00manifest.i
101 2 r2/.hg/store/00manifest.i
104 2 r2/.hg/store/data/d1/f2.i
102 2 r2/.hg/store/data/d1/f2.i
105 2 r2/.hg/store/data/f1.i
103 2 r2/.hg/store/data/f1.i
106 1 r2/.hg/store/fncache (repofncache !)
104 1 r2/.hg/store/fncache (repofncache !)
107 1 r2/.hg/store/requires
105 1 r2/.hg/store/requires
108
106
109 Repo r3 should not be hardlinked:
107 Repo r3 should not be hardlinked:
110
108
111 $ nlinksdir r3/.hg/store
109 $ nlinksdir r3/.hg/store
112 1 r3/.hg/store/00changelog.i
110 1 r3/.hg/store/00changelog.i
113 1 r3/.hg/store/00manifest.i
111 1 r3/.hg/store/00manifest.i
114 1 r3/.hg/store/data/d1/f2.i
112 1 r3/.hg/store/data/d1/f2.i
115 1 r3/.hg/store/data/f1.i
113 1 r3/.hg/store/data/f1.i
116 1 r3/.hg/store/fncache (repofncache !)
114 1 r3/.hg/store/fncache (repofncache !)
117 1 r3/.hg/store/phaseroots
115 1 r3/.hg/store/phaseroots
118 1 r3/.hg/store/requires
116 1 r3/.hg/store/requires
119 1 r3/.hg/store/undo
117 1 r3/.hg/store/undo
120 1 r3/.hg/store/undo.backupfiles
118 1 r3/.hg/store/undo.backupfiles
121 1 r3/.hg/store/undo.phaseroots
122
119
123
120
124 Create a non-inlined filelog in r3:
121 Create a non-inlined filelog in r3:
125
122
126 $ cd r3/d1
123 $ cd r3/d1
127 >>> f = open('data1', 'wb')
124 >>> f = open('data1', 'wb')
128 >>> for x in range(10000):
125 >>> for x in range(10000):
129 ... f.write(b"%d\n" % x) and None
126 ... f.write(b"%d\n" % x) and None
130 >>> f.close()
127 >>> f.close()
131 $ for j in 0 1 2 3 4 5 6 7 8 9; do
128 $ for j in 0 1 2 3 4 5 6 7 8 9; do
132 > cat data1 >> f2
129 > cat data1 >> f2
133 > hg commit -m$j
130 > hg commit -m$j
134 > done
131 > done
135 $ cd ../..
132 $ cd ../..
136
133
137 $ nlinksdir r3/.hg/store
134 $ nlinksdir r3/.hg/store
138 1 r3/.hg/store/00changelog.i
135 1 r3/.hg/store/00changelog.i
139 1 r3/.hg/store/00manifest.i
136 1 r3/.hg/store/00manifest.i
140 1 r3/.hg/store/data/d1/f2.d
137 1 r3/.hg/store/data/d1/f2.d
141 1 r3/.hg/store/data/d1/f2.i
138 1 r3/.hg/store/data/d1/f2.i
142 1 r3/.hg/store/data/f1.i
139 1 r3/.hg/store/data/f1.i
143 1 r3/.hg/store/fncache (repofncache !)
140 1 r3/.hg/store/fncache (repofncache !)
144 1 r3/.hg/store/phaseroots
141 1 r3/.hg/store/phaseroots
145 1 r3/.hg/store/requires
142 1 r3/.hg/store/requires
146 1 r3/.hg/store/undo
143 1 r3/.hg/store/undo
147 1 r3/.hg/store/undo.backup.fncache (repofncache !)
144 1 r3/.hg/store/undo.backup.fncache (repofncache !)
148 1 r3/.hg/store/undo.backup.phaseroots
145 1 r3/.hg/store/undo.backup.phaseroots
149 1 r3/.hg/store/undo.backupfiles
146 1 r3/.hg/store/undo.backupfiles
150 1 r3/.hg/store/undo.phaseroots
151
147
152 Push to repo r1 should break up most hardlinks in r2:
148 Push to repo r1 should break up most hardlinks in r2:
153
149
154 $ hg -R r2 verify -q
150 $ hg -R r2 verify -q
155
151
156 $ cd r3
152 $ cd r3
157 $ hg push
153 $ hg push
158 pushing to $TESTTMP/r1
154 pushing to $TESTTMP/r1
159 searching for changes
155 searching for changes
160 adding changesets
156 adding changesets
161 adding manifests
157 adding manifests
162 adding file changes
158 adding file changes
163 added 10 changesets with 10 changes to 1 files
159 added 10 changesets with 10 changes to 1 files
164
160
165 $ cd ..
161 $ cd ..
166
162
167 $ nlinksdir r2/.hg/store
163 $ nlinksdir r2/.hg/store
168 1 r2/.hg/store/00changelog.i
164 1 r2/.hg/store/00changelog.i
169 1 r2/.hg/store/00manifest.i
165 1 r2/.hg/store/00manifest.i
170 1 r2/.hg/store/data/d1/f2.i
166 1 r2/.hg/store/data/d1/f2.i
171 2 r2/.hg/store/data/f1.i
167 2 r2/.hg/store/data/f1.i
172 [12] r2/\.hg/store/fncache (re) (repofncache !)
168 [12] r2/\.hg/store/fncache (re) (repofncache !)
173 1 r2/.hg/store/requires
169 1 r2/.hg/store/requires
174
170
175 #if hardlink-whitelisted repofncache
171 #if hardlink-whitelisted repofncache
176 $ nlinksdir r2/.hg/store/fncache
172 $ nlinksdir r2/.hg/store/fncache
177 1 r2/.hg/store/fncache
173 1 r2/.hg/store/fncache
178 #endif
174 #endif
179
175
180 $ hg -R r2 verify -q
176 $ hg -R r2 verify -q
181
177
182 $ cd r1
178 $ cd r1
183 $ hg up
179 $ hg up
184 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
180 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
185
181
186 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
182 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
187
183
188 $ echo c1c1 >> f1
184 $ echo c1c1 >> f1
189 $ hg ci -m00
185 $ hg ci -m00
190 $ cd ..
186 $ cd ..
191
187
192 $ nlinksdir r2/.hg/store
188 $ nlinksdir r2/.hg/store
193 1 r2/.hg/store/00changelog.i
189 1 r2/.hg/store/00changelog.i
194 1 r2/.hg/store/00manifest.i
190 1 r2/.hg/store/00manifest.i
195 1 r2/.hg/store/data/d1/f2.i
191 1 r2/.hg/store/data/d1/f2.i
196 1 r2/.hg/store/data/f1.i
192 1 r2/.hg/store/data/f1.i
197 1 r2/.hg/store/fncache (repofncache !)
193 1 r2/.hg/store/fncache (repofncache !)
198 1 r2/.hg/store/requires
194 1 r2/.hg/store/requires
199
195
200 #if hardlink-whitelisted repofncache
196 #if hardlink-whitelisted repofncache
201 $ nlinksdir r2/.hg/store/fncache
197 $ nlinksdir r2/.hg/store/fncache
202 1 r2/.hg/store/fncache
198 1 r2/.hg/store/fncache
203 #endif
199 #endif
204
200
205 Create a file which exec permissions we will change
201 Create a file which exec permissions we will change
206 $ cd r3
202 $ cd r3
207 $ echo "echo hello world" > f3
203 $ echo "echo hello world" > f3
208 $ hg add f3
204 $ hg add f3
209 $ hg ci -mf3
205 $ hg ci -mf3
210 $ cd ..
206 $ cd ..
211
207
212 $ cd r3
208 $ cd r3
213 $ hg tip --template '{rev}:{node|short}\n'
209 $ hg tip --template '{rev}:{node|short}\n'
214 12:d3b77733a28a
210 12:d3b77733a28a
215 $ echo bla > f1
211 $ echo bla > f1
216 $ chmod +x f3
212 $ chmod +x f3
217 $ hg ci -m1
213 $ hg ci -m1
218 $ cd ..
214 $ cd ..
219
215
220 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
216 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
221
217
222 $ linkcp r3 r4
218 $ linkcp r3 r4
223
219
224 'checklink' is produced by hardlinking a symlink, which is undefined whether
220 'checklink' is produced by hardlinking a symlink, which is undefined whether
225 the symlink should be followed or not. It does behave differently on Linux and
221 the symlink should be followed or not. It does behave differently on Linux and
226 BSD. Just remove it so the test pass on both platforms.
222 BSD. Just remove it so the test pass on both platforms.
227
223
228 $ rm -f r4/.hg/wcache/checklink
224 $ rm -f r4/.hg/wcache/checklink
229
225
230 r4 has hardlinks in the working dir (not just inside .hg):
226 r4 has hardlinks in the working dir (not just inside .hg):
231
227
232 $ nlinksdir r4
228 $ nlinksdir r4
233 2 r4/.hg/00changelog.i
229 2 r4/.hg/00changelog.i
234 2 r4/.hg/branch
230 2 r4/.hg/branch
235 2 r4/.hg/cache/branch2-base
231 2 r4/.hg/cache/branch2-base
236 2 r4/.hg/cache/branch2-immutable
232 2 r4/.hg/cache/branch2-immutable
237 2 r4/.hg/cache/branch2-served
233 2 r4/.hg/cache/branch2-served
238 2 r4/.hg/cache/branch2-served.hidden
234 2 r4/.hg/cache/branch2-served.hidden
239 2 r4/.hg/cache/branch2-visible
235 2 r4/.hg/cache/branch2-visible
240 2 r4/.hg/cache/branch2-visible-hidden
236 2 r4/.hg/cache/branch2-visible-hidden
241 2 r4/.hg/cache/rbc-names-v1
237 2 r4/.hg/cache/rbc-names-v1
242 2 r4/.hg/cache/rbc-revs-v1
238 2 r4/.hg/cache/rbc-revs-v1
243 2 r4/.hg/cache/tags2
239 2 r4/.hg/cache/tags2
244 2 r4/.hg/cache/tags2-served
240 2 r4/.hg/cache/tags2-served
245 2 r4/.hg/dirstate
241 2 r4/.hg/dirstate
246 2 r4/.hg/fsmonitor.state (fsmonitor !)
242 2 r4/.hg/fsmonitor.state (fsmonitor !)
247 2 r4/.hg/hgrc
243 2 r4/.hg/hgrc
248 2 r4/.hg/last-message.txt
244 2 r4/.hg/last-message.txt
249 2 r4/.hg/requires
245 2 r4/.hg/requires
250 2 r4/.hg/store/00changelog.i
246 2 r4/.hg/store/00changelog.i
251 2 r4/.hg/store/00manifest.i
247 2 r4/.hg/store/00manifest.i
252 2 r4/.hg/store/data/d1/f2.d
248 2 r4/.hg/store/data/d1/f2.d
253 2 r4/.hg/store/data/d1/f2.i
249 2 r4/.hg/store/data/d1/f2.i
254 2 r4/.hg/store/data/f1.i
250 2 r4/.hg/store/data/f1.i
255 2 r4/.hg/store/data/f3.i
251 2 r4/.hg/store/data/f3.i
256 2 r4/.hg/store/fncache (repofncache !)
252 2 r4/.hg/store/fncache (repofncache !)
257 2 r4/.hg/store/phaseroots
253 2 r4/.hg/store/phaseroots
258 2 r4/.hg/store/requires
254 2 r4/.hg/store/requires
259 2 r4/.hg/store/undo
255 2 r4/.hg/store/undo
260 2 r4/.hg/store/undo.backup.fncache (repofncache !)
256 2 r4/.hg/store/undo.backup.fncache (repofncache !)
261 2 r4/.hg/store/undo.backup.phaseroots
257 2 r4/.hg/store/undo.backup.phaseroots
262 2 r4/.hg/store/undo.backupfiles
258 2 r4/.hg/store/undo.backupfiles
263 2 r4/.hg/store/undo.phaseroots
264 2 r4/\.hg/undo\.backup\.dirstate (re)
259 2 r4/\.hg/undo\.backup\.dirstate (re)
265 2 r4/.hg/undo.bookmarks
260 2 r4/.hg/undo.bookmarks
266 2 r4/.hg/undo.branch
261 2 r4/.hg/undo.branch
267 2 r4/.hg/undo.desc
262 2 r4/.hg/undo.desc
268 2 r4/.hg/wcache/checkisexec (execbit !)
263 2 r4/.hg/wcache/checkisexec (execbit !)
269 2 r4/.hg/wcache/checklink-target (symlink !)
264 2 r4/.hg/wcache/checklink-target (symlink !)
270 2 r4/.hg/wcache/checknoexec (execbit !)
265 2 r4/.hg/wcache/checknoexec (execbit !)
271 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
266 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
272 2 r4/d1/data1
267 2 r4/d1/data1
273 2 r4/d1/f2
268 2 r4/d1/f2
274 2 r4/f1
269 2 r4/f1
275 2 r4/f3
270 2 r4/f3
276
271
277 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
272 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
278 #if hardlink-whitelisted
273 #if hardlink-whitelisted
279 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
274 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
280 2 r4/.hg/dirstate
275 2 r4/.hg/dirstate
281 2 r4/.hg/undo.backup.dirstate
276 2 r4/.hg/undo.backup.dirstate
282 #endif
277 #endif
283
278
284
279
285 $ hg -R r4 up 12
280 $ hg -R r4 up 12
286 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
281 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
287 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
282 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
288
283
289 $ nlinksdir r4
284 $ nlinksdir r4
290 2 r4/.hg/00changelog.i
285 2 r4/.hg/00changelog.i
291 1 r4/.hg/branch
286 1 r4/.hg/branch
292 2 r4/.hg/cache/branch2-base
287 2 r4/.hg/cache/branch2-base
293 2 r4/.hg/cache/branch2-immutable
288 2 r4/.hg/cache/branch2-immutable
294 2 r4/.hg/cache/branch2-served
289 2 r4/.hg/cache/branch2-served
295 2 r4/.hg/cache/branch2-served.hidden
290 2 r4/.hg/cache/branch2-served.hidden
296 2 r4/.hg/cache/branch2-visible
291 2 r4/.hg/cache/branch2-visible
297 2 r4/.hg/cache/branch2-visible-hidden
292 2 r4/.hg/cache/branch2-visible-hidden
298 2 r4/.hg/cache/rbc-names-v1
293 2 r4/.hg/cache/rbc-names-v1
299 2 r4/.hg/cache/rbc-revs-v1
294 2 r4/.hg/cache/rbc-revs-v1
300 2 r4/.hg/cache/tags2
295 2 r4/.hg/cache/tags2
301 2 r4/.hg/cache/tags2-served
296 2 r4/.hg/cache/tags2-served
302 1 r4/.hg/dirstate
297 1 r4/.hg/dirstate
303 1 r4/.hg/fsmonitor.state (fsmonitor !)
298 1 r4/.hg/fsmonitor.state (fsmonitor !)
304 2 r4/.hg/hgrc
299 2 r4/.hg/hgrc
305 2 r4/.hg/last-message.txt
300 2 r4/.hg/last-message.txt
306 2 r4/.hg/requires
301 2 r4/.hg/requires
307 2 r4/.hg/store/00changelog.i
302 2 r4/.hg/store/00changelog.i
308 2 r4/.hg/store/00manifest.i
303 2 r4/.hg/store/00manifest.i
309 2 r4/.hg/store/data/d1/f2.d
304 2 r4/.hg/store/data/d1/f2.d
310 2 r4/.hg/store/data/d1/f2.i
305 2 r4/.hg/store/data/d1/f2.i
311 2 r4/.hg/store/data/f1.i
306 2 r4/.hg/store/data/f1.i
312 2 r4/.hg/store/data/f3.i
307 2 r4/.hg/store/data/f3.i
313 2 r4/.hg/store/fncache
308 2 r4/.hg/store/fncache
314 2 r4/.hg/store/phaseroots
309 2 r4/.hg/store/phaseroots
315 2 r4/.hg/store/requires
310 2 r4/.hg/store/requires
316 2 r4/.hg/store/undo
311 2 r4/.hg/store/undo
317 2 r4/.hg/store/undo.backup.fncache (repofncache !)
312 2 r4/.hg/store/undo.backup.fncache (repofncache !)
318 2 r4/.hg/store/undo.backup.phaseroots
313 2 r4/.hg/store/undo.backup.phaseroots
319 2 r4/.hg/store/undo.backupfiles
314 2 r4/.hg/store/undo.backupfiles
320 2 r4/.hg/store/undo.phaseroots
321 2 r4/\.hg/undo\.backup\.dirstate (re)
315 2 r4/\.hg/undo\.backup\.dirstate (re)
322 2 r4/.hg/undo.bookmarks
316 2 r4/.hg/undo.bookmarks
323 2 r4/.hg/undo.branch
317 2 r4/.hg/undo.branch
324 2 r4/.hg/undo.desc
318 2 r4/.hg/undo.desc
325 2 r4/.hg/wcache/checkisexec (execbit !)
319 2 r4/.hg/wcache/checkisexec (execbit !)
326 2 r4/.hg/wcache/checklink-target (symlink !)
320 2 r4/.hg/wcache/checklink-target (symlink !)
327 2 r4/.hg/wcache/checknoexec (execbit !)
321 2 r4/.hg/wcache/checknoexec (execbit !)
328 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
322 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
329 2 r4/d1/data1
323 2 r4/d1/data1
330 2 r4/d1/f2
324 2 r4/d1/f2
331 1 r4/f1
325 1 r4/f1
332 1 r4/f3 (execbit !)
326 1 r4/f3 (execbit !)
333 2 r4/f3 (no-execbit !)
327 2 r4/f3 (no-execbit !)
334
328
335 #if hardlink-whitelisted
329 #if hardlink-whitelisted
336 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
330 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
337 1 r4/.hg/dirstate
331 1 r4/.hg/dirstate
338 2 r4/.hg/undo.backup.dirstate
332 2 r4/.hg/undo.backup.dirstate
339 #endif
333 #endif
340
334
341 Test hardlinking outside hg:
335 Test hardlinking outside hg:
342
336
343 $ mkdir x
337 $ mkdir x
344 $ echo foo > x/a
338 $ echo foo > x/a
345
339
346 $ linkcp x y
340 $ linkcp x y
347 $ echo bar >> y/a
341 $ echo bar >> y/a
348
342
349 No diff if hardlink:
343 No diff if hardlink:
350
344
351 $ diff x/a y/a
345 $ diff x/a y/a
352
346
353 Test mq hardlinking:
347 Test mq hardlinking:
354
348
355 $ echo "[extensions]" >> $HGRCPATH
349 $ echo "[extensions]" >> $HGRCPATH
356 $ echo "mq=" >> $HGRCPATH
350 $ echo "mq=" >> $HGRCPATH
357
351
358 $ hg init a
352 $ hg init a
359 $ cd a
353 $ cd a
360
354
361 $ hg qimport -n foo - << EOF
355 $ hg qimport -n foo - << EOF
362 > # HG changeset patch
356 > # HG changeset patch
363 > # Date 1 0
357 > # Date 1 0
364 > diff -r 2588a8b53d66 a
358 > diff -r 2588a8b53d66 a
365 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
359 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
366 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
360 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
367 > @@ -0,0 +1,1 @@
361 > @@ -0,0 +1,1 @@
368 > +a
362 > +a
369 > EOF
363 > EOF
370 adding foo to series file
364 adding foo to series file
371
365
372 $ hg qpush
366 $ hg qpush
373 applying foo
367 applying foo
374 now at: foo
368 now at: foo
375
369
376 $ cd ..
370 $ cd ..
377 $ linkcp a b
371 $ linkcp a b
378 $ cd b
372 $ cd b
379
373
380 $ hg qimport -n bar - << EOF
374 $ hg qimport -n bar - << EOF
381 > # HG changeset patch
375 > # HG changeset patch
382 > # Date 2 0
376 > # Date 2 0
383 > diff -r 2588a8b53d66 a
377 > diff -r 2588a8b53d66 a
384 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
378 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
385 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
379 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
386 > @@ -0,0 +1,1 @@
380 > @@ -0,0 +1,1 @@
387 > +b
381 > +b
388 > EOF
382 > EOF
389 adding bar to series file
383 adding bar to series file
390
384
391 $ hg qpush
385 $ hg qpush
392 applying bar
386 applying bar
393 now at: bar
387 now at: bar
394
388
395 $ cat .hg/patches/status
389 $ cat .hg/patches/status
396 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
390 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
397 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
391 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
398
392
399 $ cat .hg/patches/series
393 $ cat .hg/patches/series
400 foo
394 foo
401 bar
395 bar
402
396
403 $ cat ../a/.hg/patches/status
397 $ cat ../a/.hg/patches/status
404 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
398 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
405
399
406 $ cat ../a/.hg/patches/series
400 $ cat ../a/.hg/patches/series
407 foo
401 foo
408
402
409 Test tags hardlinking:
403 Test tags hardlinking:
410
404
411 $ hg qdel -r qbase:qtip
405 $ hg qdel -r qbase:qtip
412 patch foo finalized without changeset message
406 patch foo finalized without changeset message
413 patch bar finalized without changeset message
407 patch bar finalized without changeset message
414
408
415 $ hg tag -l lfoo
409 $ hg tag -l lfoo
416 $ hg tag foo
410 $ hg tag foo
417
411
418 $ cd ..
412 $ cd ..
419 $ linkcp b c
413 $ linkcp b c
420 $ cd c
414 $ cd c
421
415
422 $ hg tag -l -r 0 lbar
416 $ hg tag -l -r 0 lbar
423 $ hg tag -r 0 bar
417 $ hg tag -r 0 bar
424
418
425 $ cat .hgtags
419 $ cat .hgtags
426 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
420 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
427 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
421 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
428
422
429 $ cat .hg/localtags
423 $ cat .hg/localtags
430 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
424 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
431 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
425 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
432
426
433 $ cat ../b/.hgtags
427 $ cat ../b/.hgtags
434 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
428 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
435
429
436 $ cat ../b/.hg/localtags
430 $ cat ../b/.hg/localtags
437 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
431 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
438
432
439 $ cd ..
433 $ cd ..
@@ -1,1434 +1,1432 b''
1 commit hooks can see env vars
1 commit hooks can see env vars
2 (and post-transaction one are run unlocked)
2 (and post-transaction one are run unlocked)
3
3
4
4
5 $ cat > $TESTTMP/txnabort.checkargs.py <<EOF
5 $ cat > $TESTTMP/txnabort.checkargs.py <<EOF
6 > from mercurial import pycompat
6 > from mercurial import pycompat
7 > def showargs(ui, repo, hooktype, **kwargs):
7 > def showargs(ui, repo, hooktype, **kwargs):
8 > kwargs = pycompat.byteskwargs(kwargs)
8 > kwargs = pycompat.byteskwargs(kwargs)
9 > ui.write(b'%s Python hook: %s\n' % (hooktype,
9 > ui.write(b'%s Python hook: %s\n' % (hooktype,
10 > b','.join(sorted(kwargs))))
10 > b','.join(sorted(kwargs))))
11 > EOF
11 > EOF
12
12
13 $ hg init a
13 $ hg init a
14 $ cd a
14 $ cd a
15 $ cat > .hg/hgrc <<EOF
15 $ cat > .hg/hgrc <<EOF
16 > [hooks]
16 > [hooks]
17 > commit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit"
17 > commit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit"
18 > commit.b = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit.b"
18 > commit.b = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit.b"
19 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= printenv.py --line precommit"
19 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= printenv.py --line precommit"
20 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxncommit"
20 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxncommit"
21 > pretxncommit.tip = hg -q tip
21 > pretxncommit.tip = hg -q tip
22 > pre-identify = sh -c "printenv.py --line pre-identify 1"
22 > pre-identify = sh -c "printenv.py --line pre-identify 1"
23 > pre-cat = sh -c "printenv.py --line pre-cat"
23 > pre-cat = sh -c "printenv.py --line pre-cat"
24 > post-cat = sh -c "printenv.py --line post-cat"
24 > post-cat = sh -c "printenv.py --line post-cat"
25 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnopen"
25 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnopen"
26 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnclose"
26 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnclose"
27 > txnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnclose"
27 > txnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnclose"
28 > txnabort.0 = python:$TESTTMP/txnabort.checkargs.py:showargs
28 > txnabort.0 = python:$TESTTMP/txnabort.checkargs.py:showargs
29 > txnabort.1 = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnabort"
29 > txnabort.1 = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnabort"
30 > txnclose.checklock = sh -c "hg debuglock > /dev/null"
30 > txnclose.checklock = sh -c "hg debuglock > /dev/null"
31 > EOF
31 > EOF
32 $ echo a > a
32 $ echo a > a
33 $ hg add a
33 $ hg add a
34 $ hg commit -m a
34 $ hg commit -m a
35 precommit hook: HG_HOOKNAME=precommit
35 precommit hook: HG_HOOKNAME=precommit
36 HG_HOOKTYPE=precommit
36 HG_HOOKTYPE=precommit
37 HG_PARENT1=0000000000000000000000000000000000000000
37 HG_PARENT1=0000000000000000000000000000000000000000
38
38
39 pretxnopen hook: HG_HOOKNAME=pretxnopen
39 pretxnopen hook: HG_HOOKNAME=pretxnopen
40 HG_HOOKTYPE=pretxnopen
40 HG_HOOKTYPE=pretxnopen
41 HG_TXNID=TXN:$ID$
41 HG_TXNID=TXN:$ID$
42 HG_TXNNAME=commit
42 HG_TXNNAME=commit
43
43
44 pretxncommit hook: HG_HOOKNAME=pretxncommit
44 pretxncommit hook: HG_HOOKNAME=pretxncommit
45 HG_HOOKTYPE=pretxncommit
45 HG_HOOKTYPE=pretxncommit
46 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
46 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
47 HG_PARENT1=0000000000000000000000000000000000000000
47 HG_PARENT1=0000000000000000000000000000000000000000
48 HG_PENDING=$TESTTMP/a
48 HG_PENDING=$TESTTMP/a
49
49
50 0:cb9a9f314b8b
50 0:cb9a9f314b8b
51 pretxnclose hook: HG_HOOKNAME=pretxnclose
51 pretxnclose hook: HG_HOOKNAME=pretxnclose
52 HG_HOOKTYPE=pretxnclose
52 HG_HOOKTYPE=pretxnclose
53 HG_PENDING=$TESTTMP/a
53 HG_PENDING=$TESTTMP/a
54 HG_PHASES_MOVED=1
54 HG_PHASES_MOVED=1
55 HG_TXNID=TXN:$ID$
55 HG_TXNID=TXN:$ID$
56 HG_TXNNAME=commit
56 HG_TXNNAME=commit
57
57
58 txnclose hook: HG_HOOKNAME=txnclose
58 txnclose hook: HG_HOOKNAME=txnclose
59 HG_HOOKTYPE=txnclose
59 HG_HOOKTYPE=txnclose
60 HG_PHASES_MOVED=1
60 HG_PHASES_MOVED=1
61 HG_TXNID=TXN:$ID$
61 HG_TXNID=TXN:$ID$
62 HG_TXNNAME=commit
62 HG_TXNNAME=commit
63
63
64 commit hook: HG_HOOKNAME=commit
64 commit hook: HG_HOOKNAME=commit
65 HG_HOOKTYPE=commit
65 HG_HOOKTYPE=commit
66 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
66 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
67 HG_PARENT1=0000000000000000000000000000000000000000
67 HG_PARENT1=0000000000000000000000000000000000000000
68
68
69 commit.b hook: HG_HOOKNAME=commit.b
69 commit.b hook: HG_HOOKNAME=commit.b
70 HG_HOOKTYPE=commit
70 HG_HOOKTYPE=commit
71 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
71 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
72 HG_PARENT1=0000000000000000000000000000000000000000
72 HG_PARENT1=0000000000000000000000000000000000000000
73
73
74
74
75 $ hg clone . ../b
75 $ hg clone . ../b
76 updating to branch default
76 updating to branch default
77 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
77 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 $ cd ../b
78 $ cd ../b
79
79
80 changegroup hooks can see env vars
80 changegroup hooks can see env vars
81
81
82 $ cat > .hg/hgrc <<EOF
82 $ cat > .hg/hgrc <<EOF
83 > [hooks]
83 > [hooks]
84 > prechangegroup = sh -c "printenv.py --line prechangegroup"
84 > prechangegroup = sh -c "printenv.py --line prechangegroup"
85 > changegroup = sh -c "printenv.py --line changegroup"
85 > changegroup = sh -c "printenv.py --line changegroup"
86 > incoming = sh -c "printenv.py --line incoming"
86 > incoming = sh -c "printenv.py --line incoming"
87 > EOF
87 > EOF
88
88
89 pretxncommit and commit hooks can see both parents of merge
89 pretxncommit and commit hooks can see both parents of merge
90
90
91 $ cd ../a
91 $ cd ../a
92 $ echo b >> a
92 $ echo b >> a
93 $ hg commit -m a1 -d "1 0"
93 $ hg commit -m a1 -d "1 0"
94 precommit hook: HG_HOOKNAME=precommit
94 precommit hook: HG_HOOKNAME=precommit
95 HG_HOOKTYPE=precommit
95 HG_HOOKTYPE=precommit
96 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
96 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
97
97
98 pretxnopen hook: HG_HOOKNAME=pretxnopen
98 pretxnopen hook: HG_HOOKNAME=pretxnopen
99 HG_HOOKTYPE=pretxnopen
99 HG_HOOKTYPE=pretxnopen
100 HG_TXNID=TXN:$ID$
100 HG_TXNID=TXN:$ID$
101 HG_TXNNAME=commit
101 HG_TXNNAME=commit
102
102
103 pretxncommit hook: HG_HOOKNAME=pretxncommit
103 pretxncommit hook: HG_HOOKNAME=pretxncommit
104 HG_HOOKTYPE=pretxncommit
104 HG_HOOKTYPE=pretxncommit
105 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
105 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
106 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
106 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
107 HG_PENDING=$TESTTMP/a
107 HG_PENDING=$TESTTMP/a
108
108
109 1:ab228980c14d
109 1:ab228980c14d
110 pretxnclose hook: HG_HOOKNAME=pretxnclose
110 pretxnclose hook: HG_HOOKNAME=pretxnclose
111 HG_HOOKTYPE=pretxnclose
111 HG_HOOKTYPE=pretxnclose
112 HG_PENDING=$TESTTMP/a
112 HG_PENDING=$TESTTMP/a
113 HG_TXNID=TXN:$ID$
113 HG_TXNID=TXN:$ID$
114 HG_TXNNAME=commit
114 HG_TXNNAME=commit
115
115
116 txnclose hook: HG_HOOKNAME=txnclose
116 txnclose hook: HG_HOOKNAME=txnclose
117 HG_HOOKTYPE=txnclose
117 HG_HOOKTYPE=txnclose
118 HG_TXNID=TXN:$ID$
118 HG_TXNID=TXN:$ID$
119 HG_TXNNAME=commit
119 HG_TXNNAME=commit
120
120
121 commit hook: HG_HOOKNAME=commit
121 commit hook: HG_HOOKNAME=commit
122 HG_HOOKTYPE=commit
122 HG_HOOKTYPE=commit
123 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
123 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
124 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
124 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
125
125
126 commit.b hook: HG_HOOKNAME=commit.b
126 commit.b hook: HG_HOOKNAME=commit.b
127 HG_HOOKTYPE=commit
127 HG_HOOKTYPE=commit
128 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
128 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
129 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
129 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
130
130
131 $ hg update -C 0
131 $ hg update -C 0
132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 $ echo b > b
133 $ echo b > b
134 $ hg add b
134 $ hg add b
135 $ hg commit -m b -d '1 0'
135 $ hg commit -m b -d '1 0'
136 precommit hook: HG_HOOKNAME=precommit
136 precommit hook: HG_HOOKNAME=precommit
137 HG_HOOKTYPE=precommit
137 HG_HOOKTYPE=precommit
138 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
138 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
139
139
140 pretxnopen hook: HG_HOOKNAME=pretxnopen
140 pretxnopen hook: HG_HOOKNAME=pretxnopen
141 HG_HOOKTYPE=pretxnopen
141 HG_HOOKTYPE=pretxnopen
142 HG_TXNID=TXN:$ID$
142 HG_TXNID=TXN:$ID$
143 HG_TXNNAME=commit
143 HG_TXNNAME=commit
144
144
145 pretxncommit hook: HG_HOOKNAME=pretxncommit
145 pretxncommit hook: HG_HOOKNAME=pretxncommit
146 HG_HOOKTYPE=pretxncommit
146 HG_HOOKTYPE=pretxncommit
147 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
147 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
148 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
148 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
149 HG_PENDING=$TESTTMP/a
149 HG_PENDING=$TESTTMP/a
150
150
151 2:ee9deb46ab31
151 2:ee9deb46ab31
152 pretxnclose hook: HG_HOOKNAME=pretxnclose
152 pretxnclose hook: HG_HOOKNAME=pretxnclose
153 HG_HOOKTYPE=pretxnclose
153 HG_HOOKTYPE=pretxnclose
154 HG_PENDING=$TESTTMP/a
154 HG_PENDING=$TESTTMP/a
155 HG_TXNID=TXN:$ID$
155 HG_TXNID=TXN:$ID$
156 HG_TXNNAME=commit
156 HG_TXNNAME=commit
157
157
158 created new head
158 created new head
159 txnclose hook: HG_HOOKNAME=txnclose
159 txnclose hook: HG_HOOKNAME=txnclose
160 HG_HOOKTYPE=txnclose
160 HG_HOOKTYPE=txnclose
161 HG_TXNID=TXN:$ID$
161 HG_TXNID=TXN:$ID$
162 HG_TXNNAME=commit
162 HG_TXNNAME=commit
163
163
164 commit hook: HG_HOOKNAME=commit
164 commit hook: HG_HOOKNAME=commit
165 HG_HOOKTYPE=commit
165 HG_HOOKTYPE=commit
166 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
166 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
167 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
167 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
168
168
169 commit.b hook: HG_HOOKNAME=commit.b
169 commit.b hook: HG_HOOKNAME=commit.b
170 HG_HOOKTYPE=commit
170 HG_HOOKTYPE=commit
171 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
171 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
172 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
172 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
173
173
174 $ hg merge 1
174 $ hg merge 1
175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
176 (branch merge, don't forget to commit)
176 (branch merge, don't forget to commit)
177 $ hg commit -m merge -d '2 0'
177 $ hg commit -m merge -d '2 0'
178 precommit hook: HG_HOOKNAME=precommit
178 precommit hook: HG_HOOKNAME=precommit
179 HG_HOOKTYPE=precommit
179 HG_HOOKTYPE=precommit
180 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
180 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
181 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
181 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
182
182
183 pretxnopen hook: HG_HOOKNAME=pretxnopen
183 pretxnopen hook: HG_HOOKNAME=pretxnopen
184 HG_HOOKTYPE=pretxnopen
184 HG_HOOKTYPE=pretxnopen
185 HG_TXNID=TXN:$ID$
185 HG_TXNID=TXN:$ID$
186 HG_TXNNAME=commit
186 HG_TXNNAME=commit
187
187
188 pretxncommit hook: HG_HOOKNAME=pretxncommit
188 pretxncommit hook: HG_HOOKNAME=pretxncommit
189 HG_HOOKTYPE=pretxncommit
189 HG_HOOKTYPE=pretxncommit
190 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
190 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
191 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
191 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
192 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
192 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
193 HG_PENDING=$TESTTMP/a
193 HG_PENDING=$TESTTMP/a
194
194
195 3:07f3376c1e65
195 3:07f3376c1e65
196 pretxnclose hook: HG_HOOKNAME=pretxnclose
196 pretxnclose hook: HG_HOOKNAME=pretxnclose
197 HG_HOOKTYPE=pretxnclose
197 HG_HOOKTYPE=pretxnclose
198 HG_PENDING=$TESTTMP/a
198 HG_PENDING=$TESTTMP/a
199 HG_TXNID=TXN:$ID$
199 HG_TXNID=TXN:$ID$
200 HG_TXNNAME=commit
200 HG_TXNNAME=commit
201
201
202 txnclose hook: HG_HOOKNAME=txnclose
202 txnclose hook: HG_HOOKNAME=txnclose
203 HG_HOOKTYPE=txnclose
203 HG_HOOKTYPE=txnclose
204 HG_TXNID=TXN:$ID$
204 HG_TXNID=TXN:$ID$
205 HG_TXNNAME=commit
205 HG_TXNNAME=commit
206
206
207 commit hook: HG_HOOKNAME=commit
207 commit hook: HG_HOOKNAME=commit
208 HG_HOOKTYPE=commit
208 HG_HOOKTYPE=commit
209 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
209 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
210 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
210 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
211 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
211 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
212
212
213 commit.b hook: HG_HOOKNAME=commit.b
213 commit.b hook: HG_HOOKNAME=commit.b
214 HG_HOOKTYPE=commit
214 HG_HOOKTYPE=commit
215 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
215 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
216 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
216 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
217 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
217 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
218
218
219
219
220 test generic hooks
220 test generic hooks
221
221
222 $ hg id
222 $ hg id
223 pre-identify hook: HG_ARGS=id
223 pre-identify hook: HG_ARGS=id
224 HG_HOOKNAME=pre-identify
224 HG_HOOKNAME=pre-identify
225 HG_HOOKTYPE=pre-identify
225 HG_HOOKTYPE=pre-identify
226 HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None, 'template': ''}
226 HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None, 'template': ''}
227 HG_PATS=[]
227 HG_PATS=[]
228
228
229 abort: pre-identify hook exited with status 1
229 abort: pre-identify hook exited with status 1
230 [40]
230 [40]
231 $ hg cat b
231 $ hg cat b
232 pre-cat hook: HG_ARGS=cat b
232 pre-cat hook: HG_ARGS=cat b
233 HG_HOOKNAME=pre-cat
233 HG_HOOKNAME=pre-cat
234 HG_HOOKTYPE=pre-cat
234 HG_HOOKTYPE=pre-cat
235 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
235 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
236 HG_PATS=['b']
236 HG_PATS=['b']
237
237
238 b
238 b
239 post-cat hook: HG_ARGS=cat b
239 post-cat hook: HG_ARGS=cat b
240 HG_HOOKNAME=post-cat
240 HG_HOOKNAME=post-cat
241 HG_HOOKTYPE=post-cat
241 HG_HOOKTYPE=post-cat
242 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
242 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
243 HG_PATS=['b']
243 HG_PATS=['b']
244 HG_RESULT=0
244 HG_RESULT=0
245
245
246
246
247 $ cd ../b
247 $ cd ../b
248 $ hg pull ../a
248 $ hg pull ../a
249 pulling from ../a
249 pulling from ../a
250 searching for changes
250 searching for changes
251 prechangegroup hook: HG_HOOKNAME=prechangegroup
251 prechangegroup hook: HG_HOOKNAME=prechangegroup
252 HG_HOOKTYPE=prechangegroup
252 HG_HOOKTYPE=prechangegroup
253 HG_SOURCE=pull
253 HG_SOURCE=pull
254 HG_TXNID=TXN:$ID$
254 HG_TXNID=TXN:$ID$
255 HG_TXNNAME=pull
255 HG_TXNNAME=pull
256 file:/*/$TESTTMP/a (glob)
256 file:/*/$TESTTMP/a (glob)
257 HG_URL=file:$TESTTMP/a
257 HG_URL=file:$TESTTMP/a
258
258
259 adding changesets
259 adding changesets
260 adding manifests
260 adding manifests
261 adding file changes
261 adding file changes
262 added 3 changesets with 2 changes to 2 files
262 added 3 changesets with 2 changes to 2 files
263 new changesets ab228980c14d:07f3376c1e65
263 new changesets ab228980c14d:07f3376c1e65
264 changegroup hook: HG_HOOKNAME=changegroup
264 changegroup hook: HG_HOOKNAME=changegroup
265 HG_HOOKTYPE=changegroup
265 HG_HOOKTYPE=changegroup
266 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
266 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
267 HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2
267 HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2
268 HG_SOURCE=pull
268 HG_SOURCE=pull
269 HG_TXNID=TXN:$ID$
269 HG_TXNID=TXN:$ID$
270 HG_TXNNAME=pull
270 HG_TXNNAME=pull
271 file:/*/$TESTTMP/a (glob)
271 file:/*/$TESTTMP/a (glob)
272 HG_URL=file:$TESTTMP/a
272 HG_URL=file:$TESTTMP/a
273
273
274 incoming hook: HG_HOOKNAME=incoming
274 incoming hook: HG_HOOKNAME=incoming
275 HG_HOOKTYPE=incoming
275 HG_HOOKTYPE=incoming
276 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
276 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
277 HG_SOURCE=pull
277 HG_SOURCE=pull
278 HG_TXNID=TXN:$ID$
278 HG_TXNID=TXN:$ID$
279 HG_TXNNAME=pull
279 HG_TXNNAME=pull
280 file:/*/$TESTTMP/a (glob)
280 file:/*/$TESTTMP/a (glob)
281 HG_URL=file:$TESTTMP/a
281 HG_URL=file:$TESTTMP/a
282
282
283 incoming hook: HG_HOOKNAME=incoming
283 incoming hook: HG_HOOKNAME=incoming
284 HG_HOOKTYPE=incoming
284 HG_HOOKTYPE=incoming
285 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
285 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
286 HG_SOURCE=pull
286 HG_SOURCE=pull
287 HG_TXNID=TXN:$ID$
287 HG_TXNID=TXN:$ID$
288 HG_TXNNAME=pull
288 HG_TXNNAME=pull
289 file:/*/$TESTTMP/a (glob)
289 file:/*/$TESTTMP/a (glob)
290 HG_URL=file:$TESTTMP/a
290 HG_URL=file:$TESTTMP/a
291
291
292 incoming hook: HG_HOOKNAME=incoming
292 incoming hook: HG_HOOKNAME=incoming
293 HG_HOOKTYPE=incoming
293 HG_HOOKTYPE=incoming
294 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
294 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
295 HG_SOURCE=pull
295 HG_SOURCE=pull
296 HG_TXNID=TXN:$ID$
296 HG_TXNID=TXN:$ID$
297 HG_TXNNAME=pull
297 HG_TXNNAME=pull
298 file:/*/$TESTTMP/a (glob)
298 file:/*/$TESTTMP/a (glob)
299 HG_URL=file:$TESTTMP/a
299 HG_URL=file:$TESTTMP/a
300
300
301 (run 'hg update' to get a working copy)
301 (run 'hg update' to get a working copy)
302
302
303 tag hooks can see env vars
303 tag hooks can see env vars
304
304
305 $ cd ../a
305 $ cd ../a
306 $ cat >> .hg/hgrc <<EOF
306 $ cat >> .hg/hgrc <<EOF
307 > pretag = sh -c "printenv.py --line pretag"
307 > pretag = sh -c "printenv.py --line pretag"
308 > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py --line tag"
308 > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py --line tag"
309 > EOF
309 > EOF
310 $ hg tag -d '3 0' a
310 $ hg tag -d '3 0' a
311 pretag hook: HG_HOOKNAME=pretag
311 pretag hook: HG_HOOKNAME=pretag
312 HG_HOOKTYPE=pretag
312 HG_HOOKTYPE=pretag
313 HG_LOCAL=0
313 HG_LOCAL=0
314 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
314 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
315 HG_TAG=a
315 HG_TAG=a
316
316
317 precommit hook: HG_HOOKNAME=precommit
317 precommit hook: HG_HOOKNAME=precommit
318 HG_HOOKTYPE=precommit
318 HG_HOOKTYPE=precommit
319 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
319 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
320
320
321 pretxnopen hook: HG_HOOKNAME=pretxnopen
321 pretxnopen hook: HG_HOOKNAME=pretxnopen
322 HG_HOOKTYPE=pretxnopen
322 HG_HOOKTYPE=pretxnopen
323 HG_TXNID=TXN:$ID$
323 HG_TXNID=TXN:$ID$
324 HG_TXNNAME=commit
324 HG_TXNNAME=commit
325
325
326 pretxncommit hook: HG_HOOKNAME=pretxncommit
326 pretxncommit hook: HG_HOOKNAME=pretxncommit
327 HG_HOOKTYPE=pretxncommit
327 HG_HOOKTYPE=pretxncommit
328 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
328 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
329 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
329 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
330 HG_PENDING=$TESTTMP/a
330 HG_PENDING=$TESTTMP/a
331
331
332 4:539e4b31b6dc
332 4:539e4b31b6dc
333 pretxnclose hook: HG_HOOKNAME=pretxnclose
333 pretxnclose hook: HG_HOOKNAME=pretxnclose
334 HG_HOOKTYPE=pretxnclose
334 HG_HOOKTYPE=pretxnclose
335 HG_PENDING=$TESTTMP/a
335 HG_PENDING=$TESTTMP/a
336 HG_TXNID=TXN:$ID$
336 HG_TXNID=TXN:$ID$
337 HG_TXNNAME=commit
337 HG_TXNNAME=commit
338
338
339 tag hook: HG_HOOKNAME=tag
339 tag hook: HG_HOOKNAME=tag
340 HG_HOOKTYPE=tag
340 HG_HOOKTYPE=tag
341 HG_LOCAL=0
341 HG_LOCAL=0
342 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
342 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
343 HG_TAG=a
343 HG_TAG=a
344
344
345 txnclose hook: HG_HOOKNAME=txnclose
345 txnclose hook: HG_HOOKNAME=txnclose
346 HG_HOOKTYPE=txnclose
346 HG_HOOKTYPE=txnclose
347 HG_TXNID=TXN:$ID$
347 HG_TXNID=TXN:$ID$
348 HG_TXNNAME=commit
348 HG_TXNNAME=commit
349
349
350 commit hook: HG_HOOKNAME=commit
350 commit hook: HG_HOOKNAME=commit
351 HG_HOOKTYPE=commit
351 HG_HOOKTYPE=commit
352 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
352 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
353 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
353 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
354
354
355 commit.b hook: HG_HOOKNAME=commit.b
355 commit.b hook: HG_HOOKNAME=commit.b
356 HG_HOOKTYPE=commit
356 HG_HOOKTYPE=commit
357 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
357 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
358 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
358 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
359
359
360 $ hg tag -l la
360 $ hg tag -l la
361 pretag hook: HG_HOOKNAME=pretag
361 pretag hook: HG_HOOKNAME=pretag
362 HG_HOOKTYPE=pretag
362 HG_HOOKTYPE=pretag
363 HG_LOCAL=1
363 HG_LOCAL=1
364 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
364 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
365 HG_TAG=la
365 HG_TAG=la
366
366
367 tag hook: HG_HOOKNAME=tag
367 tag hook: HG_HOOKNAME=tag
368 HG_HOOKTYPE=tag
368 HG_HOOKTYPE=tag
369 HG_LOCAL=1
369 HG_LOCAL=1
370 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
370 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
371 HG_TAG=la
371 HG_TAG=la
372
372
373
373
374 pretag hook can forbid tagging
374 pretag hook can forbid tagging
375
375
376 $ cat >> .hg/hgrc <<EOF
376 $ cat >> .hg/hgrc <<EOF
377 > pretag.forbid = sh -c "printenv.py --line pretag.forbid 1"
377 > pretag.forbid = sh -c "printenv.py --line pretag.forbid 1"
378 > EOF
378 > EOF
379 $ hg tag -d '4 0' fa
379 $ hg tag -d '4 0' fa
380 pretag hook: HG_HOOKNAME=pretag
380 pretag hook: HG_HOOKNAME=pretag
381 HG_HOOKTYPE=pretag
381 HG_HOOKTYPE=pretag
382 HG_LOCAL=0
382 HG_LOCAL=0
383 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
383 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
384 HG_TAG=fa
384 HG_TAG=fa
385
385
386 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
386 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
387 HG_HOOKTYPE=pretag
387 HG_HOOKTYPE=pretag
388 HG_LOCAL=0
388 HG_LOCAL=0
389 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
389 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
390 HG_TAG=fa
390 HG_TAG=fa
391
391
392 abort: pretag.forbid hook exited with status 1
392 abort: pretag.forbid hook exited with status 1
393 [40]
393 [40]
394 $ hg tag -l fla
394 $ hg tag -l fla
395 pretag hook: HG_HOOKNAME=pretag
395 pretag hook: HG_HOOKNAME=pretag
396 HG_HOOKTYPE=pretag
396 HG_HOOKTYPE=pretag
397 HG_LOCAL=1
397 HG_LOCAL=1
398 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
398 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
399 HG_TAG=fla
399 HG_TAG=fla
400
400
401 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
401 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
402 HG_HOOKTYPE=pretag
402 HG_HOOKTYPE=pretag
403 HG_LOCAL=1
403 HG_LOCAL=1
404 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
404 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
405 HG_TAG=fla
405 HG_TAG=fla
406
406
407 abort: pretag.forbid hook exited with status 1
407 abort: pretag.forbid hook exited with status 1
408 [40]
408 [40]
409
409
410 pretxncommit hook can see changeset, can roll back txn, changeset no
410 pretxncommit hook can see changeset, can roll back txn, changeset no
411 more there after
411 more there after
412
412
413 $ cat >> .hg/hgrc <<EOF
413 $ cat >> .hg/hgrc <<EOF
414 > pretxncommit.forbid0 = sh -c "hg tip -q"
414 > pretxncommit.forbid0 = sh -c "hg tip -q"
415 > pretxncommit.forbid1 = sh -c "printenv.py --line pretxncommit.forbid 1"
415 > pretxncommit.forbid1 = sh -c "printenv.py --line pretxncommit.forbid 1"
416 > EOF
416 > EOF
417 $ echo z > z
417 $ echo z > z
418 $ hg add z
418 $ hg add z
419 $ hg -q tip
419 $ hg -q tip
420 4:539e4b31b6dc
420 4:539e4b31b6dc
421 $ hg commit -m 'fail' -d '4 0'
421 $ hg commit -m 'fail' -d '4 0'
422 precommit hook: HG_HOOKNAME=precommit
422 precommit hook: HG_HOOKNAME=precommit
423 HG_HOOKTYPE=precommit
423 HG_HOOKTYPE=precommit
424 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
424 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
425
425
426 pretxnopen hook: HG_HOOKNAME=pretxnopen
426 pretxnopen hook: HG_HOOKNAME=pretxnopen
427 HG_HOOKTYPE=pretxnopen
427 HG_HOOKTYPE=pretxnopen
428 HG_TXNID=TXN:$ID$
428 HG_TXNID=TXN:$ID$
429 HG_TXNNAME=commit
429 HG_TXNNAME=commit
430
430
431 pretxncommit hook: HG_HOOKNAME=pretxncommit
431 pretxncommit hook: HG_HOOKNAME=pretxncommit
432 HG_HOOKTYPE=pretxncommit
432 HG_HOOKTYPE=pretxncommit
433 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
433 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
434 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
434 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
435 HG_PENDING=$TESTTMP/a
435 HG_PENDING=$TESTTMP/a
436
436
437 5:6f611f8018c1
437 5:6f611f8018c1
438 5:6f611f8018c1
438 5:6f611f8018c1
439 pretxncommit.forbid hook: HG_HOOKNAME=pretxncommit.forbid1
439 pretxncommit.forbid hook: HG_HOOKNAME=pretxncommit.forbid1
440 HG_HOOKTYPE=pretxncommit
440 HG_HOOKTYPE=pretxncommit
441 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
441 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
442 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
442 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
443 HG_PENDING=$TESTTMP/a
443 HG_PENDING=$TESTTMP/a
444
444
445 transaction abort!
445 transaction abort!
446 txnabort Python hook: changes,txnid,txnname
446 txnabort Python hook: changes,txnid,txnname
447 txnabort hook: HG_HOOKNAME=txnabort.1
447 txnabort hook: HG_HOOKNAME=txnabort.1
448 HG_HOOKTYPE=txnabort
448 HG_HOOKTYPE=txnabort
449 HG_TXNID=TXN:$ID$
449 HG_TXNID=TXN:$ID$
450 HG_TXNNAME=commit
450 HG_TXNNAME=commit
451
451
452 rollback completed
452 rollback completed
453 abort: pretxncommit.forbid1 hook exited with status 1
453 abort: pretxncommit.forbid1 hook exited with status 1
454 [40]
454 [40]
455 $ hg -q tip
455 $ hg -q tip
456 4:539e4b31b6dc
456 4:539e4b31b6dc
457
457
458 (Check that no 'changelog.i.a' file were left behind)
458 (Check that no 'changelog.i.a' file were left behind)
459
459
460 $ ls -1 .hg/store/
460 $ ls -1 .hg/store/
461 00changelog.i
461 00changelog.i
462 00manifest.i
462 00manifest.i
463 data
463 data
464 fncache (repofncache !)
464 fncache (repofncache !)
465 journal.phaseroots
466 phaseroots
465 phaseroots
467 requires
466 requires
468 undo
467 undo
469 undo.backup.fncache (repofncache !)
468 undo.backup.fncache (repofncache !)
470 undo.backupfiles
469 undo.backupfiles
471 undo.phaseroots
472
470
473
471
474 precommit hook can prevent commit
472 precommit hook can prevent commit
475
473
476 $ cat >> .hg/hgrc <<EOF
474 $ cat >> .hg/hgrc <<EOF
477 > precommit.forbid = sh -c "printenv.py --line precommit.forbid 1"
475 > precommit.forbid = sh -c "printenv.py --line precommit.forbid 1"
478 > EOF
476 > EOF
479 $ hg commit -m 'fail' -d '4 0'
477 $ hg commit -m 'fail' -d '4 0'
480 precommit hook: HG_HOOKNAME=precommit
478 precommit hook: HG_HOOKNAME=precommit
481 HG_HOOKTYPE=precommit
479 HG_HOOKTYPE=precommit
482 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
480 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
483
481
484 precommit.forbid hook: HG_HOOKNAME=precommit.forbid
482 precommit.forbid hook: HG_HOOKNAME=precommit.forbid
485 HG_HOOKTYPE=precommit
483 HG_HOOKTYPE=precommit
486 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
484 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
487
485
488 abort: precommit.forbid hook exited with status 1
486 abort: precommit.forbid hook exited with status 1
489 [40]
487 [40]
490 $ hg -q tip
488 $ hg -q tip
491 4:539e4b31b6dc
489 4:539e4b31b6dc
492
490
493 preupdate hook can prevent update
491 preupdate hook can prevent update
494
492
495 $ cat >> .hg/hgrc <<EOF
493 $ cat >> .hg/hgrc <<EOF
496 > preupdate = sh -c "printenv.py --line preupdate"
494 > preupdate = sh -c "printenv.py --line preupdate"
497 > EOF
495 > EOF
498 $ hg update 1
496 $ hg update 1
499 preupdate hook: HG_HOOKNAME=preupdate
497 preupdate hook: HG_HOOKNAME=preupdate
500 HG_HOOKTYPE=preupdate
498 HG_HOOKTYPE=preupdate
501 HG_PARENT1=ab228980c14d
499 HG_PARENT1=ab228980c14d
502
500
503 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
501 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
504
502
505 update hook
503 update hook
506
504
507 $ cat >> .hg/hgrc <<EOF
505 $ cat >> .hg/hgrc <<EOF
508 > update = sh -c "printenv.py --line update"
506 > update = sh -c "printenv.py --line update"
509 > EOF
507 > EOF
510 $ hg update
508 $ hg update
511 preupdate hook: HG_HOOKNAME=preupdate
509 preupdate hook: HG_HOOKNAME=preupdate
512 HG_HOOKTYPE=preupdate
510 HG_HOOKTYPE=preupdate
513 HG_PARENT1=539e4b31b6dc
511 HG_PARENT1=539e4b31b6dc
514
512
515 update hook: HG_ERROR=0
513 update hook: HG_ERROR=0
516 HG_HOOKNAME=update
514 HG_HOOKNAME=update
517 HG_HOOKTYPE=update
515 HG_HOOKTYPE=update
518 HG_PARENT1=539e4b31b6dc
516 HG_PARENT1=539e4b31b6dc
519
517
520 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
518 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
521
519
522 pushkey hook
520 pushkey hook
523
521
524 $ cat >> .hg/hgrc <<EOF
522 $ cat >> .hg/hgrc <<EOF
525 > pushkey = sh -c "printenv.py --line pushkey"
523 > pushkey = sh -c "printenv.py --line pushkey"
526 > EOF
524 > EOF
527 $ cd ../b
525 $ cd ../b
528 $ hg bookmark -r null foo
526 $ hg bookmark -r null foo
529 $ hg push -B foo ../a
527 $ hg push -B foo ../a
530 pushing to ../a
528 pushing to ../a
531 searching for changes
529 searching for changes
532 no changes found
530 no changes found
533 pretxnopen hook: HG_HOOKNAME=pretxnopen
531 pretxnopen hook: HG_HOOKNAME=pretxnopen
534 HG_HOOKTYPE=pretxnopen
532 HG_HOOKTYPE=pretxnopen
535 HG_TXNID=TXN:$ID$
533 HG_TXNID=TXN:$ID$
536 HG_TXNNAME=push
534 HG_TXNNAME=push
537
535
538 pretxnclose hook: HG_BOOKMARK_MOVED=1
536 pretxnclose hook: HG_BOOKMARK_MOVED=1
539 HG_BUNDLE2=1
537 HG_BUNDLE2=1
540 HG_HOOKNAME=pretxnclose
538 HG_HOOKNAME=pretxnclose
541 HG_HOOKTYPE=pretxnclose
539 HG_HOOKTYPE=pretxnclose
542 HG_PENDING=$TESTTMP/a
540 HG_PENDING=$TESTTMP/a
543 HG_SOURCE=push
541 HG_SOURCE=push
544 HG_TXNID=TXN:$ID$
542 HG_TXNID=TXN:$ID$
545 HG_TXNNAME=push
543 HG_TXNNAME=push
546 HG_URL=file:$TESTTMP/a
544 HG_URL=file:$TESTTMP/a
547
545
548 pushkey hook: HG_BUNDLE2=1
546 pushkey hook: HG_BUNDLE2=1
549 HG_HOOKNAME=pushkey
547 HG_HOOKNAME=pushkey
550 HG_HOOKTYPE=pushkey
548 HG_HOOKTYPE=pushkey
551 HG_KEY=foo
549 HG_KEY=foo
552 HG_NAMESPACE=bookmarks
550 HG_NAMESPACE=bookmarks
553 HG_NEW=0000000000000000000000000000000000000000
551 HG_NEW=0000000000000000000000000000000000000000
554 HG_PUSHKEYCOMPAT=1
552 HG_PUSHKEYCOMPAT=1
555 HG_SOURCE=push
553 HG_SOURCE=push
556 HG_TXNID=TXN:$ID$
554 HG_TXNID=TXN:$ID$
557 HG_TXNNAME=push
555 HG_TXNNAME=push
558 HG_URL=file:$TESTTMP/a
556 HG_URL=file:$TESTTMP/a
559
557
560 txnclose hook: HG_BOOKMARK_MOVED=1
558 txnclose hook: HG_BOOKMARK_MOVED=1
561 HG_BUNDLE2=1
559 HG_BUNDLE2=1
562 HG_HOOKNAME=txnclose
560 HG_HOOKNAME=txnclose
563 HG_HOOKTYPE=txnclose
561 HG_HOOKTYPE=txnclose
564 HG_SOURCE=push
562 HG_SOURCE=push
565 HG_TXNID=TXN:$ID$
563 HG_TXNID=TXN:$ID$
566 HG_TXNNAME=push
564 HG_TXNNAME=push
567 HG_URL=file:$TESTTMP/a
565 HG_URL=file:$TESTTMP/a
568
566
569 exporting bookmark foo
567 exporting bookmark foo
570 [1]
568 [1]
571 $ cd ../a
569 $ cd ../a
572
570
573 listkeys hook
571 listkeys hook
574
572
575 $ cat >> .hg/hgrc <<EOF
573 $ cat >> .hg/hgrc <<EOF
576 > listkeys = sh -c "printenv.py --line listkeys"
574 > listkeys = sh -c "printenv.py --line listkeys"
577 > EOF
575 > EOF
578 $ hg bookmark -r null bar
576 $ hg bookmark -r null bar
579 pretxnopen hook: HG_HOOKNAME=pretxnopen
577 pretxnopen hook: HG_HOOKNAME=pretxnopen
580 HG_HOOKTYPE=pretxnopen
578 HG_HOOKTYPE=pretxnopen
581 HG_TXNID=TXN:$ID$
579 HG_TXNID=TXN:$ID$
582 HG_TXNNAME=bookmark
580 HG_TXNNAME=bookmark
583
581
584 pretxnclose hook: HG_BOOKMARK_MOVED=1
582 pretxnclose hook: HG_BOOKMARK_MOVED=1
585 HG_HOOKNAME=pretxnclose
583 HG_HOOKNAME=pretxnclose
586 HG_HOOKTYPE=pretxnclose
584 HG_HOOKTYPE=pretxnclose
587 HG_PENDING=$TESTTMP/a
585 HG_PENDING=$TESTTMP/a
588 HG_TXNID=TXN:$ID$
586 HG_TXNID=TXN:$ID$
589 HG_TXNNAME=bookmark
587 HG_TXNNAME=bookmark
590
588
591 txnclose hook: HG_BOOKMARK_MOVED=1
589 txnclose hook: HG_BOOKMARK_MOVED=1
592 HG_HOOKNAME=txnclose
590 HG_HOOKNAME=txnclose
593 HG_HOOKTYPE=txnclose
591 HG_HOOKTYPE=txnclose
594 HG_TXNID=TXN:$ID$
592 HG_TXNID=TXN:$ID$
595 HG_TXNNAME=bookmark
593 HG_TXNNAME=bookmark
596
594
597 $ cd ../b
595 $ cd ../b
598 $ hg pull -B bar ../a
596 $ hg pull -B bar ../a
599 pulling from ../a
597 pulling from ../a
600 listkeys hook: HG_HOOKNAME=listkeys
598 listkeys hook: HG_HOOKNAME=listkeys
601 HG_HOOKTYPE=listkeys
599 HG_HOOKTYPE=listkeys
602 HG_NAMESPACE=bookmarks
600 HG_NAMESPACE=bookmarks
603 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
601 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
604
602
605 no changes found
603 no changes found
606 adding remote bookmark bar
604 adding remote bookmark bar
607 $ cd ../a
605 $ cd ../a
608
606
609 test that prepushkey can prevent incoming keys
607 test that prepushkey can prevent incoming keys
610
608
611 $ cat >> .hg/hgrc <<EOF
609 $ cat >> .hg/hgrc <<EOF
612 > prepushkey = sh -c "printenv.py --line prepushkey.forbid 1"
610 > prepushkey = sh -c "printenv.py --line prepushkey.forbid 1"
613 > EOF
611 > EOF
614 $ cd ../b
612 $ cd ../b
615 $ hg bookmark -r null baz
613 $ hg bookmark -r null baz
616 $ hg push -B baz ../a
614 $ hg push -B baz ../a
617 pushing to ../a
615 pushing to ../a
618 searching for changes
616 searching for changes
619 listkeys hook: HG_HOOKNAME=listkeys
617 listkeys hook: HG_HOOKNAME=listkeys
620 HG_HOOKTYPE=listkeys
618 HG_HOOKTYPE=listkeys
621 HG_NAMESPACE=phases
619 HG_NAMESPACE=phases
622 HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
620 HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
623
621
624 listkeys hook: HG_HOOKNAME=listkeys
622 listkeys hook: HG_HOOKNAME=listkeys
625 HG_HOOKTYPE=listkeys
623 HG_HOOKTYPE=listkeys
626 HG_NAMESPACE=bookmarks
624 HG_NAMESPACE=bookmarks
627 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
625 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
628
626
629 no changes found
627 no changes found
630 pretxnopen hook: HG_HOOKNAME=pretxnopen
628 pretxnopen hook: HG_HOOKNAME=pretxnopen
631 HG_HOOKTYPE=pretxnopen
629 HG_HOOKTYPE=pretxnopen
632 HG_TXNID=TXN:$ID$
630 HG_TXNID=TXN:$ID$
633 HG_TXNNAME=push
631 HG_TXNNAME=push
634
632
635 prepushkey.forbid hook: HG_BUNDLE2=1
633 prepushkey.forbid hook: HG_BUNDLE2=1
636 HG_HOOKNAME=prepushkey
634 HG_HOOKNAME=prepushkey
637 HG_HOOKTYPE=prepushkey
635 HG_HOOKTYPE=prepushkey
638 HG_KEY=baz
636 HG_KEY=baz
639 HG_NAMESPACE=bookmarks
637 HG_NAMESPACE=bookmarks
640 HG_NEW=0000000000000000000000000000000000000000
638 HG_NEW=0000000000000000000000000000000000000000
641 HG_PUSHKEYCOMPAT=1
639 HG_PUSHKEYCOMPAT=1
642 HG_SOURCE=push
640 HG_SOURCE=push
643 HG_TXNID=TXN:$ID$
641 HG_TXNID=TXN:$ID$
644 HG_TXNNAME=push
642 HG_TXNNAME=push
645 HG_URL=file:$TESTTMP/a
643 HG_URL=file:$TESTTMP/a
646
644
647 txnabort Python hook: bundle2,changes,source,txnid,txnname,url
645 txnabort Python hook: bundle2,changes,source,txnid,txnname,url
648 txnabort hook: HG_BUNDLE2=1
646 txnabort hook: HG_BUNDLE2=1
649 HG_HOOKNAME=txnabort.1
647 HG_HOOKNAME=txnabort.1
650 HG_HOOKTYPE=txnabort
648 HG_HOOKTYPE=txnabort
651 HG_SOURCE=push
649 HG_SOURCE=push
652 HG_TXNID=TXN:$ID$
650 HG_TXNID=TXN:$ID$
653 HG_TXNNAME=push
651 HG_TXNNAME=push
654 HG_URL=file:$TESTTMP/a
652 HG_URL=file:$TESTTMP/a
655
653
656 abort: prepushkey hook exited with status 1
654 abort: prepushkey hook exited with status 1
657 [40]
655 [40]
658 $ cd ../a
656 $ cd ../a
659
657
660 test that prelistkeys can prevent listing keys
658 test that prelistkeys can prevent listing keys
661
659
662 $ cat >> .hg/hgrc <<EOF
660 $ cat >> .hg/hgrc <<EOF
663 > prelistkeys = sh -c "printenv.py --line prelistkeys.forbid 1"
661 > prelistkeys = sh -c "printenv.py --line prelistkeys.forbid 1"
664 > EOF
662 > EOF
665 $ hg bookmark -r null quux
663 $ hg bookmark -r null quux
666 pretxnopen hook: HG_HOOKNAME=pretxnopen
664 pretxnopen hook: HG_HOOKNAME=pretxnopen
667 HG_HOOKTYPE=pretxnopen
665 HG_HOOKTYPE=pretxnopen
668 HG_TXNID=TXN:$ID$
666 HG_TXNID=TXN:$ID$
669 HG_TXNNAME=bookmark
667 HG_TXNNAME=bookmark
670
668
671 pretxnclose hook: HG_BOOKMARK_MOVED=1
669 pretxnclose hook: HG_BOOKMARK_MOVED=1
672 HG_HOOKNAME=pretxnclose
670 HG_HOOKNAME=pretxnclose
673 HG_HOOKTYPE=pretxnclose
671 HG_HOOKTYPE=pretxnclose
674 HG_PENDING=$TESTTMP/a
672 HG_PENDING=$TESTTMP/a
675 HG_TXNID=TXN:$ID$
673 HG_TXNID=TXN:$ID$
676 HG_TXNNAME=bookmark
674 HG_TXNNAME=bookmark
677
675
678 txnclose hook: HG_BOOKMARK_MOVED=1
676 txnclose hook: HG_BOOKMARK_MOVED=1
679 HG_HOOKNAME=txnclose
677 HG_HOOKNAME=txnclose
680 HG_HOOKTYPE=txnclose
678 HG_HOOKTYPE=txnclose
681 HG_TXNID=TXN:$ID$
679 HG_TXNID=TXN:$ID$
682 HG_TXNNAME=bookmark
680 HG_TXNNAME=bookmark
683
681
684 $ cd ../b
682 $ cd ../b
685 $ hg pull -B quux ../a
683 $ hg pull -B quux ../a
686 pulling from ../a
684 pulling from ../a
687 prelistkeys.forbid hook: HG_HOOKNAME=prelistkeys
685 prelistkeys.forbid hook: HG_HOOKNAME=prelistkeys
688 HG_HOOKTYPE=prelistkeys
686 HG_HOOKTYPE=prelistkeys
689 HG_NAMESPACE=bookmarks
687 HG_NAMESPACE=bookmarks
690
688
691 abort: prelistkeys hook exited with status 1
689 abort: prelistkeys hook exited with status 1
692 [40]
690 [40]
693 $ cd ../a
691 $ cd ../a
694 $ rm .hg/hgrc
692 $ rm .hg/hgrc
695
693
696 prechangegroup hook can prevent incoming changes
694 prechangegroup hook can prevent incoming changes
697
695
698 $ cd ../b
696 $ cd ../b
699 $ hg -q tip
697 $ hg -q tip
700 3:07f3376c1e65
698 3:07f3376c1e65
701 $ cat > .hg/hgrc <<EOF
699 $ cat > .hg/hgrc <<EOF
702 > [hooks]
700 > [hooks]
703 > prechangegroup.forbid = sh -c "printenv.py --line prechangegroup.forbid 1"
701 > prechangegroup.forbid = sh -c "printenv.py --line prechangegroup.forbid 1"
704 > EOF
702 > EOF
705 $ hg pull ../a
703 $ hg pull ../a
706 pulling from ../a
704 pulling from ../a
707 searching for changes
705 searching for changes
708 prechangegroup.forbid hook: HG_HOOKNAME=prechangegroup.forbid
706 prechangegroup.forbid hook: HG_HOOKNAME=prechangegroup.forbid
709 HG_HOOKTYPE=prechangegroup
707 HG_HOOKTYPE=prechangegroup
710 HG_SOURCE=pull
708 HG_SOURCE=pull
711 HG_TXNID=TXN:$ID$
709 HG_TXNID=TXN:$ID$
712 HG_TXNNAME=pull
710 HG_TXNNAME=pull
713 file:/*/$TESTTMP/a (glob)
711 file:/*/$TESTTMP/a (glob)
714 HG_URL=file:$TESTTMP/a
712 HG_URL=file:$TESTTMP/a
715
713
716 abort: prechangegroup.forbid hook exited with status 1
714 abort: prechangegroup.forbid hook exited with status 1
717 [40]
715 [40]
718
716
719 pretxnchangegroup hook can see incoming changes, can roll back txn,
717 pretxnchangegroup hook can see incoming changes, can roll back txn,
720 incoming changes no longer there after
718 incoming changes no longer there after
721
719
722 $ cat > .hg/hgrc <<EOF
720 $ cat > .hg/hgrc <<EOF
723 > [hooks]
721 > [hooks]
724 > pretxnchangegroup.forbid0 = hg tip -q
722 > pretxnchangegroup.forbid0 = hg tip -q
725 > pretxnchangegroup.forbid1 = sh -c "printenv.py --line pretxnchangegroup.forbid 1"
723 > pretxnchangegroup.forbid1 = sh -c "printenv.py --line pretxnchangegroup.forbid 1"
726 > EOF
724 > EOF
727 $ hg pull ../a
725 $ hg pull ../a
728 pulling from ../a
726 pulling from ../a
729 searching for changes
727 searching for changes
730 adding changesets
728 adding changesets
731 adding manifests
729 adding manifests
732 adding file changes
730 adding file changes
733 4:539e4b31b6dc
731 4:539e4b31b6dc
734 pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1
732 pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1
735 HG_HOOKTYPE=pretxnchangegroup
733 HG_HOOKTYPE=pretxnchangegroup
736 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
734 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
737 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
735 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
738 HG_PENDING=$TESTTMP/b
736 HG_PENDING=$TESTTMP/b
739 HG_SOURCE=pull
737 HG_SOURCE=pull
740 HG_TXNID=TXN:$ID$
738 HG_TXNID=TXN:$ID$
741 HG_TXNNAME=pull
739 HG_TXNNAME=pull
742 file:/*/$TESTTMP/a (glob)
740 file:/*/$TESTTMP/a (glob)
743 HG_URL=file:$TESTTMP/a
741 HG_URL=file:$TESTTMP/a
744
742
745 transaction abort!
743 transaction abort!
746 rollback completed
744 rollback completed
747 abort: pretxnchangegroup.forbid1 hook exited with status 1
745 abort: pretxnchangegroup.forbid1 hook exited with status 1
748 [40]
746 [40]
749 $ hg -q tip
747 $ hg -q tip
750 3:07f3376c1e65
748 3:07f3376c1e65
751
749
752 outgoing hooks can see env vars
750 outgoing hooks can see env vars
753
751
754 $ rm .hg/hgrc
752 $ rm .hg/hgrc
755 $ cat > ../a/.hg/hgrc <<EOF
753 $ cat > ../a/.hg/hgrc <<EOF
756 > [hooks]
754 > [hooks]
757 > preoutgoing = sh -c "printenv.py --line preoutgoing"
755 > preoutgoing = sh -c "printenv.py --line preoutgoing"
758 > outgoing = sh -c "printenv.py --line outgoing"
756 > outgoing = sh -c "printenv.py --line outgoing"
759 > EOF
757 > EOF
760 $ hg pull ../a
758 $ hg pull ../a
761 pulling from ../a
759 pulling from ../a
762 searching for changes
760 searching for changes
763 preoutgoing hook: HG_HOOKNAME=preoutgoing
761 preoutgoing hook: HG_HOOKNAME=preoutgoing
764 HG_HOOKTYPE=preoutgoing
762 HG_HOOKTYPE=preoutgoing
765 HG_SOURCE=pull
763 HG_SOURCE=pull
766
764
767 outgoing hook: HG_HOOKNAME=outgoing
765 outgoing hook: HG_HOOKNAME=outgoing
768 HG_HOOKTYPE=outgoing
766 HG_HOOKTYPE=outgoing
769 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
767 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
770 HG_SOURCE=pull
768 HG_SOURCE=pull
771
769
772 adding changesets
770 adding changesets
773 adding manifests
771 adding manifests
774 adding file changes
772 adding file changes
775 adding remote bookmark quux
773 adding remote bookmark quux
776 added 1 changesets with 1 changes to 1 files
774 added 1 changesets with 1 changes to 1 files
777 new changesets 539e4b31b6dc
775 new changesets 539e4b31b6dc
778 (run 'hg update' to get a working copy)
776 (run 'hg update' to get a working copy)
779 $ hg rollback
777 $ hg rollback
780 repository tip rolled back to revision 3 (undo pull)
778 repository tip rolled back to revision 3 (undo pull)
781
779
782 preoutgoing hook can prevent outgoing changes
780 preoutgoing hook can prevent outgoing changes
783
781
784 $ cat >> ../a/.hg/hgrc <<EOF
782 $ cat >> ../a/.hg/hgrc <<EOF
785 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
783 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
786 > EOF
784 > EOF
787 $ hg pull ../a
785 $ hg pull ../a
788 pulling from ../a
786 pulling from ../a
789 searching for changes
787 searching for changes
790 preoutgoing hook: HG_HOOKNAME=preoutgoing
788 preoutgoing hook: HG_HOOKNAME=preoutgoing
791 HG_HOOKTYPE=preoutgoing
789 HG_HOOKTYPE=preoutgoing
792 HG_SOURCE=pull
790 HG_SOURCE=pull
793
791
794 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
792 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
795 HG_HOOKTYPE=preoutgoing
793 HG_HOOKTYPE=preoutgoing
796 HG_SOURCE=pull
794 HG_SOURCE=pull
797
795
798 abort: preoutgoing.forbid hook exited with status 1
796 abort: preoutgoing.forbid hook exited with status 1
799 [40]
797 [40]
800
798
801 outgoing hooks work for local clones
799 outgoing hooks work for local clones
802
800
803 $ cd ..
801 $ cd ..
804 $ cat > a/.hg/hgrc <<EOF
802 $ cat > a/.hg/hgrc <<EOF
805 > [hooks]
803 > [hooks]
806 > preoutgoing = sh -c "printenv.py --line preoutgoing"
804 > preoutgoing = sh -c "printenv.py --line preoutgoing"
807 > outgoing = sh -c "printenv.py --line outgoing"
805 > outgoing = sh -c "printenv.py --line outgoing"
808 > EOF
806 > EOF
809 $ hg clone a c
807 $ hg clone a c
810 preoutgoing hook: HG_HOOKNAME=preoutgoing
808 preoutgoing hook: HG_HOOKNAME=preoutgoing
811 HG_HOOKTYPE=preoutgoing
809 HG_HOOKTYPE=preoutgoing
812 HG_SOURCE=clone
810 HG_SOURCE=clone
813
811
814 outgoing hook: HG_HOOKNAME=outgoing
812 outgoing hook: HG_HOOKNAME=outgoing
815 HG_HOOKTYPE=outgoing
813 HG_HOOKTYPE=outgoing
816 HG_NODE=0000000000000000000000000000000000000000
814 HG_NODE=0000000000000000000000000000000000000000
817 HG_SOURCE=clone
815 HG_SOURCE=clone
818
816
819 updating to branch default
817 updating to branch default
820 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
818 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
821 $ rm -rf c
819 $ rm -rf c
822
820
823 preoutgoing hook can prevent outgoing changes for local clones
821 preoutgoing hook can prevent outgoing changes for local clones
824
822
825 $ cat >> a/.hg/hgrc <<EOF
823 $ cat >> a/.hg/hgrc <<EOF
826 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
824 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
827 > EOF
825 > EOF
828 $ hg clone a zzz
826 $ hg clone a zzz
829 preoutgoing hook: HG_HOOKNAME=preoutgoing
827 preoutgoing hook: HG_HOOKNAME=preoutgoing
830 HG_HOOKTYPE=preoutgoing
828 HG_HOOKTYPE=preoutgoing
831 HG_SOURCE=clone
829 HG_SOURCE=clone
832
830
833 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
831 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
834 HG_HOOKTYPE=preoutgoing
832 HG_HOOKTYPE=preoutgoing
835 HG_SOURCE=clone
833 HG_SOURCE=clone
836
834
837 abort: preoutgoing.forbid hook exited with status 1
835 abort: preoutgoing.forbid hook exited with status 1
838 [40]
836 [40]
839
837
840 $ cd "$TESTTMP/b"
838 $ cd "$TESTTMP/b"
841
839
842 $ cat > hooktests.py <<EOF
840 $ cat > hooktests.py <<EOF
843 > from mercurial import (
841 > from mercurial import (
844 > error,
842 > error,
845 > pycompat,
843 > pycompat,
846 > )
844 > )
847 >
845 >
848 > uncallable = 0
846 > uncallable = 0
849 >
847 >
850 > def printargs(ui, args):
848 > def printargs(ui, args):
851 > a = list(pycompat.byteskwargs(args).items())
849 > a = list(pycompat.byteskwargs(args).items())
852 > a.sort()
850 > a.sort()
853 > ui.write(b'hook args:\n')
851 > ui.write(b'hook args:\n')
854 > for k, v in a:
852 > for k, v in a:
855 > ui.write(b' %s %s\n' % (k, v))
853 > ui.write(b' %s %s\n' % (k, v))
856 >
854 >
857 > def passhook(ui, repo, **args):
855 > def passhook(ui, repo, **args):
858 > printargs(ui, args)
856 > printargs(ui, args)
859 >
857 >
860 > def failhook(ui, repo, **args):
858 > def failhook(ui, repo, **args):
861 > printargs(ui, args)
859 > printargs(ui, args)
862 > return True
860 > return True
863 >
861 >
864 > class LocalException(Exception):
862 > class LocalException(Exception):
865 > pass
863 > pass
866 >
864 >
867 > def raisehook(**args):
865 > def raisehook(**args):
868 > raise LocalException('exception from hook')
866 > raise LocalException('exception from hook')
869 >
867 >
870 > def aborthook(**args):
868 > def aborthook(**args):
871 > raise error.Abort(b'raise abort from hook')
869 > raise error.Abort(b'raise abort from hook')
872 >
870 >
873 > def brokenhook(**args):
871 > def brokenhook(**args):
874 > return 1 + {}
872 > return 1 + {}
875 >
873 >
876 > def verbosehook(ui, **args):
874 > def verbosehook(ui, **args):
877 > ui.note(b'verbose output from hook\n')
875 > ui.note(b'verbose output from hook\n')
878 >
876 >
879 > def printtags(ui, repo, **args):
877 > def printtags(ui, repo, **args):
880 > ui.write(b'[%s]\n' % b', '.join(sorted(repo.tags())))
878 > ui.write(b'[%s]\n' % b', '.join(sorted(repo.tags())))
881 >
879 >
882 > class container(object):
880 > class container(object):
883 > unreachable = 1
881 > unreachable = 1
884 > EOF
882 > EOF
885
883
886 $ cat > syntaxerror.py << NO_CHECK_EOF
884 $ cat > syntaxerror.py << NO_CHECK_EOF
887 > (foo
885 > (foo
888 > NO_CHECK_EOF
886 > NO_CHECK_EOF
889
887
890 test python hooks
888 test python hooks
891
889
892 #if windows
890 #if windows
893 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
891 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
894 #else
892 #else
895 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
893 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
896 #endif
894 #endif
897 $ export PYTHONPATH
895 $ export PYTHONPATH
898
896
899 $ echo '[hooks]' > ../a/.hg/hgrc
897 $ echo '[hooks]' > ../a/.hg/hgrc
900 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
898 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
901 $ hg pull ../a 2>&1 | grep 'raised an exception'
899 $ hg pull ../a 2>&1 | grep 'raised an exception'
902 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
900 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
903
901
904 $ echo '[hooks]' > ../a/.hg/hgrc
902 $ echo '[hooks]' > ../a/.hg/hgrc
905 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
903 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
906 $ hg pull ../a 2>&1 | grep 'raised an exception'
904 $ hg pull ../a 2>&1 | grep 'raised an exception'
907 error: preoutgoing.raise hook raised an exception: exception from hook
905 error: preoutgoing.raise hook raised an exception: exception from hook
908
906
909 $ echo '[hooks]' > ../a/.hg/hgrc
907 $ echo '[hooks]' > ../a/.hg/hgrc
910 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
908 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
911 $ hg pull ../a
909 $ hg pull ../a
912 pulling from ../a
910 pulling from ../a
913 searching for changes
911 searching for changes
914 error: preoutgoing.abort hook failed: raise abort from hook
912 error: preoutgoing.abort hook failed: raise abort from hook
915 abort: raise abort from hook
913 abort: raise abort from hook
916 [255]
914 [255]
917
915
918 $ echo '[hooks]' > ../a/.hg/hgrc
916 $ echo '[hooks]' > ../a/.hg/hgrc
919 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
917 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
920 $ hg pull ../a
918 $ hg pull ../a
921 pulling from ../a
919 pulling from ../a
922 searching for changes
920 searching for changes
923 hook args:
921 hook args:
924 hooktype preoutgoing
922 hooktype preoutgoing
925 source pull
923 source pull
926 abort: preoutgoing.fail hook failed
924 abort: preoutgoing.fail hook failed
927 [40]
925 [40]
928
926
929 $ echo '[hooks]' > ../a/.hg/hgrc
927 $ echo '[hooks]' > ../a/.hg/hgrc
930 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
928 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
931 $ hg pull ../a
929 $ hg pull ../a
932 pulling from ../a
930 pulling from ../a
933 searching for changes
931 searching for changes
934 abort: preoutgoing.uncallable hook is invalid: "hooktests.uncallable" is not callable
932 abort: preoutgoing.uncallable hook is invalid: "hooktests.uncallable" is not callable
935 [255]
933 [255]
936
934
937 $ echo '[hooks]' > ../a/.hg/hgrc
935 $ echo '[hooks]' > ../a/.hg/hgrc
938 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
936 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
939 $ hg pull ../a
937 $ hg pull ../a
940 pulling from ../a
938 pulling from ../a
941 searching for changes
939 searching for changes
942 abort: preoutgoing.nohook hook is invalid: "hooktests.nohook" is not defined
940 abort: preoutgoing.nohook hook is invalid: "hooktests.nohook" is not defined
943 [255]
941 [255]
944
942
945 $ echo '[hooks]' > ../a/.hg/hgrc
943 $ echo '[hooks]' > ../a/.hg/hgrc
946 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
944 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
947 $ hg pull ../a
945 $ hg pull ../a
948 pulling from ../a
946 pulling from ../a
949 searching for changes
947 searching for changes
950 abort: preoutgoing.nomodule hook is invalid: "nomodule" not in a module
948 abort: preoutgoing.nomodule hook is invalid: "nomodule" not in a module
951 [255]
949 [255]
952
950
953 $ echo '[hooks]' > ../a/.hg/hgrc
951 $ echo '[hooks]' > ../a/.hg/hgrc
954 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
952 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
955 $ hg pull ../a
953 $ hg pull ../a
956 pulling from ../a
954 pulling from ../a
957 searching for changes
955 searching for changes
958 abort: preoutgoing.badmodule hook is invalid: import of "nomodule" failed
956 abort: preoutgoing.badmodule hook is invalid: import of "nomodule" failed
959 (run with --traceback for stack trace)
957 (run with --traceback for stack trace)
960 [255]
958 [255]
961
959
962 $ echo '[hooks]' > ../a/.hg/hgrc
960 $ echo '[hooks]' > ../a/.hg/hgrc
963 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
961 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
964 $ hg pull ../a
962 $ hg pull ../a
965 pulling from ../a
963 pulling from ../a
966 searching for changes
964 searching for changes
967 abort: preoutgoing.unreachable hook is invalid: import of "hooktests.container" failed
965 abort: preoutgoing.unreachable hook is invalid: import of "hooktests.container" failed
968 (run with --traceback for stack trace)
966 (run with --traceback for stack trace)
969 [255]
967 [255]
970
968
971 $ echo '[hooks]' > ../a/.hg/hgrc
969 $ echo '[hooks]' > ../a/.hg/hgrc
972 $ echo 'preoutgoing.syntaxerror = python:syntaxerror.syntaxerror' >> ../a/.hg/hgrc
970 $ echo 'preoutgoing.syntaxerror = python:syntaxerror.syntaxerror' >> ../a/.hg/hgrc
973 $ hg pull ../a
971 $ hg pull ../a
974 pulling from ../a
972 pulling from ../a
975 searching for changes
973 searching for changes
976 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
974 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
977 (run with --traceback for stack trace)
975 (run with --traceback for stack trace)
978 [255]
976 [255]
979
977
980 $ hg pull ../a --traceback 2>&1 | egrep 'pulling|searching|^exception|Traceback|SyntaxError|ImportError|ModuleNotFoundError|HookLoadError|abort'
978 $ hg pull ../a --traceback 2>&1 | egrep 'pulling|searching|^exception|Traceback|SyntaxError|ImportError|ModuleNotFoundError|HookLoadError|abort'
981 pulling from ../a
979 pulling from ../a
982 searching for changes
980 searching for changes
983 exception from first failed import attempt:
981 exception from first failed import attempt:
984 Traceback (most recent call last):
982 Traceback (most recent call last):
985 SyntaxError: * (glob)
983 SyntaxError: * (glob)
986 exception from second failed import attempt:
984 exception from second failed import attempt:
987 Traceback (most recent call last):
985 Traceback (most recent call last):
988 SyntaxError: * (glob)
986 SyntaxError: * (glob)
989 Traceback (most recent call last):
987 Traceback (most recent call last):
990 ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
988 ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
991 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
989 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
992 Traceback (most recent call last):
990 Traceback (most recent call last):
993 SyntaxError: * (glob)
991 SyntaxError: * (glob)
994 Traceback (most recent call last):
992 Traceback (most recent call last):
995 ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
993 ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
996 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
994 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
997 Traceback (most recent call last):
995 Traceback (most recent call last):
998 raise error.HookLoadError( (py38 !)
996 raise error.HookLoadError( (py38 !)
999 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
997 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
1000 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
998 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
1001
999
1002 $ echo '[hooks]' > ../a/.hg/hgrc
1000 $ echo '[hooks]' > ../a/.hg/hgrc
1003 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
1001 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
1004 $ hg pull ../a
1002 $ hg pull ../a
1005 pulling from ../a
1003 pulling from ../a
1006 searching for changes
1004 searching for changes
1007 hook args:
1005 hook args:
1008 hooktype preoutgoing
1006 hooktype preoutgoing
1009 source pull
1007 source pull
1010 adding changesets
1008 adding changesets
1011 adding manifests
1009 adding manifests
1012 adding file changes
1010 adding file changes
1013 adding remote bookmark quux
1011 adding remote bookmark quux
1014 added 1 changesets with 1 changes to 1 files
1012 added 1 changesets with 1 changes to 1 files
1015 new changesets 539e4b31b6dc
1013 new changesets 539e4b31b6dc
1016 (run 'hg update' to get a working copy)
1014 (run 'hg update' to get a working copy)
1017
1015
1018 post- python hooks that fail to *run* don't cause an abort
1016 post- python hooks that fail to *run* don't cause an abort
1019 $ rm ../a/.hg/hgrc
1017 $ rm ../a/.hg/hgrc
1020 $ echo '[hooks]' > .hg/hgrc
1018 $ echo '[hooks]' > .hg/hgrc
1021 $ echo 'post-pull.broken = python:hooktests.brokenhook' >> .hg/hgrc
1019 $ echo 'post-pull.broken = python:hooktests.brokenhook' >> .hg/hgrc
1022 $ hg pull ../a
1020 $ hg pull ../a
1023 pulling from ../a
1021 pulling from ../a
1024 searching for changes
1022 searching for changes
1025 no changes found
1023 no changes found
1026 error: post-pull.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
1024 error: post-pull.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
1027 (run with --traceback for stack trace)
1025 (run with --traceback for stack trace)
1028
1026
1029 but post- python hooks that fail to *load* do
1027 but post- python hooks that fail to *load* do
1030 $ echo '[hooks]' > .hg/hgrc
1028 $ echo '[hooks]' > .hg/hgrc
1031 $ echo 'post-pull.nomodule = python:nomodule' >> .hg/hgrc
1029 $ echo 'post-pull.nomodule = python:nomodule' >> .hg/hgrc
1032 $ hg pull ../a
1030 $ hg pull ../a
1033 pulling from ../a
1031 pulling from ../a
1034 searching for changes
1032 searching for changes
1035 no changes found
1033 no changes found
1036 abort: post-pull.nomodule hook is invalid: "nomodule" not in a module
1034 abort: post-pull.nomodule hook is invalid: "nomodule" not in a module
1037 [255]
1035 [255]
1038
1036
1039 $ echo '[hooks]' > .hg/hgrc
1037 $ echo '[hooks]' > .hg/hgrc
1040 $ echo 'post-pull.badmodule = python:nomodule.nowhere' >> .hg/hgrc
1038 $ echo 'post-pull.badmodule = python:nomodule.nowhere' >> .hg/hgrc
1041 $ hg pull ../a
1039 $ hg pull ../a
1042 pulling from ../a
1040 pulling from ../a
1043 searching for changes
1041 searching for changes
1044 no changes found
1042 no changes found
1045 abort: post-pull.badmodule hook is invalid: import of "nomodule" failed
1043 abort: post-pull.badmodule hook is invalid: import of "nomodule" failed
1046 (run with --traceback for stack trace)
1044 (run with --traceback for stack trace)
1047 [255]
1045 [255]
1048
1046
1049 $ echo '[hooks]' > .hg/hgrc
1047 $ echo '[hooks]' > .hg/hgrc
1050 $ echo 'post-pull.nohook = python:hooktests.nohook' >> .hg/hgrc
1048 $ echo 'post-pull.nohook = python:hooktests.nohook' >> .hg/hgrc
1051 $ hg pull ../a
1049 $ hg pull ../a
1052 pulling from ../a
1050 pulling from ../a
1053 searching for changes
1051 searching for changes
1054 no changes found
1052 no changes found
1055 abort: post-pull.nohook hook is invalid: "hooktests.nohook" is not defined
1053 abort: post-pull.nohook hook is invalid: "hooktests.nohook" is not defined
1056 [255]
1054 [255]
1057
1055
1058 make sure --traceback works
1056 make sure --traceback works
1059
1057
1060 $ echo '[hooks]' > .hg/hgrc
1058 $ echo '[hooks]' > .hg/hgrc
1061 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
1059 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
1062
1060
1063 $ echo aa > a
1061 $ echo aa > a
1064 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
1062 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
1065 Traceback (most recent call last):
1063 Traceback (most recent call last):
1066
1064
1067 $ cd ..
1065 $ cd ..
1068 $ hg init c
1066 $ hg init c
1069 $ cd c
1067 $ cd c
1070
1068
1071 $ cat > hookext.py <<EOF
1069 $ cat > hookext.py <<EOF
1072 > def autohook(ui, **args):
1070 > def autohook(ui, **args):
1073 > ui.write(b'Automatically installed hook\n')
1071 > ui.write(b'Automatically installed hook\n')
1074 >
1072 >
1075 > def reposetup(ui, repo):
1073 > def reposetup(ui, repo):
1076 > repo.ui.setconfig(b"hooks", b"commit.auto", autohook)
1074 > repo.ui.setconfig(b"hooks", b"commit.auto", autohook)
1077 > EOF
1075 > EOF
1078 $ echo '[extensions]' >> .hg/hgrc
1076 $ echo '[extensions]' >> .hg/hgrc
1079 $ echo 'hookext = hookext.py' >> .hg/hgrc
1077 $ echo 'hookext = hookext.py' >> .hg/hgrc
1080
1078
1081 $ touch foo
1079 $ touch foo
1082 $ hg add foo
1080 $ hg add foo
1083 $ hg ci -d '0 0' -m 'add foo'
1081 $ hg ci -d '0 0' -m 'add foo'
1084 Automatically installed hook
1082 Automatically installed hook
1085 $ echo >> foo
1083 $ echo >> foo
1086 $ hg ci --debug -d '0 0' -m 'change foo'
1084 $ hg ci --debug -d '0 0' -m 'change foo'
1087 committing files:
1085 committing files:
1088 foo
1086 foo
1089 committing manifest
1087 committing manifest
1090 committing changelog
1088 committing changelog
1091 updating the branch cache
1089 updating the branch cache
1092 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
1090 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
1093 calling hook commit.auto: hgext_hookext.autohook
1091 calling hook commit.auto: hgext_hookext.autohook
1094 Automatically installed hook
1092 Automatically installed hook
1095
1093
1096 $ hg showconfig hooks
1094 $ hg showconfig hooks
1097 hooks.commit.auto=<function autohook at *> (glob)
1095 hooks.commit.auto=<function autohook at *> (glob)
1098
1096
1099 test python hook configured with python:[file]:[hook] syntax
1097 test python hook configured with python:[file]:[hook] syntax
1100
1098
1101 $ cd ..
1099 $ cd ..
1102 $ mkdir d
1100 $ mkdir d
1103 $ cd d
1101 $ cd d
1104 $ hg init repo
1102 $ hg init repo
1105 $ mkdir hooks
1103 $ mkdir hooks
1106
1104
1107 $ cd hooks
1105 $ cd hooks
1108 $ cat > testhooks.py <<EOF
1106 $ cat > testhooks.py <<EOF
1109 > def testhook(ui, **args):
1107 > def testhook(ui, **args):
1110 > ui.write(b'hook works\n')
1108 > ui.write(b'hook works\n')
1111 > EOF
1109 > EOF
1112 $ echo '[hooks]' > ../repo/.hg/hgrc
1110 $ echo '[hooks]' > ../repo/.hg/hgrc
1113 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
1111 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
1114
1112
1115 $ cd ../repo
1113 $ cd ../repo
1116 $ hg commit -d '0 0'
1114 $ hg commit -d '0 0'
1117 hook works
1115 hook works
1118 nothing changed
1116 nothing changed
1119 [1]
1117 [1]
1120
1118
1121 $ echo '[hooks]' > .hg/hgrc
1119 $ echo '[hooks]' > .hg/hgrc
1122 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
1120 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
1123 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
1121 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
1124
1122
1125 $ hg up null
1123 $ hg up null
1126 loading update.ne hook failed:
1124 loading update.ne hook failed:
1127 abort: $ENOENT$: '$TESTTMP/d/repo/nonexistent.py'
1125 abort: $ENOENT$: '$TESTTMP/d/repo/nonexistent.py'
1128 [255]
1126 [255]
1129
1127
1130 $ hg id
1128 $ hg id
1131 loading pre-identify.npmd hook failed:
1129 loading pre-identify.npmd hook failed:
1132 abort: No module named 'repo'
1130 abort: No module named 'repo'
1133 [255]
1131 [255]
1134
1132
1135 $ cd ../../b
1133 $ cd ../../b
1136
1134
1137 make sure --traceback works on hook import failure
1135 make sure --traceback works on hook import failure
1138
1136
1139 $ cat > importfail.py <<EOF
1137 $ cat > importfail.py <<EOF
1140 > import somebogusmodule
1138 > import somebogusmodule
1141 > # dereference something in the module to force demandimport to load it
1139 > # dereference something in the module to force demandimport to load it
1142 > somebogusmodule.whatever
1140 > somebogusmodule.whatever
1143 > EOF
1141 > EOF
1144
1142
1145 $ echo '[hooks]' > .hg/hgrc
1143 $ echo '[hooks]' > .hg/hgrc
1146 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
1144 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
1147
1145
1148 $ echo a >> a
1146 $ echo a >> a
1149 $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
1147 $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
1150 exception from first failed import attempt:
1148 exception from first failed import attempt:
1151 Traceback (most recent call last):
1149 Traceback (most recent call last):
1152 ImportError: No module named 'somebogusmodule' (no-py36 !)
1150 ImportError: No module named 'somebogusmodule' (no-py36 !)
1153 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1151 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1154 exception from second failed import attempt:
1152 exception from second failed import attempt:
1155 Traceback (most recent call last):
1153 Traceback (most recent call last):
1156 ImportError: No module named 'somebogusmodule' (no-py36 !)
1154 ImportError: No module named 'somebogusmodule' (no-py36 !)
1157 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1155 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1158 Traceback (most recent call last):
1156 Traceback (most recent call last):
1159 ImportError: No module named 'hgext_importfail' (no-py36 !)
1157 ImportError: No module named 'hgext_importfail' (no-py36 !)
1160 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1158 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1161 Traceback (most recent call last):
1159 Traceback (most recent call last):
1162 ImportError: No module named 'somebogusmodule' (no-py36 !)
1160 ImportError: No module named 'somebogusmodule' (no-py36 !)
1163 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1161 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1164 Traceback (most recent call last):
1162 Traceback (most recent call last):
1165 ImportError: No module named 'hgext_importfail' (no-py36 !)
1163 ImportError: No module named 'hgext_importfail' (no-py36 !)
1166 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1164 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1167 Traceback (most recent call last):
1165 Traceback (most recent call last):
1168 raise error.HookLoadError( (py38 !)
1166 raise error.HookLoadError( (py38 !)
1169 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed
1167 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed
1170 abort: precommit.importfail hook is invalid: import of "importfail" failed
1168 abort: precommit.importfail hook is invalid: import of "importfail" failed
1171
1169
1172 Issue1827: Hooks Update & Commit not completely post operation
1170 Issue1827: Hooks Update & Commit not completely post operation
1173
1171
1174 commit and update hooks should run after command completion. The largefiles
1172 commit and update hooks should run after command completion. The largefiles
1175 use demonstrates a recursive wlock, showing the hook doesn't run until the
1173 use demonstrates a recursive wlock, showing the hook doesn't run until the
1176 final release (and dirstate flush).
1174 final release (and dirstate flush).
1177
1175
1178 $ echo '[hooks]' > .hg/hgrc
1176 $ echo '[hooks]' > .hg/hgrc
1179 $ echo 'commit = hg id' >> .hg/hgrc
1177 $ echo 'commit = hg id' >> .hg/hgrc
1180 $ echo 'update = hg id' >> .hg/hgrc
1178 $ echo 'update = hg id' >> .hg/hgrc
1181 $ echo bb > a
1179 $ echo bb > a
1182 $ hg ci -ma
1180 $ hg ci -ma
1183 223eafe2750c tip
1181 223eafe2750c tip
1184 $ hg up 0 --config extensions.largefiles=
1182 $ hg up 0 --config extensions.largefiles=
1185 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
1183 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
1186 cb9a9f314b8b
1184 cb9a9f314b8b
1187 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1185 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1188
1186
1189 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
1187 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
1190 that is passed to pre/post hooks
1188 that is passed to pre/post hooks
1191
1189
1192 $ echo '[hooks]' > .hg/hgrc
1190 $ echo '[hooks]' > .hg/hgrc
1193 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
1191 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
1194 $ hg id
1192 $ hg id
1195 cb9a9f314b8b
1193 cb9a9f314b8b
1196 $ hg id --verbose
1194 $ hg id --verbose
1197 calling hook pre-identify: hooktests.verbosehook
1195 calling hook pre-identify: hooktests.verbosehook
1198 verbose output from hook
1196 verbose output from hook
1199 cb9a9f314b8b
1197 cb9a9f314b8b
1200
1198
1201 Ensure hooks can be prioritized
1199 Ensure hooks can be prioritized
1202
1200
1203 $ echo '[hooks]' > .hg/hgrc
1201 $ echo '[hooks]' > .hg/hgrc
1204 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
1202 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
1205 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
1203 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
1206 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
1204 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
1207 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
1205 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
1208 $ hg id --verbose
1206 $ hg id --verbose
1209 calling hook pre-identify.b: hooktests.verbosehook
1207 calling hook pre-identify.b: hooktests.verbosehook
1210 verbose output from hook
1208 verbose output from hook
1211 calling hook pre-identify.a: hooktests.verbosehook
1209 calling hook pre-identify.a: hooktests.verbosehook
1212 verbose output from hook
1210 verbose output from hook
1213 calling hook pre-identify.c: hooktests.verbosehook
1211 calling hook pre-identify.c: hooktests.verbosehook
1214 verbose output from hook
1212 verbose output from hook
1215 cb9a9f314b8b
1213 cb9a9f314b8b
1216
1214
1217 new tags must be visible in pretxncommit (issue3210)
1215 new tags must be visible in pretxncommit (issue3210)
1218
1216
1219 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
1217 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
1220 $ hg tag -f foo
1218 $ hg tag -f foo
1221 [a, foo, tip]
1219 [a, foo, tip]
1222
1220
1223 post-init hooks must not crash (issue4983)
1221 post-init hooks must not crash (issue4983)
1224 This also creates the `to` repo for the next test block.
1222 This also creates the `to` repo for the next test block.
1225
1223
1226 $ cd ..
1224 $ cd ..
1227 $ cat << EOF >> hgrc-with-post-init-hook
1225 $ cat << EOF >> hgrc-with-post-init-hook
1228 > [hooks]
1226 > [hooks]
1229 > post-init = sh -c "printenv.py --line post-init"
1227 > post-init = sh -c "printenv.py --line post-init"
1230 > EOF
1228 > EOF
1231 $ HGRCPATH=hgrc-with-post-init-hook hg init to
1229 $ HGRCPATH=hgrc-with-post-init-hook hg init to
1232 post-init hook: HG_ARGS=init to
1230 post-init hook: HG_ARGS=init to
1233 HG_HOOKNAME=post-init
1231 HG_HOOKNAME=post-init
1234 HG_HOOKTYPE=post-init
1232 HG_HOOKTYPE=post-init
1235 HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''}
1233 HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''}
1236 HG_PATS=['to']
1234 HG_PATS=['to']
1237 HG_RESULT=0
1235 HG_RESULT=0
1238
1236
1239
1237
1240 new commits must be visible in pretxnchangegroup (issue3428)
1238 new commits must be visible in pretxnchangegroup (issue3428)
1241
1239
1242 $ echo '[hooks]' >> to/.hg/hgrc
1240 $ echo '[hooks]' >> to/.hg/hgrc
1243 $ echo 'prechangegroup = hg --traceback tip' >> to/.hg/hgrc
1241 $ echo 'prechangegroup = hg --traceback tip' >> to/.hg/hgrc
1244 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
1242 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
1245 $ echo a >> to/a
1243 $ echo a >> to/a
1246 $ hg --cwd to ci -Ama
1244 $ hg --cwd to ci -Ama
1247 adding a
1245 adding a
1248 $ hg clone to from
1246 $ hg clone to from
1249 updating to branch default
1247 updating to branch default
1250 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1248 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1251 $ echo aa >> from/a
1249 $ echo aa >> from/a
1252 $ hg --cwd from ci -mb
1250 $ hg --cwd from ci -mb
1253 $ hg --cwd from push
1251 $ hg --cwd from push
1254 pushing to $TESTTMP/to
1252 pushing to $TESTTMP/to
1255 searching for changes
1253 searching for changes
1256 changeset: 0:cb9a9f314b8b
1254 changeset: 0:cb9a9f314b8b
1257 tag: tip
1255 tag: tip
1258 user: test
1256 user: test
1259 date: Thu Jan 01 00:00:00 1970 +0000
1257 date: Thu Jan 01 00:00:00 1970 +0000
1260 summary: a
1258 summary: a
1261
1259
1262 adding changesets
1260 adding changesets
1263 adding manifests
1261 adding manifests
1264 adding file changes
1262 adding file changes
1265 changeset: 1:9836a07b9b9d
1263 changeset: 1:9836a07b9b9d
1266 tag: tip
1264 tag: tip
1267 user: test
1265 user: test
1268 date: Thu Jan 01 00:00:00 1970 +0000
1266 date: Thu Jan 01 00:00:00 1970 +0000
1269 summary: b
1267 summary: b
1270
1268
1271 added 1 changesets with 1 changes to 1 files
1269 added 1 changesets with 1 changes to 1 files
1272
1270
1273 pretxnclose hook failure should abort the transaction
1271 pretxnclose hook failure should abort the transaction
1274
1272
1275 $ hg init txnfailure
1273 $ hg init txnfailure
1276 $ cd txnfailure
1274 $ cd txnfailure
1277 $ touch a && hg commit -Aqm a
1275 $ touch a && hg commit -Aqm a
1278 $ cat >> .hg/hgrc <<EOF
1276 $ cat >> .hg/hgrc <<EOF
1279 > [hooks]
1277 > [hooks]
1280 > pretxnclose.error = exit 1
1278 > pretxnclose.error = exit 1
1281 > EOF
1279 > EOF
1282 $ hg strip -r 0 --config extensions.strip=
1280 $ hg strip -r 0 --config extensions.strip=
1283 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1281 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1284 saved backup bundle to * (glob)
1282 saved backup bundle to * (glob)
1285 transaction abort!
1283 transaction abort!
1286 rollback completed
1284 rollback completed
1287 strip failed, backup bundle stored in * (glob)
1285 strip failed, backup bundle stored in * (glob)
1288 abort: pretxnclose.error hook exited with status 1
1286 abort: pretxnclose.error hook exited with status 1
1289 [40]
1287 [40]
1290 $ hg recover
1288 $ hg recover
1291 no interrupted transaction available
1289 no interrupted transaction available
1292 [1]
1290 [1]
1293 $ cd ..
1291 $ cd ..
1294
1292
1295 check whether HG_PENDING makes pending changes only in related
1293 check whether HG_PENDING makes pending changes only in related
1296 repositories visible to an external hook.
1294 repositories visible to an external hook.
1297
1295
1298 (emulate a transaction running concurrently by copied
1296 (emulate a transaction running concurrently by copied
1299 .hg/store/00changelog.i.a in subsequent test)
1297 .hg/store/00changelog.i.a in subsequent test)
1300
1298
1301 $ cat > $TESTTMP/savepending.sh <<EOF
1299 $ cat > $TESTTMP/savepending.sh <<EOF
1302 > cp .hg/store/00changelog.i.a .hg/store/00changelog.i.a.saved
1300 > cp .hg/store/00changelog.i.a .hg/store/00changelog.i.a.saved
1303 > exit 1 # to avoid adding new revision for subsequent tests
1301 > exit 1 # to avoid adding new revision for subsequent tests
1304 > EOF
1302 > EOF
1305 $ cd a
1303 $ cd a
1306 $ hg tip -q
1304 $ hg tip -q
1307 4:539e4b31b6dc
1305 4:539e4b31b6dc
1308 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
1306 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
1309 transaction abort!
1307 transaction abort!
1310 rollback completed
1308 rollback completed
1311 abort: pretxnclose hook exited with status 1
1309 abort: pretxnclose hook exited with status 1
1312 [40]
1310 [40]
1313 $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
1311 $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
1314
1312
1315 (check (in)visibility of new changeset while transaction running in
1313 (check (in)visibility of new changeset while transaction running in
1316 repo)
1314 repo)
1317
1315
1318 $ cat > $TESTTMP/checkpending.sh <<EOF
1316 $ cat > $TESTTMP/checkpending.sh <<EOF
1319 > echo '@a'
1317 > echo '@a'
1320 > hg -R "$TESTTMP/a" tip -q
1318 > hg -R "$TESTTMP/a" tip -q
1321 > echo '@a/nested'
1319 > echo '@a/nested'
1322 > hg -R "$TESTTMP/a/nested" tip -q
1320 > hg -R "$TESTTMP/a/nested" tip -q
1323 > exit 1 # to avoid adding new revision for subsequent tests
1321 > exit 1 # to avoid adding new revision for subsequent tests
1324 > EOF
1322 > EOF
1325 $ hg init nested
1323 $ hg init nested
1326 $ cd nested
1324 $ cd nested
1327 $ echo a > a
1325 $ echo a > a
1328 $ hg add a
1326 $ hg add a
1329 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
1327 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
1330 @a
1328 @a
1331 4:539e4b31b6dc
1329 4:539e4b31b6dc
1332 @a/nested
1330 @a/nested
1333 0:bf5e395ced2c
1331 0:bf5e395ced2c
1334 transaction abort!
1332 transaction abort!
1335 rollback completed
1333 rollback completed
1336 abort: pretxnclose hook exited with status 1
1334 abort: pretxnclose hook exited with status 1
1337 [40]
1335 [40]
1338
1336
1339 Hook from untrusted hgrc are reported as failure
1337 Hook from untrusted hgrc are reported as failure
1340 ================================================
1338 ================================================
1341
1339
1342 $ cat << EOF > $TESTTMP/untrusted.py
1340 $ cat << EOF > $TESTTMP/untrusted.py
1343 > from mercurial import scmutil, util
1341 > from mercurial import scmutil, util
1344 > def uisetup(ui):
1342 > def uisetup(ui):
1345 > class untrustedui(ui.__class__):
1343 > class untrustedui(ui.__class__):
1346 > def _trusted(self, fp, f):
1344 > def _trusted(self, fp, f):
1347 > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
1345 > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
1348 > return False
1346 > return False
1349 > return super(untrustedui, self)._trusted(fp, f)
1347 > return super(untrustedui, self)._trusted(fp, f)
1350 > ui.__class__ = untrustedui
1348 > ui.__class__ = untrustedui
1351 > EOF
1349 > EOF
1352 $ cat << EOF >> $HGRCPATH
1350 $ cat << EOF >> $HGRCPATH
1353 > [extensions]
1351 > [extensions]
1354 > untrusted=$TESTTMP/untrusted.py
1352 > untrusted=$TESTTMP/untrusted.py
1355 > EOF
1353 > EOF
1356 $ hg init untrusted
1354 $ hg init untrusted
1357 $ cd untrusted
1355 $ cd untrusted
1358
1356
1359 Non-blocking hook
1357 Non-blocking hook
1360 -----------------
1358 -----------------
1361
1359
1362 $ cat << EOF >> .hg/hgrc
1360 $ cat << EOF >> .hg/hgrc
1363 > [hooks]
1361 > [hooks]
1364 > txnclose.testing=echo txnclose hook called
1362 > txnclose.testing=echo txnclose hook called
1365 > EOF
1363 > EOF
1366 $ touch a && hg commit -Aqm a
1364 $ touch a && hg commit -Aqm a
1367 warning: untrusted hook txnclose.testing not executed
1365 warning: untrusted hook txnclose.testing not executed
1368 $ hg log
1366 $ hg log
1369 changeset: 0:3903775176ed
1367 changeset: 0:3903775176ed
1370 tag: tip
1368 tag: tip
1371 user: test
1369 user: test
1372 date: Thu Jan 01 00:00:00 1970 +0000
1370 date: Thu Jan 01 00:00:00 1970 +0000
1373 summary: a
1371 summary: a
1374
1372
1375
1373
1376 Non-blocking hook
1374 Non-blocking hook
1377 -----------------
1375 -----------------
1378
1376
1379 $ cat << EOF >> .hg/hgrc
1377 $ cat << EOF >> .hg/hgrc
1380 > [hooks]
1378 > [hooks]
1381 > pretxnclose.testing=echo pre-txnclose hook called
1379 > pretxnclose.testing=echo pre-txnclose hook called
1382 > EOF
1380 > EOF
1383 $ touch b && hg commit -Aqm a
1381 $ touch b && hg commit -Aqm a
1384 transaction abort!
1382 transaction abort!
1385 rollback completed
1383 rollback completed
1386 abort: untrusted hook pretxnclose.testing not executed
1384 abort: untrusted hook pretxnclose.testing not executed
1387 (see 'hg help config.trusted')
1385 (see 'hg help config.trusted')
1388 [40]
1386 [40]
1389 $ hg log
1387 $ hg log
1390 changeset: 0:3903775176ed
1388 changeset: 0:3903775176ed
1391 tag: tip
1389 tag: tip
1392 user: test
1390 user: test
1393 date: Thu Jan 01 00:00:00 1970 +0000
1391 date: Thu Jan 01 00:00:00 1970 +0000
1394 summary: a
1392 summary: a
1395
1393
1396
1394
1397 unsetup the test
1395 unsetup the test
1398 ----------------
1396 ----------------
1399
1397
1400 # touch the file to unconfuse chg with a diffrent mtime
1398 # touch the file to unconfuse chg with a diffrent mtime
1401 $ sleep 1
1399 $ sleep 1
1402 $ touch $TESTTMP/untrusted.py
1400 $ touch $TESTTMP/untrusted.py
1403 $ cat << EOF >> $HGRCPATH
1401 $ cat << EOF >> $HGRCPATH
1404 > [extensions]
1402 > [extensions]
1405 > untrusted=!
1403 > untrusted=!
1406 > EOF
1404 > EOF
1407
1405
1408 HGPLAIN setting in hooks
1406 HGPLAIN setting in hooks
1409 ========================
1407 ========================
1410
1408
1411 $ cat << EOF >> .hg/hgrc
1409 $ cat << EOF >> .hg/hgrc
1412 > [hooks]
1410 > [hooks]
1413 > pre-version.testing-default=sh -c "echo '### default ###' plain: \${HGPLAIN:-'<unset>'}"
1411 > pre-version.testing-default=sh -c "echo '### default ###' plain: \${HGPLAIN:-'<unset>'}"
1414 > pre-version.testing-yes=sh -c "echo '### yes #######' plain: \${HGPLAIN:-'<unset>'}"
1412 > pre-version.testing-yes=sh -c "echo '### yes #######' plain: \${HGPLAIN:-'<unset>'}"
1415 > pre-version.testing-yes:run-with-plain=yes
1413 > pre-version.testing-yes:run-with-plain=yes
1416 > pre-version.testing-no=sh -c "echo '### no ########' plain: \${HGPLAIN:-'<unset>'}"
1414 > pre-version.testing-no=sh -c "echo '### no ########' plain: \${HGPLAIN:-'<unset>'}"
1417 > pre-version.testing-no:run-with-plain=no
1415 > pre-version.testing-no:run-with-plain=no
1418 > pre-version.testing-auto=sh -c "echo '### auto ######' plain: \${HGPLAIN:-'<unset>'}"
1416 > pre-version.testing-auto=sh -c "echo '### auto ######' plain: \${HGPLAIN:-'<unset>'}"
1419 > pre-version.testing-auto:run-with-plain=auto
1417 > pre-version.testing-auto:run-with-plain=auto
1420 > EOF
1418 > EOF
1421
1419
1422 $ (unset HGPLAIN; hg version --quiet)
1420 $ (unset HGPLAIN; hg version --quiet)
1423 ### default ### plain: 1
1421 ### default ### plain: 1
1424 ### yes ####### plain: 1
1422 ### yes ####### plain: 1
1425 ### no ######## plain: <unset>
1423 ### no ######## plain: <unset>
1426 ### auto ###### plain: <unset>
1424 ### auto ###### plain: <unset>
1427 Mercurial Distributed SCM (*) (glob)
1425 Mercurial Distributed SCM (*) (glob)
1428
1426
1429 $ HGPLAIN=1 hg version --quiet
1427 $ HGPLAIN=1 hg version --quiet
1430 ### default ### plain: 1
1428 ### default ### plain: 1
1431 ### yes ####### plain: 1
1429 ### yes ####### plain: 1
1432 ### no ######## plain: <unset>
1430 ### no ######## plain: <unset>
1433 ### auto ###### plain: 1
1431 ### auto ###### plain: 1
1434 Mercurial Distributed SCM (*) (glob)
1432 Mercurial Distributed SCM (*) (glob)
@@ -1,182 +1,180 b''
1 #require unix-permissions
1 #require unix-permissions
2
2
3 test that new files created in .hg inherit the permissions from .hg/store
3 test that new files created in .hg inherit the permissions from .hg/store
4
4
5 $ mkdir dir
5 $ mkdir dir
6
6
7 just in case somebody has a strange $TMPDIR
7 just in case somebody has a strange $TMPDIR
8
8
9 $ chmod g-s dir
9 $ chmod g-s dir
10 $ cd dir
10 $ cd dir
11
11
12 $ cat >printmodes.py <<EOF
12 $ cat >printmodes.py <<EOF
13 > import os
13 > import os
14 > import sys
14 > import sys
15 >
15 >
16 > allnames = []
16 > allnames = []
17 > isdir = {}
17 > isdir = {}
18 > for root, dirs, files in os.walk(sys.argv[1]):
18 > for root, dirs, files in os.walk(sys.argv[1]):
19 > for d in dirs:
19 > for d in dirs:
20 > name = os.path.join(root, d)
20 > name = os.path.join(root, d)
21 > isdir[name] = 1
21 > isdir[name] = 1
22 > allnames.append(name)
22 > allnames.append(name)
23 > for f in files:
23 > for f in files:
24 > name = os.path.join(root, f)
24 > name = os.path.join(root, f)
25 > allnames.append(name)
25 > allnames.append(name)
26 > allnames.sort()
26 > allnames.sort()
27 > for name in allnames:
27 > for name in allnames:
28 > suffix = name in isdir and '/' or ''
28 > suffix = name in isdir and '/' or ''
29 > print('%05o %s%s' % (os.lstat(name).st_mode & 0o7777, name, suffix))
29 > print('%05o %s%s' % (os.lstat(name).st_mode & 0o7777, name, suffix))
30 > EOF
30 > EOF
31
31
32 $ cat >mode.py <<EOF
32 $ cat >mode.py <<EOF
33 > import os
33 > import os
34 > import sys
34 > import sys
35 > print('%05o' % os.lstat(sys.argv[1]).st_mode)
35 > print('%05o' % os.lstat(sys.argv[1]).st_mode)
36 > EOF
36 > EOF
37
37
38 $ umask 077
38 $ umask 077
39
39
40 $ hg init repo
40 $ hg init repo
41 $ cd repo
41 $ cd repo
42
42
43 $ chmod 0770 .hg/store .hg/cache .hg/wcache
43 $ chmod 0770 .hg/store .hg/cache .hg/wcache
44
44
45 before commit
45 before commit
46 store can be written by the group, other files cannot
46 store can be written by the group, other files cannot
47 store is setgid
47 store is setgid
48
48
49 $ "$PYTHON" ../printmodes.py .
49 $ "$PYTHON" ../printmodes.py .
50 00700 ./.hg/
50 00700 ./.hg/
51 00600 ./.hg/00changelog.i
51 00600 ./.hg/00changelog.i
52 00770 ./.hg/cache/
52 00770 ./.hg/cache/
53 00600 ./.hg/requires
53 00600 ./.hg/requires
54 00770 ./.hg/store/
54 00770 ./.hg/store/
55 00600 ./.hg/store/requires
55 00600 ./.hg/store/requires
56 00770 ./.hg/wcache/
56 00770 ./.hg/wcache/
57
57
58 $ mkdir dir
58 $ mkdir dir
59 $ touch foo dir/bar
59 $ touch foo dir/bar
60 $ hg ci -qAm 'add files'
60 $ hg ci -qAm 'add files'
61
61
62 after commit
62 after commit
63 working dir files can only be written by the owner
63 working dir files can only be written by the owner
64 files created in .hg can be written by the group
64 files created in .hg can be written by the group
65 (in particular, store/**, dirstate, branch cache file, undo files)
65 (in particular, store/**, dirstate, branch cache file, undo files)
66 new directories are setgid
66 new directories are setgid
67
67
68 $ "$PYTHON" ../printmodes.py .
68 $ "$PYTHON" ../printmodes.py .
69 00700 ./.hg/
69 00700 ./.hg/
70 00600 ./.hg/00changelog.i
70 00600 ./.hg/00changelog.i
71 00770 ./.hg/cache/
71 00770 ./.hg/cache/
72 00660 ./.hg/cache/branch2-served
72 00660 ./.hg/cache/branch2-served
73 00660 ./.hg/cache/rbc-names-v1
73 00660 ./.hg/cache/rbc-names-v1
74 00660 ./.hg/cache/rbc-revs-v1
74 00660 ./.hg/cache/rbc-revs-v1
75 00660 ./.hg/dirstate
75 00660 ./.hg/dirstate
76 00660 ./.hg/fsmonitor.state (fsmonitor !)
76 00660 ./.hg/fsmonitor.state (fsmonitor !)
77 00660 ./.hg/last-message.txt
77 00660 ./.hg/last-message.txt
78 00600 ./.hg/requires
78 00600 ./.hg/requires
79 00770 ./.hg/store/
79 00770 ./.hg/store/
80 00660 ./.hg/store/00changelog.i
80 00660 ./.hg/store/00changelog.i
81 00660 ./.hg/store/00manifest.i
81 00660 ./.hg/store/00manifest.i
82 00770 ./.hg/store/data/
82 00770 ./.hg/store/data/
83 00770 ./.hg/store/data/dir/
83 00770 ./.hg/store/data/dir/
84 00660 ./.hg/store/data/dir/bar.i (reporevlogstore !)
84 00660 ./.hg/store/data/dir/bar.i (reporevlogstore !)
85 00660 ./.hg/store/data/foo.i (reporevlogstore !)
85 00660 ./.hg/store/data/foo.i (reporevlogstore !)
86 00770 ./.hg/store/data/dir/bar/ (reposimplestore !)
86 00770 ./.hg/store/data/dir/bar/ (reposimplestore !)
87 00660 ./.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
87 00660 ./.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
88 00660 ./.hg/store/data/dir/bar/index (reposimplestore !)
88 00660 ./.hg/store/data/dir/bar/index (reposimplestore !)
89 00770 ./.hg/store/data/foo/ (reposimplestore !)
89 00770 ./.hg/store/data/foo/ (reposimplestore !)
90 00660 ./.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
90 00660 ./.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
91 00660 ./.hg/store/data/foo/index (reposimplestore !)
91 00660 ./.hg/store/data/foo/index (reposimplestore !)
92 00660 ./.hg/store/fncache (repofncache !)
92 00660 ./.hg/store/fncache (repofncache !)
93 00660 ./.hg/store/phaseroots
93 00660 ./.hg/store/phaseroots
94 00600 ./.hg/store/requires
94 00600 ./.hg/store/requires
95 00660 ./.hg/store/undo
95 00660 ./.hg/store/undo
96 00660 ./.hg/store/undo.backupfiles
96 00660 ./.hg/store/undo.backupfiles
97 00660 ./.hg/store/undo.phaseroots
98 00660 ./.hg/undo.bookmarks
97 00660 ./.hg/undo.bookmarks
99 00660 ./.hg/undo.branch
98 00660 ./.hg/undo.branch
100 00660 ./.hg/undo.desc
99 00660 ./.hg/undo.desc
101 00770 ./.hg/wcache/
100 00770 ./.hg/wcache/
102 00711 ./.hg/wcache/checkisexec
101 00711 ./.hg/wcache/checkisexec
103 007.. ./.hg/wcache/checklink (re)
102 007.. ./.hg/wcache/checklink (re)
104 00600 ./.hg/wcache/checklink-target
103 00600 ./.hg/wcache/checklink-target
105 00660 ./.hg/wcache/manifestfulltextcache (reporevlogstore !)
104 00660 ./.hg/wcache/manifestfulltextcache (reporevlogstore !)
106 00700 ./dir/
105 00700 ./dir/
107 00600 ./dir/bar
106 00600 ./dir/bar
108 00600 ./foo
107 00600 ./foo
109
108
110 $ umask 007
109 $ umask 007
111 $ hg init ../push
110 $ hg init ../push
112
111
113 before push
112 before push
114 group can write everything
113 group can write everything
115
114
116 $ "$PYTHON" ../printmodes.py ../push
115 $ "$PYTHON" ../printmodes.py ../push
117 00770 ../push/.hg/
116 00770 ../push/.hg/
118 00660 ../push/.hg/00changelog.i
117 00660 ../push/.hg/00changelog.i
119 00770 ../push/.hg/cache/
118 00770 ../push/.hg/cache/
120 00660 ../push/.hg/requires
119 00660 ../push/.hg/requires
121 00770 ../push/.hg/store/
120 00770 ../push/.hg/store/
122 00660 ../push/.hg/store/requires
121 00660 ../push/.hg/store/requires
123 00770 ../push/.hg/wcache/
122 00770 ../push/.hg/wcache/
124
123
125 $ umask 077
124 $ umask 077
126 $ hg -q push ../push
125 $ hg -q push ../push
127
126
128 after push
127 after push
129 group can still write everything
128 group can still write everything
130
129
131 $ "$PYTHON" ../printmodes.py ../push
130 $ "$PYTHON" ../printmodes.py ../push
132 00770 ../push/.hg/
131 00770 ../push/.hg/
133 00660 ../push/.hg/00changelog.i
132 00660 ../push/.hg/00changelog.i
134 00770 ../push/.hg/cache/
133 00770 ../push/.hg/cache/
135 00660 ../push/.hg/cache/branch2-base
134 00660 ../push/.hg/cache/branch2-base
136 00660 ../push/.hg/cache/rbc-names-v1
135 00660 ../push/.hg/cache/rbc-names-v1
137 00660 ../push/.hg/cache/rbc-revs-v1
136 00660 ../push/.hg/cache/rbc-revs-v1
138 00660 ../push/.hg/requires
137 00660 ../push/.hg/requires
139 00770 ../push/.hg/store/
138 00770 ../push/.hg/store/
140 00660 ../push/.hg/store/00changelog.i
139 00660 ../push/.hg/store/00changelog.i
141 00660 ../push/.hg/store/00manifest.i
140 00660 ../push/.hg/store/00manifest.i
142 00770 ../push/.hg/store/data/
141 00770 ../push/.hg/store/data/
143 00770 ../push/.hg/store/data/dir/
142 00770 ../push/.hg/store/data/dir/
144 00660 ../push/.hg/store/data/dir/bar.i (reporevlogstore !)
143 00660 ../push/.hg/store/data/dir/bar.i (reporevlogstore !)
145 00660 ../push/.hg/store/data/foo.i (reporevlogstore !)
144 00660 ../push/.hg/store/data/foo.i (reporevlogstore !)
146 00770 ../push/.hg/store/data/dir/bar/ (reposimplestore !)
145 00770 ../push/.hg/store/data/dir/bar/ (reposimplestore !)
147 00660 ../push/.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
146 00660 ../push/.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
148 00660 ../push/.hg/store/data/dir/bar/index (reposimplestore !)
147 00660 ../push/.hg/store/data/dir/bar/index (reposimplestore !)
149 00770 ../push/.hg/store/data/foo/ (reposimplestore !)
148 00770 ../push/.hg/store/data/foo/ (reposimplestore !)
150 00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
149 00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
151 00660 ../push/.hg/store/data/foo/index (reposimplestore !)
150 00660 ../push/.hg/store/data/foo/index (reposimplestore !)
152 00660 ../push/.hg/store/fncache (repofncache !)
151 00660 ../push/.hg/store/fncache (repofncache !)
153 00660 ../push/.hg/store/requires
152 00660 ../push/.hg/store/requires
154 00660 ../push/.hg/store/undo
153 00660 ../push/.hg/store/undo
155 00660 ../push/.hg/store/undo.backupfiles
154 00660 ../push/.hg/store/undo.backupfiles
156 00660 ../push/.hg/store/undo.phaseroots
157 00660 ../push/.hg/undo.bookmarks
155 00660 ../push/.hg/undo.bookmarks
158 00660 ../push/.hg/undo.branch
156 00660 ../push/.hg/undo.branch
159 00660 ../push/.hg/undo.desc
157 00660 ../push/.hg/undo.desc
160 00770 ../push/.hg/wcache/
158 00770 ../push/.hg/wcache/
161
159
162
160
163 Test that we don't lose the setgid bit when we call chmod.
161 Test that we don't lose the setgid bit when we call chmod.
164 Not all systems support setgid directories (e.g. HFS+), so
162 Not all systems support setgid directories (e.g. HFS+), so
165 just check that directories have the same mode.
163 just check that directories have the same mode.
166
164
167 $ cd ..
165 $ cd ..
168 $ hg init setgid
166 $ hg init setgid
169 $ cd setgid
167 $ cd setgid
170 $ chmod g+rwx .hg/store
168 $ chmod g+rwx .hg/store
171 $ chmod g+s .hg/store 2> /dev/null || true
169 $ chmod g+s .hg/store 2> /dev/null || true
172 $ mkdir dir
170 $ mkdir dir
173 $ touch dir/file
171 $ touch dir/file
174 $ hg ci -qAm 'add dir/file'
172 $ hg ci -qAm 'add dir/file'
175 $ storemode=`"$PYTHON" ../mode.py .hg/store`
173 $ storemode=`"$PYTHON" ../mode.py .hg/store`
176 $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
174 $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
177 $ if [ "$storemode" != "$dirmode" ]; then
175 $ if [ "$storemode" != "$dirmode" ]; then
178 > echo "$storemode != $dirmode"
176 > echo "$storemode != $dirmode"
179 > fi
177 > fi
180 $ cd ..
178 $ cd ..
181
179
182 $ cd .. # g-s dir
180 $ cd .. # g-s dir
@@ -1,104 +1,103 b''
1 #testcases tree flat-fncache flat-nofncache
1 #testcases tree flat-fncache flat-nofncache
2
2
3 Tests narrow stream clones
3 Tests narrow stream clones
4
4
5 $ . "$TESTDIR/narrow-library.sh"
5 $ . "$TESTDIR/narrow-library.sh"
6
6
7 #if tree
7 #if tree
8 $ cat << EOF >> $HGRCPATH
8 $ cat << EOF >> $HGRCPATH
9 > [experimental]
9 > [experimental]
10 > treemanifest = 1
10 > treemanifest = 1
11 > EOF
11 > EOF
12 #endif
12 #endif
13
13
14 #if flat-nofncache
14 #if flat-nofncache
15 $ cat << EOF >> $HGRCPATH
15 $ cat << EOF >> $HGRCPATH
16 > [format]
16 > [format]
17 > usefncache = 0
17 > usefncache = 0
18 > EOF
18 > EOF
19 #endif
19 #endif
20
20
21 Server setup
21 Server setup
22
22
23 $ hg init master
23 $ hg init master
24 $ cd master
24 $ cd master
25 $ mkdir dir
25 $ mkdir dir
26 $ mkdir dir/src
26 $ mkdir dir/src
27 $ cd dir/src
27 $ cd dir/src
28 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "F$x"; hg add "F$x"; hg commit -m "Commit src $x"; done
28 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "F$x"; hg add "F$x"; hg commit -m "Commit src $x"; done
29
29
30 $ cd ..
30 $ cd ..
31 $ mkdir tests
31 $ mkdir tests
32 $ cd tests
32 $ cd tests
33 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "F$x"; hg add "F$x"; hg commit -m "Commit src $x"; done
33 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "F$x"; hg add "F$x"; hg commit -m "Commit src $x"; done
34 $ cd ../../..
34 $ cd ../../..
35
35
36 Trying to stream clone when the server does not support it
36 Trying to stream clone when the server does not support it
37
37
38 $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/F10" --stream
38 $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/F10" --stream
39 streaming all changes
39 streaming all changes
40 remote: abort: server does not support narrow stream clones
40 remote: abort: server does not support narrow stream clones
41 abort: pull failed on remote
41 abort: pull failed on remote
42 [100]
42 [100]
43
43
44 Enable stream clone on the server
44 Enable stream clone on the server
45
45
46 $ echo "[experimental]" >> master/.hg/hgrc
46 $ echo "[experimental]" >> master/.hg/hgrc
47 $ echo "server.stream-narrow-clones=True" >> master/.hg/hgrc
47 $ echo "server.stream-narrow-clones=True" >> master/.hg/hgrc
48
48
49 Cloning a specific file when stream clone is supported
49 Cloning a specific file when stream clone is supported
50
50
51 $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/F10" --stream
51 $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/F10" --stream
52 streaming all changes
52 streaming all changes
53 * files to transfer, * KB of data (glob)
53 * files to transfer, * KB of data (glob)
54 transferred * KB in * seconds (* */sec) (glob)
54 transferred * KB in * seconds (* */sec) (glob)
55
55
56 $ cd narrow
56 $ cd narrow
57 $ ls -A
57 $ ls -A
58 .hg
58 .hg
59 $ hg tracked
59 $ hg tracked
60 I path:dir/src/F10
60 I path:dir/src/F10
61
61
62 Making sure we have the correct set of requirements
62 Making sure we have the correct set of requirements
63
63
64 $ hg debugrequires
64 $ hg debugrequires
65 dotencode (tree !)
65 dotencode (tree !)
66 dotencode (flat-fncache !)
66 dotencode (flat-fncache !)
67 dirstate-v2 (dirstate-v2 !)
67 dirstate-v2 (dirstate-v2 !)
68 fncache (tree !)
68 fncache (tree !)
69 fncache (flat-fncache !)
69 fncache (flat-fncache !)
70 generaldelta
70 generaldelta
71 narrowhg-experimental
71 narrowhg-experimental
72 persistent-nodemap (rust !)
72 persistent-nodemap (rust !)
73 revlog-compression-zstd (zstd !)
73 revlog-compression-zstd (zstd !)
74 revlogv1
74 revlogv1
75 share-safe
75 share-safe
76 sparserevlog
76 sparserevlog
77 store
77 store
78 treemanifest (tree !)
78 treemanifest (tree !)
79
79
80 Making sure store has the required files
80 Making sure store has the required files
81
81
82 $ ls .hg/store/
82 $ ls .hg/store/
83 00changelog.i
83 00changelog.i
84 00manifest.i
84 00manifest.i
85 data
85 data
86 fncache (tree !)
86 fncache (tree !)
87 fncache (flat-fncache !)
87 fncache (flat-fncache !)
88 meta (tree !)
88 meta (tree !)
89 narrowspec
89 narrowspec
90 requires
90 requires
91 undo
91 undo
92 undo.backupfiles
92 undo.backupfiles
93 undo.phaseroots
94
93
95 Checking that repository has all the required data and not broken
94 Checking that repository has all the required data and not broken
96
95
97 $ hg verify
96 $ hg verify
98 checking changesets
97 checking changesets
99 checking manifests
98 checking manifests
100 checking directory manifests (tree !)
99 checking directory manifests (tree !)
101 crosschecking files in changesets and manifests
100 crosschecking files in changesets and manifests
102 checking files
101 checking files
103 checking dirstate
102 checking dirstate
104 checked 40 changesets with 1 changes to 1 files
103 checked 40 changesets with 1 changes to 1 files
@@ -1,2103 +1,2101 b''
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > [format]
6 > [format]
7 > # stabilize test across variant
7 > # stabilize test across variant
8 > revlog-compression=zlib
8 > revlog-compression=zlib
9 > [storage]
9 > [storage]
10 > dirstate-v2.slow-path=allow
10 > dirstate-v2.slow-path=allow
11 > EOF
11 > EOF
12
12
13 store and revlogv1 are required in source
13 store and revlogv1 are required in source
14
14
15 $ hg --config format.usestore=false init no-store
15 $ hg --config format.usestore=false init no-store
16 $ hg -R no-store debugupgraderepo
16 $ hg -R no-store debugupgraderepo
17 abort: cannot upgrade repository; requirement missing: store
17 abort: cannot upgrade repository; requirement missing: store
18 [255]
18 [255]
19
19
20 $ hg init no-revlogv1
20 $ hg init no-revlogv1
21 $ cat > no-revlogv1/.hg/requires << EOF
21 $ cat > no-revlogv1/.hg/requires << EOF
22 > dotencode
22 > dotencode
23 > fncache
23 > fncache
24 > generaldelta
24 > generaldelta
25 > store
25 > store
26 > EOF
26 > EOF
27
27
28 $ hg -R no-revlogv1 debugupgraderepo
28 $ hg -R no-revlogv1 debugupgraderepo
29 abort: cannot upgrade repository; missing a revlog version
29 abort: cannot upgrade repository; missing a revlog version
30 [255]
30 [255]
31
31
32 Cannot upgrade shared repositories
32 Cannot upgrade shared repositories
33
33
34 $ hg init share-parent
34 $ hg init share-parent
35 $ hg -R share-parent debugbuilddag -n .+9
35 $ hg -R share-parent debugbuilddag -n .+9
36 $ hg -R share-parent up tip
36 $ hg -R share-parent up tip
37 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
38 $ hg -q share share-parent share-child
38 $ hg -q share share-parent share-child
39
39
40 $ hg -R share-child debugupgraderepo --config format.sparse-revlog=no
40 $ hg -R share-child debugupgraderepo --config format.sparse-revlog=no
41 abort: cannot use these actions on a share repository: sparserevlog
41 abort: cannot use these actions on a share repository: sparserevlog
42 (upgrade the main repository directly)
42 (upgrade the main repository directly)
43 [255]
43 [255]
44
44
45 Unless the action is compatible with share
45 Unless the action is compatible with share
46
46
47 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet
47 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet
48 requirements
48 requirements
49 preserved: * (glob)
49 preserved: * (glob)
50 added: dirstate-v2
50 added: dirstate-v2
51
51
52 no revlogs to process
52 no revlogs to process
53
53
54
54
55 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet --run
55 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet --run
56 upgrade will perform the following actions:
56 upgrade will perform the following actions:
57
57
58 requirements
58 requirements
59 preserved: * (glob)
59 preserved: * (glob)
60 added: dirstate-v2
60 added: dirstate-v2
61
61
62 no revlogs to process
62 no revlogs to process
63
63
64 $ hg debugformat -R share-child | grep dirstate-v2
64 $ hg debugformat -R share-child | grep dirstate-v2
65 dirstate-v2: yes
65 dirstate-v2: yes
66 $ hg debugformat -R share-parent | grep dirstate-v2
66 $ hg debugformat -R share-parent | grep dirstate-v2
67 dirstate-v2: no
67 dirstate-v2: no
68 $ hg status --all -R share-child
68 $ hg status --all -R share-child
69 C nf0
69 C nf0
70 C nf1
70 C nf1
71 C nf2
71 C nf2
72 C nf3
72 C nf3
73 C nf4
73 C nf4
74 C nf5
74 C nf5
75 C nf6
75 C nf6
76 C nf7
76 C nf7
77 C nf8
77 C nf8
78 C nf9
78 C nf9
79 $ hg log -l 3 -R share-child
79 $ hg log -l 3 -R share-child
80 changeset: 9:0059eb38e4a4
80 changeset: 9:0059eb38e4a4
81 tag: tip
81 tag: tip
82 user: debugbuilddag
82 user: debugbuilddag
83 date: Thu Jan 01 00:00:09 1970 +0000
83 date: Thu Jan 01 00:00:09 1970 +0000
84 summary: r9
84 summary: r9
85
85
86 changeset: 8:4d5be70c8130
86 changeset: 8:4d5be70c8130
87 user: debugbuilddag
87 user: debugbuilddag
88 date: Thu Jan 01 00:00:08 1970 +0000
88 date: Thu Jan 01 00:00:08 1970 +0000
89 summary: r8
89 summary: r8
90
90
91 changeset: 7:e60bfe72517e
91 changeset: 7:e60bfe72517e
92 user: debugbuilddag
92 user: debugbuilddag
93 date: Thu Jan 01 00:00:07 1970 +0000
93 date: Thu Jan 01 00:00:07 1970 +0000
94 summary: r7
94 summary: r7
95
95
96 $ hg status --all -R share-parent
96 $ hg status --all -R share-parent
97 C nf0
97 C nf0
98 C nf1
98 C nf1
99 C nf2
99 C nf2
100 C nf3
100 C nf3
101 C nf4
101 C nf4
102 C nf5
102 C nf5
103 C nf6
103 C nf6
104 C nf7
104 C nf7
105 C nf8
105 C nf8
106 C nf9
106 C nf9
107 $ hg log -l 3 -R share-parent
107 $ hg log -l 3 -R share-parent
108 changeset: 9:0059eb38e4a4
108 changeset: 9:0059eb38e4a4
109 tag: tip
109 tag: tip
110 user: debugbuilddag
110 user: debugbuilddag
111 date: Thu Jan 01 00:00:09 1970 +0000
111 date: Thu Jan 01 00:00:09 1970 +0000
112 summary: r9
112 summary: r9
113
113
114 changeset: 8:4d5be70c8130
114 changeset: 8:4d5be70c8130
115 user: debugbuilddag
115 user: debugbuilddag
116 date: Thu Jan 01 00:00:08 1970 +0000
116 date: Thu Jan 01 00:00:08 1970 +0000
117 summary: r8
117 summary: r8
118
118
119 changeset: 7:e60bfe72517e
119 changeset: 7:e60bfe72517e
120 user: debugbuilddag
120 user: debugbuilddag
121 date: Thu Jan 01 00:00:07 1970 +0000
121 date: Thu Jan 01 00:00:07 1970 +0000
122 summary: r7
122 summary: r7
123
123
124
124
125 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=no --quiet --run
125 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=no --quiet --run
126 upgrade will perform the following actions:
126 upgrade will perform the following actions:
127
127
128 requirements
128 requirements
129 preserved: * (glob)
129 preserved: * (glob)
130 removed: dirstate-v2
130 removed: dirstate-v2
131
131
132 no revlogs to process
132 no revlogs to process
133
133
134 $ hg debugformat -R share-child | grep dirstate-v2
134 $ hg debugformat -R share-child | grep dirstate-v2
135 dirstate-v2: no
135 dirstate-v2: no
136 $ hg debugformat -R share-parent | grep dirstate-v2
136 $ hg debugformat -R share-parent | grep dirstate-v2
137 dirstate-v2: no
137 dirstate-v2: no
138 $ hg status --all -R share-child
138 $ hg status --all -R share-child
139 C nf0
139 C nf0
140 C nf1
140 C nf1
141 C nf2
141 C nf2
142 C nf3
142 C nf3
143 C nf4
143 C nf4
144 C nf5
144 C nf5
145 C nf6
145 C nf6
146 C nf7
146 C nf7
147 C nf8
147 C nf8
148 C nf9
148 C nf9
149 $ hg log -l 3 -R share-child
149 $ hg log -l 3 -R share-child
150 changeset: 9:0059eb38e4a4
150 changeset: 9:0059eb38e4a4
151 tag: tip
151 tag: tip
152 user: debugbuilddag
152 user: debugbuilddag
153 date: Thu Jan 01 00:00:09 1970 +0000
153 date: Thu Jan 01 00:00:09 1970 +0000
154 summary: r9
154 summary: r9
155
155
156 changeset: 8:4d5be70c8130
156 changeset: 8:4d5be70c8130
157 user: debugbuilddag
157 user: debugbuilddag
158 date: Thu Jan 01 00:00:08 1970 +0000
158 date: Thu Jan 01 00:00:08 1970 +0000
159 summary: r8
159 summary: r8
160
160
161 changeset: 7:e60bfe72517e
161 changeset: 7:e60bfe72517e
162 user: debugbuilddag
162 user: debugbuilddag
163 date: Thu Jan 01 00:00:07 1970 +0000
163 date: Thu Jan 01 00:00:07 1970 +0000
164 summary: r7
164 summary: r7
165
165
166 $ hg status --all -R share-parent
166 $ hg status --all -R share-parent
167 C nf0
167 C nf0
168 C nf1
168 C nf1
169 C nf2
169 C nf2
170 C nf3
170 C nf3
171 C nf4
171 C nf4
172 C nf5
172 C nf5
173 C nf6
173 C nf6
174 C nf7
174 C nf7
175 C nf8
175 C nf8
176 C nf9
176 C nf9
177 $ hg log -l 3 -R share-parent
177 $ hg log -l 3 -R share-parent
178 changeset: 9:0059eb38e4a4
178 changeset: 9:0059eb38e4a4
179 tag: tip
179 tag: tip
180 user: debugbuilddag
180 user: debugbuilddag
181 date: Thu Jan 01 00:00:09 1970 +0000
181 date: Thu Jan 01 00:00:09 1970 +0000
182 summary: r9
182 summary: r9
183
183
184 changeset: 8:4d5be70c8130
184 changeset: 8:4d5be70c8130
185 user: debugbuilddag
185 user: debugbuilddag
186 date: Thu Jan 01 00:00:08 1970 +0000
186 date: Thu Jan 01 00:00:08 1970 +0000
187 summary: r8
187 summary: r8
188
188
189 changeset: 7:e60bfe72517e
189 changeset: 7:e60bfe72517e
190 user: debugbuilddag
190 user: debugbuilddag
191 date: Thu Jan 01 00:00:07 1970 +0000
191 date: Thu Jan 01 00:00:07 1970 +0000
192 summary: r7
192 summary: r7
193
193
194
194
195 Do not yet support upgrading treemanifest repos
195 Do not yet support upgrading treemanifest repos
196
196
197 $ hg --config experimental.treemanifest=true init treemanifest
197 $ hg --config experimental.treemanifest=true init treemanifest
198 $ hg -R treemanifest debugupgraderepo
198 $ hg -R treemanifest debugupgraderepo
199 abort: cannot upgrade repository; unsupported source requirement: treemanifest
199 abort: cannot upgrade repository; unsupported source requirement: treemanifest
200 [255]
200 [255]
201
201
202 Cannot add treemanifest requirement during upgrade
202 Cannot add treemanifest requirement during upgrade
203
203
204 $ hg init disallowaddedreq
204 $ hg init disallowaddedreq
205 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
205 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
206 abort: cannot upgrade repository; do not support adding requirement: treemanifest
206 abort: cannot upgrade repository; do not support adding requirement: treemanifest
207 [255]
207 [255]
208
208
209 An upgrade of a repository created with recommended settings only suggests optimizations
209 An upgrade of a repository created with recommended settings only suggests optimizations
210
210
211 $ hg init empty
211 $ hg init empty
212 $ cd empty
212 $ cd empty
213 $ hg debugformat
213 $ hg debugformat
214 format-variant repo
214 format-variant repo
215 fncache: yes
215 fncache: yes
216 dirstate-v2: no
216 dirstate-v2: no
217 tracked-hint: no
217 tracked-hint: no
218 dotencode: yes
218 dotencode: yes
219 generaldelta: yes
219 generaldelta: yes
220 share-safe: yes
220 share-safe: yes
221 sparserevlog: yes
221 sparserevlog: yes
222 persistent-nodemap: no (no-rust !)
222 persistent-nodemap: no (no-rust !)
223 persistent-nodemap: yes (rust !)
223 persistent-nodemap: yes (rust !)
224 copies-sdc: no
224 copies-sdc: no
225 revlog-v2: no
225 revlog-v2: no
226 changelog-v2: no
226 changelog-v2: no
227 plain-cl-delta: yes
227 plain-cl-delta: yes
228 compression: zlib
228 compression: zlib
229 compression-level: default
229 compression-level: default
230 $ hg debugformat --verbose
230 $ hg debugformat --verbose
231 format-variant repo config default
231 format-variant repo config default
232 fncache: yes yes yes
232 fncache: yes yes yes
233 dirstate-v2: no no no
233 dirstate-v2: no no no
234 tracked-hint: no no no
234 tracked-hint: no no no
235 dotencode: yes yes yes
235 dotencode: yes yes yes
236 generaldelta: yes yes yes
236 generaldelta: yes yes yes
237 share-safe: yes yes yes
237 share-safe: yes yes yes
238 sparserevlog: yes yes yes
238 sparserevlog: yes yes yes
239 persistent-nodemap: no no no (no-rust !)
239 persistent-nodemap: no no no (no-rust !)
240 persistent-nodemap: yes yes no (rust !)
240 persistent-nodemap: yes yes no (rust !)
241 copies-sdc: no no no
241 copies-sdc: no no no
242 revlog-v2: no no no
242 revlog-v2: no no no
243 changelog-v2: no no no
243 changelog-v2: no no no
244 plain-cl-delta: yes yes yes
244 plain-cl-delta: yes yes yes
245 compression: zlib zlib zlib (no-zstd !)
245 compression: zlib zlib zlib (no-zstd !)
246 compression: zlib zlib zstd (zstd !)
246 compression: zlib zlib zstd (zstd !)
247 compression-level: default default default
247 compression-level: default default default
248 $ hg debugformat --verbose --config format.usefncache=no
248 $ hg debugformat --verbose --config format.usefncache=no
249 format-variant repo config default
249 format-variant repo config default
250 fncache: yes no yes
250 fncache: yes no yes
251 dirstate-v2: no no no
251 dirstate-v2: no no no
252 tracked-hint: no no no
252 tracked-hint: no no no
253 dotencode: yes no yes
253 dotencode: yes no yes
254 generaldelta: yes yes yes
254 generaldelta: yes yes yes
255 share-safe: yes yes yes
255 share-safe: yes yes yes
256 sparserevlog: yes yes yes
256 sparserevlog: yes yes yes
257 persistent-nodemap: no no no (no-rust !)
257 persistent-nodemap: no no no (no-rust !)
258 persistent-nodemap: yes yes no (rust !)
258 persistent-nodemap: yes yes no (rust !)
259 copies-sdc: no no no
259 copies-sdc: no no no
260 revlog-v2: no no no
260 revlog-v2: no no no
261 changelog-v2: no no no
261 changelog-v2: no no no
262 plain-cl-delta: yes yes yes
262 plain-cl-delta: yes yes yes
263 compression: zlib zlib zlib (no-zstd !)
263 compression: zlib zlib zlib (no-zstd !)
264 compression: zlib zlib zstd (zstd !)
264 compression: zlib zlib zstd (zstd !)
265 compression-level: default default default
265 compression-level: default default default
266 $ hg debugformat --verbose --config format.usefncache=no --color=debug
266 $ hg debugformat --verbose --config format.usefncache=no --color=debug
267 format-variant repo config default
267 format-variant repo config default
268 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
268 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
269 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
269 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
270 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
270 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
271 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
271 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
272 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
272 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
273 [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
273 [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
274 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
274 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
275 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
275 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
276 [formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
276 [formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
277 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
277 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
278 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
278 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
279 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
279 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
280 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
280 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
281 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
281 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
282 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
282 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
283 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
283 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
284 $ hg debugformat -Tjson
284 $ hg debugformat -Tjson
285 [
285 [
286 {
286 {
287 "config": true,
287 "config": true,
288 "default": true,
288 "default": true,
289 "name": "fncache",
289 "name": "fncache",
290 "repo": true
290 "repo": true
291 },
291 },
292 {
292 {
293 "config": false,
293 "config": false,
294 "default": false,
294 "default": false,
295 "name": "dirstate-v2",
295 "name": "dirstate-v2",
296 "repo": false
296 "repo": false
297 },
297 },
298 {
298 {
299 "config": false,
299 "config": false,
300 "default": false,
300 "default": false,
301 "name": "tracked-hint",
301 "name": "tracked-hint",
302 "repo": false
302 "repo": false
303 },
303 },
304 {
304 {
305 "config": true,
305 "config": true,
306 "default": true,
306 "default": true,
307 "name": "dotencode",
307 "name": "dotencode",
308 "repo": true
308 "repo": true
309 },
309 },
310 {
310 {
311 "config": true,
311 "config": true,
312 "default": true,
312 "default": true,
313 "name": "generaldelta",
313 "name": "generaldelta",
314 "repo": true
314 "repo": true
315 },
315 },
316 {
316 {
317 "config": true,
317 "config": true,
318 "default": true,
318 "default": true,
319 "name": "share-safe",
319 "name": "share-safe",
320 "repo": true
320 "repo": true
321 },
321 },
322 {
322 {
323 "config": true,
323 "config": true,
324 "default": true,
324 "default": true,
325 "name": "sparserevlog",
325 "name": "sparserevlog",
326 "repo": true
326 "repo": true
327 },
327 },
328 {
328 {
329 "config": false, (no-rust !)
329 "config": false, (no-rust !)
330 "config": true, (rust !)
330 "config": true, (rust !)
331 "default": false,
331 "default": false,
332 "name": "persistent-nodemap",
332 "name": "persistent-nodemap",
333 "repo": false (no-rust !)
333 "repo": false (no-rust !)
334 "repo": true (rust !)
334 "repo": true (rust !)
335 },
335 },
336 {
336 {
337 "config": false,
337 "config": false,
338 "default": false,
338 "default": false,
339 "name": "copies-sdc",
339 "name": "copies-sdc",
340 "repo": false
340 "repo": false
341 },
341 },
342 {
342 {
343 "config": false,
343 "config": false,
344 "default": false,
344 "default": false,
345 "name": "revlog-v2",
345 "name": "revlog-v2",
346 "repo": false
346 "repo": false
347 },
347 },
348 {
348 {
349 "config": false,
349 "config": false,
350 "default": false,
350 "default": false,
351 "name": "changelog-v2",
351 "name": "changelog-v2",
352 "repo": false
352 "repo": false
353 },
353 },
354 {
354 {
355 "config": true,
355 "config": true,
356 "default": true,
356 "default": true,
357 "name": "plain-cl-delta",
357 "name": "plain-cl-delta",
358 "repo": true
358 "repo": true
359 },
359 },
360 {
360 {
361 "config": "zlib",
361 "config": "zlib",
362 "default": "zlib", (no-zstd !)
362 "default": "zlib", (no-zstd !)
363 "default": "zstd", (zstd !)
363 "default": "zstd", (zstd !)
364 "name": "compression",
364 "name": "compression",
365 "repo": "zlib"
365 "repo": "zlib"
366 },
366 },
367 {
367 {
368 "config": "default",
368 "config": "default",
369 "default": "default",
369 "default": "default",
370 "name": "compression-level",
370 "name": "compression-level",
371 "repo": "default"
371 "repo": "default"
372 }
372 }
373 ]
373 ]
374 $ hg debugupgraderepo
374 $ hg debugupgraderepo
375 (no format upgrades found in existing repository)
375 (no format upgrades found in existing repository)
376 performing an upgrade with "--run" will make the following changes:
376 performing an upgrade with "--run" will make the following changes:
377
377
378 requirements
378 requirements
379 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
379 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
380 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
380 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
381
381
382 no revlogs to process
382 no revlogs to process
383
383
384 additional optimizations are available by specifying "--optimize <name>":
384 additional optimizations are available by specifying "--optimize <name>":
385
385
386 re-delta-parent
386 re-delta-parent
387 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
387 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
388
388
389 re-delta-multibase
389 re-delta-multibase
390 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
390 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
391
391
392 re-delta-all
392 re-delta-all
393 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
393 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
394
394
395 re-delta-fulladd
395 re-delta-fulladd
396 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
396 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
397
397
398
398
399 $ hg debugupgraderepo --quiet
399 $ hg debugupgraderepo --quiet
400 requirements
400 requirements
401 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
401 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
402 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
402 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
403
403
404 no revlogs to process
404 no revlogs to process
405
405
406
406
407 --optimize can be used to add optimizations
407 --optimize can be used to add optimizations
408
408
409 $ hg debugupgrade --optimize 're-delta-parent'
409 $ hg debugupgrade --optimize 're-delta-parent'
410 (no format upgrades found in existing repository)
410 (no format upgrades found in existing repository)
411 performing an upgrade with "--run" will make the following changes:
411 performing an upgrade with "--run" will make the following changes:
412
412
413 requirements
413 requirements
414 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
414 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
415 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
415 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
416
416
417 optimisations: re-delta-parent
417 optimisations: re-delta-parent
418
418
419 re-delta-parent
419 re-delta-parent
420 deltas within internal storage will choose a new base revision if needed
420 deltas within internal storage will choose a new base revision if needed
421
421
422 processed revlogs:
422 processed revlogs:
423 - all-filelogs
423 - all-filelogs
424 - changelog
424 - changelog
425 - manifest
425 - manifest
426
426
427 additional optimizations are available by specifying "--optimize <name>":
427 additional optimizations are available by specifying "--optimize <name>":
428
428
429 re-delta-multibase
429 re-delta-multibase
430 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
430 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
431
431
432 re-delta-all
432 re-delta-all
433 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
433 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
434
434
435 re-delta-fulladd
435 re-delta-fulladd
436 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
436 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
437
437
438
438
439 modern form of the option
439 modern form of the option
440
440
441 $ hg debugupgrade --optimize re-delta-parent
441 $ hg debugupgrade --optimize re-delta-parent
442 (no format upgrades found in existing repository)
442 (no format upgrades found in existing repository)
443 performing an upgrade with "--run" will make the following changes:
443 performing an upgrade with "--run" will make the following changes:
444
444
445 requirements
445 requirements
446 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
446 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
447 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
447 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
448
448
449 optimisations: re-delta-parent
449 optimisations: re-delta-parent
450
450
451 re-delta-parent
451 re-delta-parent
452 deltas within internal storage will choose a new base revision if needed
452 deltas within internal storage will choose a new base revision if needed
453
453
454 processed revlogs:
454 processed revlogs:
455 - all-filelogs
455 - all-filelogs
456 - changelog
456 - changelog
457 - manifest
457 - manifest
458
458
459 additional optimizations are available by specifying "--optimize <name>":
459 additional optimizations are available by specifying "--optimize <name>":
460
460
461 re-delta-multibase
461 re-delta-multibase
462 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
462 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
463
463
464 re-delta-all
464 re-delta-all
465 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
465 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
466
466
467 re-delta-fulladd
467 re-delta-fulladd
468 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
468 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
469
469
470
470
471 $ hg debugupgrade --optimize re-delta-parent --quiet
471 $ hg debugupgrade --optimize re-delta-parent --quiet
472 requirements
472 requirements
473 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
473 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
474 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
474 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
475
475
476 optimisations: re-delta-parent
476 optimisations: re-delta-parent
477
477
478 processed revlogs:
478 processed revlogs:
479 - all-filelogs
479 - all-filelogs
480 - changelog
480 - changelog
481 - manifest
481 - manifest
482
482
483
483
484 passing multiple optimization:
484 passing multiple optimization:
485
485
486 $ hg debugupgrade --optimize re-delta-parent --optimize re-delta-multibase --quiet
486 $ hg debugupgrade --optimize re-delta-parent --optimize re-delta-multibase --quiet
487 requirements
487 requirements
488 preserved: * (glob)
488 preserved: * (glob)
489
489
490 optimisations: re-delta-multibase, re-delta-parent
490 optimisations: re-delta-multibase, re-delta-parent
491
491
492 processed revlogs:
492 processed revlogs:
493 - all-filelogs
493 - all-filelogs
494 - changelog
494 - changelog
495 - manifest
495 - manifest
496
496
497
497
498 unknown optimization:
498 unknown optimization:
499
499
500 $ hg debugupgrade --optimize foobar
500 $ hg debugupgrade --optimize foobar
501 abort: unknown optimization action requested: foobar
501 abort: unknown optimization action requested: foobar
502 (run without arguments to see valid optimizations)
502 (run without arguments to see valid optimizations)
503 [255]
503 [255]
504
504
505 Various sub-optimal detections work
505 Various sub-optimal detections work
506
506
507 $ cat > .hg/requires << EOF
507 $ cat > .hg/requires << EOF
508 > revlogv1
508 > revlogv1
509 > store
509 > store
510 > EOF
510 > EOF
511
511
512 $ hg debugformat
512 $ hg debugformat
513 format-variant repo
513 format-variant repo
514 fncache: no
514 fncache: no
515 dirstate-v2: no
515 dirstate-v2: no
516 tracked-hint: no
516 tracked-hint: no
517 dotencode: no
517 dotencode: no
518 generaldelta: no
518 generaldelta: no
519 share-safe: no
519 share-safe: no
520 sparserevlog: no
520 sparserevlog: no
521 persistent-nodemap: no
521 persistent-nodemap: no
522 copies-sdc: no
522 copies-sdc: no
523 revlog-v2: no
523 revlog-v2: no
524 changelog-v2: no
524 changelog-v2: no
525 plain-cl-delta: yes
525 plain-cl-delta: yes
526 compression: zlib
526 compression: zlib
527 compression-level: default
527 compression-level: default
528 $ hg debugformat --verbose
528 $ hg debugformat --verbose
529 format-variant repo config default
529 format-variant repo config default
530 fncache: no yes yes
530 fncache: no yes yes
531 dirstate-v2: no no no
531 dirstate-v2: no no no
532 tracked-hint: no no no
532 tracked-hint: no no no
533 dotencode: no yes yes
533 dotencode: no yes yes
534 generaldelta: no yes yes
534 generaldelta: no yes yes
535 share-safe: no yes yes
535 share-safe: no yes yes
536 sparserevlog: no yes yes
536 sparserevlog: no yes yes
537 persistent-nodemap: no no no (no-rust !)
537 persistent-nodemap: no no no (no-rust !)
538 persistent-nodemap: no yes no (rust !)
538 persistent-nodemap: no yes no (rust !)
539 copies-sdc: no no no
539 copies-sdc: no no no
540 revlog-v2: no no no
540 revlog-v2: no no no
541 changelog-v2: no no no
541 changelog-v2: no no no
542 plain-cl-delta: yes yes yes
542 plain-cl-delta: yes yes yes
543 compression: zlib zlib zlib (no-zstd !)
543 compression: zlib zlib zlib (no-zstd !)
544 compression: zlib zlib zstd (zstd !)
544 compression: zlib zlib zstd (zstd !)
545 compression-level: default default default
545 compression-level: default default default
546 $ hg debugformat --verbose --config format.usegeneraldelta=no
546 $ hg debugformat --verbose --config format.usegeneraldelta=no
547 format-variant repo config default
547 format-variant repo config default
548 fncache: no yes yes
548 fncache: no yes yes
549 dirstate-v2: no no no
549 dirstate-v2: no no no
550 tracked-hint: no no no
550 tracked-hint: no no no
551 dotencode: no yes yes
551 dotencode: no yes yes
552 generaldelta: no no yes
552 generaldelta: no no yes
553 share-safe: no yes yes
553 share-safe: no yes yes
554 sparserevlog: no no yes
554 sparserevlog: no no yes
555 persistent-nodemap: no no no (no-rust !)
555 persistent-nodemap: no no no (no-rust !)
556 persistent-nodemap: no yes no (rust !)
556 persistent-nodemap: no yes no (rust !)
557 copies-sdc: no no no
557 copies-sdc: no no no
558 revlog-v2: no no no
558 revlog-v2: no no no
559 changelog-v2: no no no
559 changelog-v2: no no no
560 plain-cl-delta: yes yes yes
560 plain-cl-delta: yes yes yes
561 compression: zlib zlib zlib (no-zstd !)
561 compression: zlib zlib zlib (no-zstd !)
562 compression: zlib zlib zstd (zstd !)
562 compression: zlib zlib zstd (zstd !)
563 compression-level: default default default
563 compression-level: default default default
564 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
564 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
565 format-variant repo config default
565 format-variant repo config default
566 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
566 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
567 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
567 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
568 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
568 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
569 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
569 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
570 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
570 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
571 [formatvariant.name.mismatchconfig|share-safe: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
571 [formatvariant.name.mismatchconfig|share-safe: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
572 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
572 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
573 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
573 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
574 [formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
574 [formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
575 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
575 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
576 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
576 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
577 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
577 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
578 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
578 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
579 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
579 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
580 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
580 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
581 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
581 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
582 $ hg debugupgraderepo
582 $ hg debugupgraderepo
583 note: selecting all-filelogs for processing to change: dotencode
583 note: selecting all-filelogs for processing to change: dotencode
584 note: selecting all-manifestlogs for processing to change: dotencode
584 note: selecting all-manifestlogs for processing to change: dotencode
585 note: selecting changelog for processing to change: dotencode
585 note: selecting changelog for processing to change: dotencode
586
586
587 repository lacks features recommended by current config options:
587 repository lacks features recommended by current config options:
588
588
589 fncache
589 fncache
590 long and reserved filenames may not work correctly; repository performance is sub-optimal
590 long and reserved filenames may not work correctly; repository performance is sub-optimal
591
591
592 dotencode
592 dotencode
593 storage of filenames beginning with a period or space may not work correctly
593 storage of filenames beginning with a period or space may not work correctly
594
594
595 generaldelta
595 generaldelta
596 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
596 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
597
597
598 share-safe
598 share-safe
599 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
599 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
600
600
601 sparserevlog
601 sparserevlog
602 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
602 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
603
603
604 persistent-nodemap (rust !)
604 persistent-nodemap (rust !)
605 persist the node -> rev mapping on disk to speedup lookup (rust !)
605 persist the node -> rev mapping on disk to speedup lookup (rust !)
606 (rust !)
606 (rust !)
607
607
608 performing an upgrade with "--run" will make the following changes:
608 performing an upgrade with "--run" will make the following changes:
609
609
610 requirements
610 requirements
611 preserved: revlogv1, store
611 preserved: revlogv1, store
612 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
612 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
613 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
613 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
614
614
615 fncache
615 fncache
616 repository will be more resilient to storing certain paths and performance of certain operations should be improved
616 repository will be more resilient to storing certain paths and performance of certain operations should be improved
617
617
618 dotencode
618 dotencode
619 repository will be better able to store files beginning with a space or period
619 repository will be better able to store files beginning with a space or period
620
620
621 generaldelta
621 generaldelta
622 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
622 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
623
623
624 share-safe
624 share-safe
625 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
625 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
626
626
627 sparserevlog
627 sparserevlog
628 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
628 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
629
629
630 persistent-nodemap (rust !)
630 persistent-nodemap (rust !)
631 Speedup revision lookup by node id. (rust !)
631 Speedup revision lookup by node id. (rust !)
632 (rust !)
632 (rust !)
633 processed revlogs:
633 processed revlogs:
634 - all-filelogs
634 - all-filelogs
635 - changelog
635 - changelog
636 - manifest
636 - manifest
637
637
638 additional optimizations are available by specifying "--optimize <name>":
638 additional optimizations are available by specifying "--optimize <name>":
639
639
640 re-delta-parent
640 re-delta-parent
641 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
641 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
642
642
643 re-delta-multibase
643 re-delta-multibase
644 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
644 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
645
645
646 re-delta-all
646 re-delta-all
647 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
647 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
648
648
649 re-delta-fulladd
649 re-delta-fulladd
650 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
650 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
651
651
652 $ hg debugupgraderepo --quiet
652 $ hg debugupgraderepo --quiet
653 requirements
653 requirements
654 preserved: revlogv1, store
654 preserved: revlogv1, store
655 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
655 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
656 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
656 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
657
657
658 processed revlogs:
658 processed revlogs:
659 - all-filelogs
659 - all-filelogs
660 - changelog
660 - changelog
661 - manifest
661 - manifest
662
662
663
663
664 $ hg --config format.dotencode=false debugupgraderepo
664 $ hg --config format.dotencode=false debugupgraderepo
665 note: selecting all-filelogs for processing to change: fncache
665 note: selecting all-filelogs for processing to change: fncache
666 note: selecting all-manifestlogs for processing to change: fncache
666 note: selecting all-manifestlogs for processing to change: fncache
667 note: selecting changelog for processing to change: fncache
667 note: selecting changelog for processing to change: fncache
668
668
669 repository lacks features recommended by current config options:
669 repository lacks features recommended by current config options:
670
670
671 fncache
671 fncache
672 long and reserved filenames may not work correctly; repository performance is sub-optimal
672 long and reserved filenames may not work correctly; repository performance is sub-optimal
673
673
674 generaldelta
674 generaldelta
675 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
675 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
676
676
677 share-safe
677 share-safe
678 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
678 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
679
679
680 sparserevlog
680 sparserevlog
681 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
681 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
682
682
683 persistent-nodemap (rust !)
683 persistent-nodemap (rust !)
684 persist the node -> rev mapping on disk to speedup lookup (rust !)
684 persist the node -> rev mapping on disk to speedup lookup (rust !)
685 (rust !)
685 (rust !)
686 repository lacks features used by the default config options:
686 repository lacks features used by the default config options:
687
687
688 dotencode
688 dotencode
689 storage of filenames beginning with a period or space may not work correctly
689 storage of filenames beginning with a period or space may not work correctly
690
690
691
691
692 performing an upgrade with "--run" will make the following changes:
692 performing an upgrade with "--run" will make the following changes:
693
693
694 requirements
694 requirements
695 preserved: revlogv1, store
695 preserved: revlogv1, store
696 added: fncache, generaldelta, share-safe, sparserevlog (no-rust !)
696 added: fncache, generaldelta, share-safe, sparserevlog (no-rust !)
697 added: fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
697 added: fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
698
698
699 fncache
699 fncache
700 repository will be more resilient to storing certain paths and performance of certain operations should be improved
700 repository will be more resilient to storing certain paths and performance of certain operations should be improved
701
701
702 generaldelta
702 generaldelta
703 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
703 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
704
704
705 share-safe
705 share-safe
706 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
706 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
707
707
708 sparserevlog
708 sparserevlog
709 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
709 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
710
710
711 persistent-nodemap (rust !)
711 persistent-nodemap (rust !)
712 Speedup revision lookup by node id. (rust !)
712 Speedup revision lookup by node id. (rust !)
713 (rust !)
713 (rust !)
714 processed revlogs:
714 processed revlogs:
715 - all-filelogs
715 - all-filelogs
716 - changelog
716 - changelog
717 - manifest
717 - manifest
718
718
719 additional optimizations are available by specifying "--optimize <name>":
719 additional optimizations are available by specifying "--optimize <name>":
720
720
721 re-delta-parent
721 re-delta-parent
722 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
722 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
723
723
724 re-delta-multibase
724 re-delta-multibase
725 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
725 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
726
726
727 re-delta-all
727 re-delta-all
728 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
728 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
729
729
730 re-delta-fulladd
730 re-delta-fulladd
731 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
731 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
732
732
733
733
734 $ cd ..
734 $ cd ..
735
735
736 Upgrading a repository that is already modern essentially no-ops
736 Upgrading a repository that is already modern essentially no-ops
737
737
738 $ hg init modern
738 $ hg init modern
739 $ hg -R modern debugupgraderepo --run
739 $ hg -R modern debugupgraderepo --run
740 nothing to do
740 nothing to do
741
741
742 Upgrading a repository to generaldelta works
742 Upgrading a repository to generaldelta works
743
743
744 $ hg --config format.usegeneraldelta=false init upgradegd
744 $ hg --config format.usegeneraldelta=false init upgradegd
745 $ cd upgradegd
745 $ cd upgradegd
746 $ touch f0
746 $ touch f0
747 $ hg -q commit -A -m initial
747 $ hg -q commit -A -m initial
748 $ mkdir FooBarDirectory.d
748 $ mkdir FooBarDirectory.d
749 $ touch FooBarDirectory.d/f1
749 $ touch FooBarDirectory.d/f1
750 $ hg -q commit -A -m 'add f1'
750 $ hg -q commit -A -m 'add f1'
751 $ hg -q up -r 0
751 $ hg -q up -r 0
752 >>> import random
752 >>> import random
753 >>> random.seed(0) # have a reproducible content
753 >>> random.seed(0) # have a reproducible content
754 >>> with open("f2", "wb") as f:
754 >>> with open("f2", "wb") as f:
755 ... for i in range(100000):
755 ... for i in range(100000):
756 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
756 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
757 $ hg -q commit -A -m 'add f2'
757 $ hg -q commit -A -m 'add f2'
758
758
759 make sure we have a .d file
759 make sure we have a .d file
760
760
761 $ ls -d .hg/store/data/*
761 $ ls -d .hg/store/data/*
762 .hg/store/data/_foo_bar_directory.d.hg
762 .hg/store/data/_foo_bar_directory.d.hg
763 .hg/store/data/f0.i
763 .hg/store/data/f0.i
764 .hg/store/data/f2.d
764 .hg/store/data/f2.d
765 .hg/store/data/f2.i
765 .hg/store/data/f2.i
766
766
767 $ hg debugupgraderepo --run --config format.sparse-revlog=false
767 $ hg debugupgraderepo --run --config format.sparse-revlog=false
768 note: selecting all-filelogs for processing to change: generaldelta
768 note: selecting all-filelogs for processing to change: generaldelta
769 note: selecting all-manifestlogs for processing to change: generaldelta
769 note: selecting all-manifestlogs for processing to change: generaldelta
770 note: selecting changelog for processing to change: generaldelta
770 note: selecting changelog for processing to change: generaldelta
771
771
772 upgrade will perform the following actions:
772 upgrade will perform the following actions:
773
773
774 requirements
774 requirements
775 preserved: dotencode, fncache, revlogv1, share-safe, store (no-rust !)
775 preserved: dotencode, fncache, revlogv1, share-safe, store (no-rust !)
776 preserved: dotencode, fncache, persistent-nodemap, revlogv1, share-safe, store (rust !)
776 preserved: dotencode, fncache, persistent-nodemap, revlogv1, share-safe, store (rust !)
777 added: generaldelta
777 added: generaldelta
778
778
779 generaldelta
779 generaldelta
780 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
780 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
781
781
782 processed revlogs:
782 processed revlogs:
783 - all-filelogs
783 - all-filelogs
784 - changelog
784 - changelog
785 - manifest
785 - manifest
786
786
787 beginning upgrade...
787 beginning upgrade...
788 repository locked and read-only
788 repository locked and read-only
789 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
789 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
790 (it is safe to interrupt this process any time before data migration completes)
790 (it is safe to interrupt this process any time before data migration completes)
791 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
791 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
792 migrating 519 KB in store; 1.05 MB tracked data
792 migrating 519 KB in store; 1.05 MB tracked data
793 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
793 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
794 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
794 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
795 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
795 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
796 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
796 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
797 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
797 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
798 finished migrating 3 changelog revisions; change in size: 0 bytes
798 finished migrating 3 changelog revisions; change in size: 0 bytes
799 finished migrating 9 total revisions; total change in store size: -17 bytes
799 finished migrating 9 total revisions; total change in store size: -17 bytes
800 copying phaseroots
800 copying phaseroots
801 copying requires
801 copying requires
802 data fully upgraded in a temporary repository
802 data fully upgraded in a temporary repository
803 marking source repository as being upgraded; clients will be unable to read from repository
803 marking source repository as being upgraded; clients will be unable to read from repository
804 starting in-place swap of repository data
804 starting in-place swap of repository data
805 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
805 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
806 replacing store...
806 replacing store...
807 store replacement complete; repository was inconsistent for *s (glob)
807 store replacement complete; repository was inconsistent for *s (glob)
808 finalizing requirements file and making repository readable again
808 finalizing requirements file and making repository readable again
809 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
809 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
810 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
810 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
811 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
811 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
812
812
813 Original requirements backed up
813 Original requirements backed up
814
814
815 $ cat .hg/upgradebackup.*/requires
815 $ cat .hg/upgradebackup.*/requires
816 share-safe
816 share-safe
817 $ cat .hg/upgradebackup.*/store/requires
817 $ cat .hg/upgradebackup.*/store/requires
818 dotencode
818 dotencode
819 fncache
819 fncache
820 persistent-nodemap (rust !)
820 persistent-nodemap (rust !)
821 revlogv1
821 revlogv1
822 store
822 store
823 upgradeinprogress
823 upgradeinprogress
824
824
825 generaldelta added to original requirements files
825 generaldelta added to original requirements files
826
826
827 $ hg debugrequires
827 $ hg debugrequires
828 dotencode
828 dotencode
829 fncache
829 fncache
830 generaldelta
830 generaldelta
831 persistent-nodemap (rust !)
831 persistent-nodemap (rust !)
832 revlogv1
832 revlogv1
833 share-safe
833 share-safe
834 store
834 store
835
835
836 store directory has files we expect
836 store directory has files we expect
837
837
838 $ ls .hg/store
838 $ ls .hg/store
839 00changelog.i
839 00changelog.i
840 00manifest.i
840 00manifest.i
841 data
841 data
842 fncache
842 fncache
843 phaseroots
843 phaseroots
844 requires
844 requires
845 undo
845 undo
846 undo.backupfiles
846 undo.backupfiles
847 undo.phaseroots
848
847
849 manifest should be generaldelta
848 manifest should be generaldelta
850
849
851 $ hg debugrevlog -m | grep flags
850 $ hg debugrevlog -m | grep flags
852 flags : inline, generaldelta
851 flags : inline, generaldelta
853
852
854 verify should be happy
853 verify should be happy
855
854
856 $ hg verify -q
855 $ hg verify -q
857
856
858 old store should be backed up
857 old store should be backed up
859
858
860 $ ls -d .hg/upgradebackup.*/
859 $ ls -d .hg/upgradebackup.*/
861 .hg/upgradebackup.*/ (glob)
860 .hg/upgradebackup.*/ (glob)
862 $ ls .hg/upgradebackup.*/store
861 $ ls .hg/upgradebackup.*/store
863 00changelog.i
862 00changelog.i
864 00manifest.i
863 00manifest.i
865 data
864 data
866 fncache
865 fncache
867 phaseroots
866 phaseroots
868 requires
867 requires
869 undo
868 undo
870 undo.backup.fncache
869 undo.backup.fncache
871 undo.backupfiles
870 undo.backupfiles
872 undo.phaseroots
873
871
874 unless --no-backup is passed
872 unless --no-backup is passed
875
873
876 $ rm -rf .hg/upgradebackup.*/
874 $ rm -rf .hg/upgradebackup.*/
877 $ hg debugupgraderepo --run --no-backup
875 $ hg debugupgraderepo --run --no-backup
878 note: selecting all-filelogs for processing to change: sparserevlog
876 note: selecting all-filelogs for processing to change: sparserevlog
879 note: selecting all-manifestlogs for processing to change: sparserevlog
877 note: selecting all-manifestlogs for processing to change: sparserevlog
880 note: selecting changelog for processing to change: sparserevlog
878 note: selecting changelog for processing to change: sparserevlog
881
879
882 upgrade will perform the following actions:
880 upgrade will perform the following actions:
883
881
884 requirements
882 requirements
885 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
883 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
886 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
884 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
887 added: sparserevlog
885 added: sparserevlog
888
886
889 sparserevlog
887 sparserevlog
890 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
888 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
891
889
892 processed revlogs:
890 processed revlogs:
893 - all-filelogs
891 - all-filelogs
894 - changelog
892 - changelog
895 - manifest
893 - manifest
896
894
897 beginning upgrade...
895 beginning upgrade...
898 repository locked and read-only
896 repository locked and read-only
899 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
897 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
900 (it is safe to interrupt this process any time before data migration completes)
898 (it is safe to interrupt this process any time before data migration completes)
901 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
899 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
902 migrating 519 KB in store; 1.05 MB tracked data
900 migrating 519 KB in store; 1.05 MB tracked data
903 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
901 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
904 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
902 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
905 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
903 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
906 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
904 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
907 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
905 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
908 finished migrating 3 changelog revisions; change in size: 0 bytes
906 finished migrating 3 changelog revisions; change in size: 0 bytes
909 finished migrating 9 total revisions; total change in store size: 0 bytes
907 finished migrating 9 total revisions; total change in store size: 0 bytes
910 copying phaseroots
908 copying phaseroots
911 copying requires
909 copying requires
912 data fully upgraded in a temporary repository
910 data fully upgraded in a temporary repository
913 marking source repository as being upgraded; clients will be unable to read from repository
911 marking source repository as being upgraded; clients will be unable to read from repository
914 starting in-place swap of repository data
912 starting in-place swap of repository data
915 replacing store...
913 replacing store...
916 store replacement complete; repository was inconsistent for * (glob)
914 store replacement complete; repository was inconsistent for * (glob)
917 finalizing requirements file and making repository readable again
915 finalizing requirements file and making repository readable again
918 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
916 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
919 $ ls -1 .hg/ | grep upgradebackup
917 $ ls -1 .hg/ | grep upgradebackup
920 [1]
918 [1]
921
919
922 We can restrict optimization to some revlog:
920 We can restrict optimization to some revlog:
923
921
924 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
922 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
925 upgrade will perform the following actions:
923 upgrade will perform the following actions:
926
924
927 requirements
925 requirements
928 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
926 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
929 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
927 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
930
928
931 optimisations: re-delta-parent
929 optimisations: re-delta-parent
932
930
933 re-delta-parent
931 re-delta-parent
934 deltas within internal storage will choose a new base revision if needed
932 deltas within internal storage will choose a new base revision if needed
935
933
936 processed revlogs:
934 processed revlogs:
937 - manifest
935 - manifest
938
936
939 beginning upgrade...
937 beginning upgrade...
940 repository locked and read-only
938 repository locked and read-only
941 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
939 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
942 (it is safe to interrupt this process any time before data migration completes)
940 (it is safe to interrupt this process any time before data migration completes)
943 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
941 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
944 migrating 519 KB in store; 1.05 MB tracked data
942 migrating 519 KB in store; 1.05 MB tracked data
945 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
943 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
946 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
944 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
947 blindly copying data/f0.i containing 1 revisions
945 blindly copying data/f0.i containing 1 revisions
948 blindly copying data/f2.i containing 1 revisions
946 blindly copying data/f2.i containing 1 revisions
949 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
947 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
950 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
948 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
951 cloning 3 revisions from 00manifest.i
949 cloning 3 revisions from 00manifest.i
952 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
950 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
953 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
951 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
954 blindly copying 00changelog.i containing 3 revisions
952 blindly copying 00changelog.i containing 3 revisions
955 finished migrating 3 changelog revisions; change in size: 0 bytes
953 finished migrating 3 changelog revisions; change in size: 0 bytes
956 finished migrating 9 total revisions; total change in store size: 0 bytes
954 finished migrating 9 total revisions; total change in store size: 0 bytes
957 copying phaseroots
955 copying phaseroots
958 copying requires
956 copying requires
959 data fully upgraded in a temporary repository
957 data fully upgraded in a temporary repository
960 marking source repository as being upgraded; clients will be unable to read from repository
958 marking source repository as being upgraded; clients will be unable to read from repository
961 starting in-place swap of repository data
959 starting in-place swap of repository data
962 replacing store...
960 replacing store...
963 store replacement complete; repository was inconsistent for *s (glob)
961 store replacement complete; repository was inconsistent for *s (glob)
964 finalizing requirements file and making repository readable again
962 finalizing requirements file and making repository readable again
965 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
963 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
966
964
967 Check that the repo still works fine
965 Check that the repo still works fine
968
966
969 $ hg log -G --stat
967 $ hg log -G --stat
970 @ changeset: 2:fca376863211
968 @ changeset: 2:fca376863211
971 | tag: tip
969 | tag: tip
972 | parent: 0:ba592bf28da2
970 | parent: 0:ba592bf28da2
973 | user: test
971 | user: test
974 | date: Thu Jan 01 00:00:00 1970 +0000
972 | date: Thu Jan 01 00:00:00 1970 +0000
975 | summary: add f2
973 | summary: add f2
976 |
974 |
977 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
975 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
978 | 1 files changed, 100000 insertions(+), 0 deletions(-)
976 | 1 files changed, 100000 insertions(+), 0 deletions(-)
979 |
977 |
980 | o changeset: 1:2029ce2354e2
978 | o changeset: 1:2029ce2354e2
981 |/ user: test
979 |/ user: test
982 | date: Thu Jan 01 00:00:00 1970 +0000
980 | date: Thu Jan 01 00:00:00 1970 +0000
983 | summary: add f1
981 | summary: add f1
984 |
982 |
985 |
983 |
986 o changeset: 0:ba592bf28da2
984 o changeset: 0:ba592bf28da2
987 user: test
985 user: test
988 date: Thu Jan 01 00:00:00 1970 +0000
986 date: Thu Jan 01 00:00:00 1970 +0000
989 summary: initial
987 summary: initial
990
988
991
989
992
990
993 $ hg verify -q
991 $ hg verify -q
994
992
995 Check we can select negatively
993 Check we can select negatively
996
994
997 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
995 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
998 upgrade will perform the following actions:
996 upgrade will perform the following actions:
999
997
1000 requirements
998 requirements
1001 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
999 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1002 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1000 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1003
1001
1004 optimisations: re-delta-parent
1002 optimisations: re-delta-parent
1005
1003
1006 re-delta-parent
1004 re-delta-parent
1007 deltas within internal storage will choose a new base revision if needed
1005 deltas within internal storage will choose a new base revision if needed
1008
1006
1009 processed revlogs:
1007 processed revlogs:
1010 - all-filelogs
1008 - all-filelogs
1011 - changelog
1009 - changelog
1012
1010
1013 beginning upgrade...
1011 beginning upgrade...
1014 repository locked and read-only
1012 repository locked and read-only
1015 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1013 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1016 (it is safe to interrupt this process any time before data migration completes)
1014 (it is safe to interrupt this process any time before data migration completes)
1017 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1015 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1018 migrating 519 KB in store; 1.05 MB tracked data
1016 migrating 519 KB in store; 1.05 MB tracked data
1019 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1017 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1020 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1018 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1021 cloning 1 revisions from data/f0.i
1019 cloning 1 revisions from data/f0.i
1022 cloning 1 revisions from data/f2.i
1020 cloning 1 revisions from data/f2.i
1023 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1021 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1024 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1022 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1025 blindly copying 00manifest.i containing 3 revisions
1023 blindly copying 00manifest.i containing 3 revisions
1026 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1024 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1027 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1025 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1028 cloning 3 revisions from 00changelog.i
1026 cloning 3 revisions from 00changelog.i
1029 finished migrating 3 changelog revisions; change in size: 0 bytes
1027 finished migrating 3 changelog revisions; change in size: 0 bytes
1030 finished migrating 9 total revisions; total change in store size: 0 bytes
1028 finished migrating 9 total revisions; total change in store size: 0 bytes
1031 copying phaseroots
1029 copying phaseroots
1032 copying requires
1030 copying requires
1033 data fully upgraded in a temporary repository
1031 data fully upgraded in a temporary repository
1034 marking source repository as being upgraded; clients will be unable to read from repository
1032 marking source repository as being upgraded; clients will be unable to read from repository
1035 starting in-place swap of repository data
1033 starting in-place swap of repository data
1036 replacing store...
1034 replacing store...
1037 store replacement complete; repository was inconsistent for *s (glob)
1035 store replacement complete; repository was inconsistent for *s (glob)
1038 finalizing requirements file and making repository readable again
1036 finalizing requirements file and making repository readable again
1039 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1037 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1040 $ hg verify -q
1038 $ hg verify -q
1041
1039
1042 Check that we can select changelog only
1040 Check that we can select changelog only
1043
1041
1044 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
1042 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
1045 upgrade will perform the following actions:
1043 upgrade will perform the following actions:
1046
1044
1047 requirements
1045 requirements
1048 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1046 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1049 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1047 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1050
1048
1051 optimisations: re-delta-parent
1049 optimisations: re-delta-parent
1052
1050
1053 re-delta-parent
1051 re-delta-parent
1054 deltas within internal storage will choose a new base revision if needed
1052 deltas within internal storage will choose a new base revision if needed
1055
1053
1056 processed revlogs:
1054 processed revlogs:
1057 - changelog
1055 - changelog
1058
1056
1059 beginning upgrade...
1057 beginning upgrade...
1060 repository locked and read-only
1058 repository locked and read-only
1061 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1059 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1062 (it is safe to interrupt this process any time before data migration completes)
1060 (it is safe to interrupt this process any time before data migration completes)
1063 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1061 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1064 migrating 519 KB in store; 1.05 MB tracked data
1062 migrating 519 KB in store; 1.05 MB tracked data
1065 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1063 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1066 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
1064 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
1067 blindly copying data/f0.i containing 1 revisions
1065 blindly copying data/f0.i containing 1 revisions
1068 blindly copying data/f2.i containing 1 revisions
1066 blindly copying data/f2.i containing 1 revisions
1069 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1067 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1070 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1068 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1071 blindly copying 00manifest.i containing 3 revisions
1069 blindly copying 00manifest.i containing 3 revisions
1072 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1070 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1073 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1071 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1074 cloning 3 revisions from 00changelog.i
1072 cloning 3 revisions from 00changelog.i
1075 finished migrating 3 changelog revisions; change in size: 0 bytes
1073 finished migrating 3 changelog revisions; change in size: 0 bytes
1076 finished migrating 9 total revisions; total change in store size: 0 bytes
1074 finished migrating 9 total revisions; total change in store size: 0 bytes
1077 copying phaseroots
1075 copying phaseroots
1078 copying requires
1076 copying requires
1079 data fully upgraded in a temporary repository
1077 data fully upgraded in a temporary repository
1080 marking source repository as being upgraded; clients will be unable to read from repository
1078 marking source repository as being upgraded; clients will be unable to read from repository
1081 starting in-place swap of repository data
1079 starting in-place swap of repository data
1082 replacing store...
1080 replacing store...
1083 store replacement complete; repository was inconsistent for *s (glob)
1081 store replacement complete; repository was inconsistent for *s (glob)
1084 finalizing requirements file and making repository readable again
1082 finalizing requirements file and making repository readable again
1085 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1083 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1086 $ hg verify -q
1084 $ hg verify -q
1087
1085
1088 Check that we can select filelog only
1086 Check that we can select filelog only
1089
1087
1090 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
1088 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
1091 upgrade will perform the following actions:
1089 upgrade will perform the following actions:
1092
1090
1093 requirements
1091 requirements
1094 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1092 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1095 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1093 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1096
1094
1097 optimisations: re-delta-parent
1095 optimisations: re-delta-parent
1098
1096
1099 re-delta-parent
1097 re-delta-parent
1100 deltas within internal storage will choose a new base revision if needed
1098 deltas within internal storage will choose a new base revision if needed
1101
1099
1102 processed revlogs:
1100 processed revlogs:
1103 - all-filelogs
1101 - all-filelogs
1104
1102
1105 beginning upgrade...
1103 beginning upgrade...
1106 repository locked and read-only
1104 repository locked and read-only
1107 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1105 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1108 (it is safe to interrupt this process any time before data migration completes)
1106 (it is safe to interrupt this process any time before data migration completes)
1109 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1107 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1110 migrating 519 KB in store; 1.05 MB tracked data
1108 migrating 519 KB in store; 1.05 MB tracked data
1111 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1109 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1112 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1110 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1113 cloning 1 revisions from data/f0.i
1111 cloning 1 revisions from data/f0.i
1114 cloning 1 revisions from data/f2.i
1112 cloning 1 revisions from data/f2.i
1115 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1113 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1116 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1114 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1117 blindly copying 00manifest.i containing 3 revisions
1115 blindly copying 00manifest.i containing 3 revisions
1118 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1116 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1119 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1117 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1120 blindly copying 00changelog.i containing 3 revisions
1118 blindly copying 00changelog.i containing 3 revisions
1121 finished migrating 3 changelog revisions; change in size: 0 bytes
1119 finished migrating 3 changelog revisions; change in size: 0 bytes
1122 finished migrating 9 total revisions; total change in store size: 0 bytes
1120 finished migrating 9 total revisions; total change in store size: 0 bytes
1123 copying phaseroots
1121 copying phaseroots
1124 copying requires
1122 copying requires
1125 data fully upgraded in a temporary repository
1123 data fully upgraded in a temporary repository
1126 marking source repository as being upgraded; clients will be unable to read from repository
1124 marking source repository as being upgraded; clients will be unable to read from repository
1127 starting in-place swap of repository data
1125 starting in-place swap of repository data
1128 replacing store...
1126 replacing store...
1129 store replacement complete; repository was inconsistent for *s (glob)
1127 store replacement complete; repository was inconsistent for *s (glob)
1130 finalizing requirements file and making repository readable again
1128 finalizing requirements file and making repository readable again
1131 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1129 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1132 $ hg verify -q
1130 $ hg verify -q
1133
1131
1134
1132
1135 Check you can't skip revlog clone during important format downgrade
1133 Check you can't skip revlog clone during important format downgrade
1136
1134
1137 $ echo "[format]" > .hg/hgrc
1135 $ echo "[format]" > .hg/hgrc
1138 $ echo "sparse-revlog=no" >> .hg/hgrc
1136 $ echo "sparse-revlog=no" >> .hg/hgrc
1139 $ hg debugupgrade --optimize re-delta-parent --no-manifest --no-backup --quiet
1137 $ hg debugupgrade --optimize re-delta-parent --no-manifest --no-backup --quiet
1140 warning: ignoring --no-manifest, as upgrade is changing: sparserevlog
1138 warning: ignoring --no-manifest, as upgrade is changing: sparserevlog
1141
1139
1142 requirements
1140 requirements
1143 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1141 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1144 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1142 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1145 removed: sparserevlog
1143 removed: sparserevlog
1146
1144
1147 optimisations: re-delta-parent
1145 optimisations: re-delta-parent
1148
1146
1149 processed revlogs:
1147 processed revlogs:
1150 - all-filelogs
1148 - all-filelogs
1151 - changelog
1149 - changelog
1152 - manifest
1150 - manifest
1153
1151
1154 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1152 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1155 note: selecting all-filelogs for processing to change: sparserevlog
1153 note: selecting all-filelogs for processing to change: sparserevlog
1156 note: selecting changelog for processing to change: sparserevlog
1154 note: selecting changelog for processing to change: sparserevlog
1157
1155
1158 upgrade will perform the following actions:
1156 upgrade will perform the following actions:
1159
1157
1160 requirements
1158 requirements
1161 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1159 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1162 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1160 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1163 removed: sparserevlog
1161 removed: sparserevlog
1164
1162
1165 optimisations: re-delta-parent
1163 optimisations: re-delta-parent
1166
1164
1167 re-delta-parent
1165 re-delta-parent
1168 deltas within internal storage will choose a new base revision if needed
1166 deltas within internal storage will choose a new base revision if needed
1169
1167
1170 processed revlogs:
1168 processed revlogs:
1171 - all-filelogs
1169 - all-filelogs
1172 - changelog
1170 - changelog
1173 - manifest
1171 - manifest
1174
1172
1175 beginning upgrade...
1173 beginning upgrade...
1176 repository locked and read-only
1174 repository locked and read-only
1177 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1175 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1178 (it is safe to interrupt this process any time before data migration completes)
1176 (it is safe to interrupt this process any time before data migration completes)
1179 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1177 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1180 migrating 519 KB in store; 1.05 MB tracked data
1178 migrating 519 KB in store; 1.05 MB tracked data
1181 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1179 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1182 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1180 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1183 cloning 1 revisions from data/f0.i
1181 cloning 1 revisions from data/f0.i
1184 cloning 1 revisions from data/f2.i
1182 cloning 1 revisions from data/f2.i
1185 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1183 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1186 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1184 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1187 cloning 3 revisions from 00manifest.i
1185 cloning 3 revisions from 00manifest.i
1188 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1186 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1189 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1187 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1190 cloning 3 revisions from 00changelog.i
1188 cloning 3 revisions from 00changelog.i
1191 finished migrating 3 changelog revisions; change in size: 0 bytes
1189 finished migrating 3 changelog revisions; change in size: 0 bytes
1192 finished migrating 9 total revisions; total change in store size: 0 bytes
1190 finished migrating 9 total revisions; total change in store size: 0 bytes
1193 copying phaseroots
1191 copying phaseroots
1194 copying requires
1192 copying requires
1195 data fully upgraded in a temporary repository
1193 data fully upgraded in a temporary repository
1196 marking source repository as being upgraded; clients will be unable to read from repository
1194 marking source repository as being upgraded; clients will be unable to read from repository
1197 starting in-place swap of repository data
1195 starting in-place swap of repository data
1198 replacing store...
1196 replacing store...
1199 store replacement complete; repository was inconsistent for *s (glob)
1197 store replacement complete; repository was inconsistent for *s (glob)
1200 finalizing requirements file and making repository readable again
1198 finalizing requirements file and making repository readable again
1201 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1199 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1202 $ hg verify -q
1200 $ hg verify -q
1203
1201
1204 Check you can't skip revlog clone during important format upgrade
1202 Check you can't skip revlog clone during important format upgrade
1205
1203
1206 $ echo "sparse-revlog=yes" >> .hg/hgrc
1204 $ echo "sparse-revlog=yes" >> .hg/hgrc
1207 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1205 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1208 note: selecting all-filelogs for processing to change: sparserevlog
1206 note: selecting all-filelogs for processing to change: sparserevlog
1209 note: selecting changelog for processing to change: sparserevlog
1207 note: selecting changelog for processing to change: sparserevlog
1210
1208
1211 upgrade will perform the following actions:
1209 upgrade will perform the following actions:
1212
1210
1213 requirements
1211 requirements
1214 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1212 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1215 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1213 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1216 added: sparserevlog
1214 added: sparserevlog
1217
1215
1218 optimisations: re-delta-parent
1216 optimisations: re-delta-parent
1219
1217
1220 sparserevlog
1218 sparserevlog
1221 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
1219 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
1222
1220
1223 re-delta-parent
1221 re-delta-parent
1224 deltas within internal storage will choose a new base revision if needed
1222 deltas within internal storage will choose a new base revision if needed
1225
1223
1226 processed revlogs:
1224 processed revlogs:
1227 - all-filelogs
1225 - all-filelogs
1228 - changelog
1226 - changelog
1229 - manifest
1227 - manifest
1230
1228
1231 beginning upgrade...
1229 beginning upgrade...
1232 repository locked and read-only
1230 repository locked and read-only
1233 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1231 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1234 (it is safe to interrupt this process any time before data migration completes)
1232 (it is safe to interrupt this process any time before data migration completes)
1235 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1233 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1236 migrating 519 KB in store; 1.05 MB tracked data
1234 migrating 519 KB in store; 1.05 MB tracked data
1237 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1235 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1238 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1236 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1239 cloning 1 revisions from data/f0.i
1237 cloning 1 revisions from data/f0.i
1240 cloning 1 revisions from data/f2.i
1238 cloning 1 revisions from data/f2.i
1241 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1239 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1242 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1240 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1243 cloning 3 revisions from 00manifest.i
1241 cloning 3 revisions from 00manifest.i
1244 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1242 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1245 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1243 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1246 cloning 3 revisions from 00changelog.i
1244 cloning 3 revisions from 00changelog.i
1247 finished migrating 3 changelog revisions; change in size: 0 bytes
1245 finished migrating 3 changelog revisions; change in size: 0 bytes
1248 finished migrating 9 total revisions; total change in store size: 0 bytes
1246 finished migrating 9 total revisions; total change in store size: 0 bytes
1249 copying phaseroots
1247 copying phaseroots
1250 copying requires
1248 copying requires
1251 data fully upgraded in a temporary repository
1249 data fully upgraded in a temporary repository
1252 marking source repository as being upgraded; clients will be unable to read from repository
1250 marking source repository as being upgraded; clients will be unable to read from repository
1253 starting in-place swap of repository data
1251 starting in-place swap of repository data
1254 replacing store...
1252 replacing store...
1255 store replacement complete; repository was inconsistent for *s (glob)
1253 store replacement complete; repository was inconsistent for *s (glob)
1256 finalizing requirements file and making repository readable again
1254 finalizing requirements file and making repository readable again
1257 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1255 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1258 $ hg verify -q
1256 $ hg verify -q
1259
1257
1260 $ cd ..
1258 $ cd ..
1261
1259
1262 store files with special filenames aren't encoded during copy
1260 store files with special filenames aren't encoded during copy
1263
1261
1264 $ hg init store-filenames
1262 $ hg init store-filenames
1265 $ cd store-filenames
1263 $ cd store-filenames
1266 $ touch foo
1264 $ touch foo
1267 $ hg -q commit -A -m initial
1265 $ hg -q commit -A -m initial
1268 $ touch .hg/store/.XX_special_filename
1266 $ touch .hg/store/.XX_special_filename
1269
1267
1270 $ hg debugupgraderepo --run
1268 $ hg debugupgraderepo --run
1271 nothing to do
1269 nothing to do
1272 $ hg debugupgraderepo --run --optimize 're-delta-fulladd'
1270 $ hg debugupgraderepo --run --optimize 're-delta-fulladd'
1273 upgrade will perform the following actions:
1271 upgrade will perform the following actions:
1274
1272
1275 requirements
1273 requirements
1276 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1274 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1277 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1275 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1278
1276
1279 optimisations: re-delta-fulladd
1277 optimisations: re-delta-fulladd
1280
1278
1281 re-delta-fulladd
1279 re-delta-fulladd
1282 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1280 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1283
1281
1284 processed revlogs:
1282 processed revlogs:
1285 - all-filelogs
1283 - all-filelogs
1286 - changelog
1284 - changelog
1287 - manifest
1285 - manifest
1288
1286
1289 beginning upgrade...
1287 beginning upgrade...
1290 repository locked and read-only
1288 repository locked and read-only
1291 creating temporary repository to stage upgraded data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1289 creating temporary repository to stage upgraded data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1292 (it is safe to interrupt this process any time before data migration completes)
1290 (it is safe to interrupt this process any time before data migration completes)
1293 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1291 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1294 migrating 301 bytes in store; 107 bytes tracked data
1292 migrating 301 bytes in store; 107 bytes tracked data
1295 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1293 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1296 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1294 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1297 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1295 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1298 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1296 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1299 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1297 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1300 finished migrating 1 changelog revisions; change in size: 0 bytes
1298 finished migrating 1 changelog revisions; change in size: 0 bytes
1301 finished migrating 3 total revisions; total change in store size: 0 bytes
1299 finished migrating 3 total revisions; total change in store size: 0 bytes
1302 copying .XX_special_filename
1300 copying .XX_special_filename
1303 copying phaseroots
1301 copying phaseroots
1304 copying requires
1302 copying requires
1305 data fully upgraded in a temporary repository
1303 data fully upgraded in a temporary repository
1306 marking source repository as being upgraded; clients will be unable to read from repository
1304 marking source repository as being upgraded; clients will be unable to read from repository
1307 starting in-place swap of repository data
1305 starting in-place swap of repository data
1308 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1306 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1309 replacing store...
1307 replacing store...
1310 store replacement complete; repository was inconsistent for *s (glob)
1308 store replacement complete; repository was inconsistent for *s (glob)
1311 finalizing requirements file and making repository readable again
1309 finalizing requirements file and making repository readable again
1312 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1310 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1313 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1311 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1314 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1312 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1315
1313
1316 fncache is valid after upgrade
1314 fncache is valid after upgrade
1317
1315
1318 $ hg debugrebuildfncache
1316 $ hg debugrebuildfncache
1319 fncache already up to date
1317 fncache already up to date
1320
1318
1321 $ cd ..
1319 $ cd ..
1322
1320
1323 Check upgrading a large file repository
1321 Check upgrading a large file repository
1324 ---------------------------------------
1322 ---------------------------------------
1325
1323
1326 $ hg init largefilesrepo
1324 $ hg init largefilesrepo
1327 $ cat << EOF >> largefilesrepo/.hg/hgrc
1325 $ cat << EOF >> largefilesrepo/.hg/hgrc
1328 > [extensions]
1326 > [extensions]
1329 > largefiles =
1327 > largefiles =
1330 > EOF
1328 > EOF
1331
1329
1332 $ cd largefilesrepo
1330 $ cd largefilesrepo
1333 $ touch foo
1331 $ touch foo
1334 $ hg add --large foo
1332 $ hg add --large foo
1335 $ hg -q commit -m initial
1333 $ hg -q commit -m initial
1336 $ hg debugrequires
1334 $ hg debugrequires
1337 dotencode
1335 dotencode
1338 fncache
1336 fncache
1339 generaldelta
1337 generaldelta
1340 largefiles
1338 largefiles
1341 persistent-nodemap (rust !)
1339 persistent-nodemap (rust !)
1342 revlogv1
1340 revlogv1
1343 share-safe
1341 share-safe
1344 sparserevlog
1342 sparserevlog
1345 store
1343 store
1346
1344
1347 $ hg debugupgraderepo --run
1345 $ hg debugupgraderepo --run
1348 nothing to do
1346 nothing to do
1349 $ hg debugrequires
1347 $ hg debugrequires
1350 dotencode
1348 dotencode
1351 fncache
1349 fncache
1352 generaldelta
1350 generaldelta
1353 largefiles
1351 largefiles
1354 persistent-nodemap (rust !)
1352 persistent-nodemap (rust !)
1355 revlogv1
1353 revlogv1
1356 share-safe
1354 share-safe
1357 sparserevlog
1355 sparserevlog
1358 store
1356 store
1359
1357
1360 $ cat << EOF >> .hg/hgrc
1358 $ cat << EOF >> .hg/hgrc
1361 > [extensions]
1359 > [extensions]
1362 > lfs =
1360 > lfs =
1363 > [lfs]
1361 > [lfs]
1364 > threshold = 10
1362 > threshold = 10
1365 > EOF
1363 > EOF
1366 $ echo '123456789012345' > lfs.bin
1364 $ echo '123456789012345' > lfs.bin
1367 $ hg ci -Am 'lfs.bin'
1365 $ hg ci -Am 'lfs.bin'
1368 adding lfs.bin
1366 adding lfs.bin
1369 $ hg debugrequires | grep lfs
1367 $ hg debugrequires | grep lfs
1370 lfs
1368 lfs
1371 $ find .hg/store/lfs -type f
1369 $ find .hg/store/lfs -type f
1372 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1370 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1373
1371
1374 $ hg debugupgraderepo --run
1372 $ hg debugupgraderepo --run
1375 nothing to do
1373 nothing to do
1376
1374
1377 $ hg debugrequires | grep lfs
1375 $ hg debugrequires | grep lfs
1378 lfs
1376 lfs
1379 $ find .hg/store/lfs -type f
1377 $ find .hg/store/lfs -type f
1380 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1378 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1381 $ hg verify -q
1379 $ hg verify -q
1382 $ hg debugdata lfs.bin 0
1380 $ hg debugdata lfs.bin 0
1383 version https://git-lfs.github.com/spec/v1
1381 version https://git-lfs.github.com/spec/v1
1384 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1382 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1385 size 16
1383 size 16
1386 x-is-binary 0
1384 x-is-binary 0
1387
1385
1388 $ cd ..
1386 $ cd ..
1389
1387
1390 repository config is taken in account
1388 repository config is taken in account
1391 -------------------------------------
1389 -------------------------------------
1392
1390
1393 $ cat << EOF >> $HGRCPATH
1391 $ cat << EOF >> $HGRCPATH
1394 > [format]
1392 > [format]
1395 > maxchainlen = 1
1393 > maxchainlen = 1
1396 > EOF
1394 > EOF
1397
1395
1398 $ hg init localconfig
1396 $ hg init localconfig
1399 $ cd localconfig
1397 $ cd localconfig
1400 $ cat << EOF > file
1398 $ cat << EOF > file
1401 > some content
1399 > some content
1402 > with some length
1400 > with some length
1403 > to make sure we get a delta
1401 > to make sure we get a delta
1404 > after changes
1402 > after changes
1405 > very long
1403 > very long
1406 > very long
1404 > very long
1407 > very long
1405 > very long
1408 > very long
1406 > very long
1409 > very long
1407 > very long
1410 > very long
1408 > very long
1411 > very long
1409 > very long
1412 > very long
1410 > very long
1413 > very long
1411 > very long
1414 > very long
1412 > very long
1415 > very long
1413 > very long
1416 > EOF
1414 > EOF
1417 $ hg -q commit -A -m A
1415 $ hg -q commit -A -m A
1418 $ echo "new line" >> file
1416 $ echo "new line" >> file
1419 $ hg -q commit -m B
1417 $ hg -q commit -m B
1420 $ echo "new line" >> file
1418 $ echo "new line" >> file
1421 $ hg -q commit -m C
1419 $ hg -q commit -m C
1422
1420
1423 $ cat << EOF >> .hg/hgrc
1421 $ cat << EOF >> .hg/hgrc
1424 > [format]
1422 > [format]
1425 > maxchainlen = 9001
1423 > maxchainlen = 9001
1426 > EOF
1424 > EOF
1427 $ hg config format
1425 $ hg config format
1428 format.revlog-compression=$BUNDLE2_COMPRESSIONS$
1426 format.revlog-compression=$BUNDLE2_COMPRESSIONS$
1429 format.maxchainlen=9001
1427 format.maxchainlen=9001
1430 $ hg debugdeltachain file
1428 $ hg debugdeltachain file
1431 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1429 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1432 0 -1 -1 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1430 0 -1 -1 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1433 1 0 -1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1431 1 0 -1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1434 2 1 -1 1 2 0 snap 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1432 2 1 -1 1 2 0 snap 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1435
1433
1436 $ hg debugupgraderepo --run --optimize 're-delta-all'
1434 $ hg debugupgraderepo --run --optimize 're-delta-all'
1437 upgrade will perform the following actions:
1435 upgrade will perform the following actions:
1438
1436
1439 requirements
1437 requirements
1440 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1438 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1441 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1439 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1442
1440
1443 optimisations: re-delta-all
1441 optimisations: re-delta-all
1444
1442
1445 re-delta-all
1443 re-delta-all
1446 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1444 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1447
1445
1448 processed revlogs:
1446 processed revlogs:
1449 - all-filelogs
1447 - all-filelogs
1450 - changelog
1448 - changelog
1451 - manifest
1449 - manifest
1452
1450
1453 beginning upgrade...
1451 beginning upgrade...
1454 repository locked and read-only
1452 repository locked and read-only
1455 creating temporary repository to stage upgraded data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1453 creating temporary repository to stage upgraded data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1456 (it is safe to interrupt this process any time before data migration completes)
1454 (it is safe to interrupt this process any time before data migration completes)
1457 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1455 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1458 migrating 1019 bytes in store; 882 bytes tracked data
1456 migrating 1019 bytes in store; 882 bytes tracked data
1459 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1457 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1460 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1458 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1461 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1459 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1462 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1460 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1463 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1461 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1464 finished migrating 3 changelog revisions; change in size: 0 bytes
1462 finished migrating 3 changelog revisions; change in size: 0 bytes
1465 finished migrating 9 total revisions; total change in store size: -9 bytes
1463 finished migrating 9 total revisions; total change in store size: -9 bytes
1466 copying phaseroots
1464 copying phaseroots
1467 copying requires
1465 copying requires
1468 data fully upgraded in a temporary repository
1466 data fully upgraded in a temporary repository
1469 marking source repository as being upgraded; clients will be unable to read from repository
1467 marking source repository as being upgraded; clients will be unable to read from repository
1470 starting in-place swap of repository data
1468 starting in-place swap of repository data
1471 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1469 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1472 replacing store...
1470 replacing store...
1473 store replacement complete; repository was inconsistent for *s (glob)
1471 store replacement complete; repository was inconsistent for *s (glob)
1474 finalizing requirements file and making repository readable again
1472 finalizing requirements file and making repository readable again
1475 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1473 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1476 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1474 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1477 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1475 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1478 $ hg debugdeltachain file
1476 $ hg debugdeltachain file
1479 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1477 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1480 0 -1 -1 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1478 0 -1 -1 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1481 1 0 -1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1479 1 0 -1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1482 2 1 -1 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1480 2 1 -1 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1483 $ cd ..
1481 $ cd ..
1484
1482
1485 $ cat << EOF >> $HGRCPATH
1483 $ cat << EOF >> $HGRCPATH
1486 > [format]
1484 > [format]
1487 > maxchainlen = 9001
1485 > maxchainlen = 9001
1488 > EOF
1486 > EOF
1489
1487
1490 Check upgrading a sparse-revlog repository
1488 Check upgrading a sparse-revlog repository
1491 ---------------------------------------
1489 ---------------------------------------
1492
1490
1493 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1491 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1494 $ cd sparserevlogrepo
1492 $ cd sparserevlogrepo
1495 $ touch foo
1493 $ touch foo
1496 $ hg add foo
1494 $ hg add foo
1497 $ hg -q commit -m "foo"
1495 $ hg -q commit -m "foo"
1498 $ hg debugrequires
1496 $ hg debugrequires
1499 dotencode
1497 dotencode
1500 fncache
1498 fncache
1501 generaldelta
1499 generaldelta
1502 persistent-nodemap (rust !)
1500 persistent-nodemap (rust !)
1503 revlogv1
1501 revlogv1
1504 share-safe
1502 share-safe
1505 store
1503 store
1506
1504
1507 Check that we can add the sparse-revlog format requirement
1505 Check that we can add the sparse-revlog format requirement
1508 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1506 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1509 upgrade will perform the following actions:
1507 upgrade will perform the following actions:
1510
1508
1511 requirements
1509 requirements
1512 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1510 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1513 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1511 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1514 added: sparserevlog
1512 added: sparserevlog
1515
1513
1516 processed revlogs:
1514 processed revlogs:
1517 - all-filelogs
1515 - all-filelogs
1518 - changelog
1516 - changelog
1519 - manifest
1517 - manifest
1520
1518
1521 $ hg debugrequires
1519 $ hg debugrequires
1522 dotencode
1520 dotencode
1523 fncache
1521 fncache
1524 generaldelta
1522 generaldelta
1525 persistent-nodemap (rust !)
1523 persistent-nodemap (rust !)
1526 revlogv1
1524 revlogv1
1527 share-safe
1525 share-safe
1528 sparserevlog
1526 sparserevlog
1529 store
1527 store
1530
1528
1531 Check that we can remove the sparse-revlog format requirement
1529 Check that we can remove the sparse-revlog format requirement
1532 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1530 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1533 upgrade will perform the following actions:
1531 upgrade will perform the following actions:
1534
1532
1535 requirements
1533 requirements
1536 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1534 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1537 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1535 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1538 removed: sparserevlog
1536 removed: sparserevlog
1539
1537
1540 processed revlogs:
1538 processed revlogs:
1541 - all-filelogs
1539 - all-filelogs
1542 - changelog
1540 - changelog
1543 - manifest
1541 - manifest
1544
1542
1545 $ hg debugrequires
1543 $ hg debugrequires
1546 dotencode
1544 dotencode
1547 fncache
1545 fncache
1548 generaldelta
1546 generaldelta
1549 persistent-nodemap (rust !)
1547 persistent-nodemap (rust !)
1550 revlogv1
1548 revlogv1
1551 share-safe
1549 share-safe
1552 store
1550 store
1553
1551
1554 #if zstd
1552 #if zstd
1555
1553
1556 Check upgrading to a zstd revlog
1554 Check upgrading to a zstd revlog
1557 --------------------------------
1555 --------------------------------
1558
1556
1559 upgrade
1557 upgrade
1560
1558
1561 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1559 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1562 upgrade will perform the following actions:
1560 upgrade will perform the following actions:
1563
1561
1564 requirements
1562 requirements
1565 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1563 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1566 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1564 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1567 added: revlog-compression-zstd, sparserevlog
1565 added: revlog-compression-zstd, sparserevlog
1568
1566
1569 processed revlogs:
1567 processed revlogs:
1570 - all-filelogs
1568 - all-filelogs
1571 - changelog
1569 - changelog
1572 - manifest
1570 - manifest
1573
1571
1574 $ hg debugformat -v
1572 $ hg debugformat -v
1575 format-variant repo config default
1573 format-variant repo config default
1576 fncache: yes yes yes
1574 fncache: yes yes yes
1577 dirstate-v2: no no no
1575 dirstate-v2: no no no
1578 tracked-hint: no no no
1576 tracked-hint: no no no
1579 dotencode: yes yes yes
1577 dotencode: yes yes yes
1580 generaldelta: yes yes yes
1578 generaldelta: yes yes yes
1581 share-safe: yes yes yes
1579 share-safe: yes yes yes
1582 sparserevlog: yes yes yes
1580 sparserevlog: yes yes yes
1583 persistent-nodemap: no no no (no-rust !)
1581 persistent-nodemap: no no no (no-rust !)
1584 persistent-nodemap: yes yes no (rust !)
1582 persistent-nodemap: yes yes no (rust !)
1585 copies-sdc: no no no
1583 copies-sdc: no no no
1586 revlog-v2: no no no
1584 revlog-v2: no no no
1587 changelog-v2: no no no
1585 changelog-v2: no no no
1588 plain-cl-delta: yes yes yes
1586 plain-cl-delta: yes yes yes
1589 compression: zlib zlib zlib (no-zstd !)
1587 compression: zlib zlib zlib (no-zstd !)
1590 compression: zstd zlib zstd (zstd !)
1588 compression: zstd zlib zstd (zstd !)
1591 compression-level: default default default
1589 compression-level: default default default
1592 $ hg debugrequires
1590 $ hg debugrequires
1593 dotencode
1591 dotencode
1594 fncache
1592 fncache
1595 generaldelta
1593 generaldelta
1596 persistent-nodemap (rust !)
1594 persistent-nodemap (rust !)
1597 revlog-compression-zstd
1595 revlog-compression-zstd
1598 revlogv1
1596 revlogv1
1599 share-safe
1597 share-safe
1600 sparserevlog
1598 sparserevlog
1601 store
1599 store
1602
1600
1603 downgrade
1601 downgrade
1604
1602
1605 $ hg debugupgraderepo --run --no-backup --quiet
1603 $ hg debugupgraderepo --run --no-backup --quiet
1606 upgrade will perform the following actions:
1604 upgrade will perform the following actions:
1607
1605
1608 requirements
1606 requirements
1609 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1607 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1610 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1608 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1611 removed: revlog-compression-zstd
1609 removed: revlog-compression-zstd
1612
1610
1613 processed revlogs:
1611 processed revlogs:
1614 - all-filelogs
1612 - all-filelogs
1615 - changelog
1613 - changelog
1616 - manifest
1614 - manifest
1617
1615
1618 $ hg debugformat -v
1616 $ hg debugformat -v
1619 format-variant repo config default
1617 format-variant repo config default
1620 fncache: yes yes yes
1618 fncache: yes yes yes
1621 dirstate-v2: no no no
1619 dirstate-v2: no no no
1622 tracked-hint: no no no
1620 tracked-hint: no no no
1623 dotencode: yes yes yes
1621 dotencode: yes yes yes
1624 generaldelta: yes yes yes
1622 generaldelta: yes yes yes
1625 share-safe: yes yes yes
1623 share-safe: yes yes yes
1626 sparserevlog: yes yes yes
1624 sparserevlog: yes yes yes
1627 persistent-nodemap: no no no (no-rust !)
1625 persistent-nodemap: no no no (no-rust !)
1628 persistent-nodemap: yes yes no (rust !)
1626 persistent-nodemap: yes yes no (rust !)
1629 copies-sdc: no no no
1627 copies-sdc: no no no
1630 revlog-v2: no no no
1628 revlog-v2: no no no
1631 changelog-v2: no no no
1629 changelog-v2: no no no
1632 plain-cl-delta: yes yes yes
1630 plain-cl-delta: yes yes yes
1633 compression: zlib zlib zlib (no-zstd !)
1631 compression: zlib zlib zlib (no-zstd !)
1634 compression: zlib zlib zstd (zstd !)
1632 compression: zlib zlib zstd (zstd !)
1635 compression-level: default default default
1633 compression-level: default default default
1636 $ hg debugrequires
1634 $ hg debugrequires
1637 dotencode
1635 dotencode
1638 fncache
1636 fncache
1639 generaldelta
1637 generaldelta
1640 persistent-nodemap (rust !)
1638 persistent-nodemap (rust !)
1641 revlogv1
1639 revlogv1
1642 share-safe
1640 share-safe
1643 sparserevlog
1641 sparserevlog
1644 store
1642 store
1645
1643
1646 upgrade from hgrc
1644 upgrade from hgrc
1647
1645
1648 $ cat >> .hg/hgrc << EOF
1646 $ cat >> .hg/hgrc << EOF
1649 > [format]
1647 > [format]
1650 > revlog-compression=zstd
1648 > revlog-compression=zstd
1651 > EOF
1649 > EOF
1652 $ hg debugupgraderepo --run --no-backup --quiet
1650 $ hg debugupgraderepo --run --no-backup --quiet
1653 upgrade will perform the following actions:
1651 upgrade will perform the following actions:
1654
1652
1655 requirements
1653 requirements
1656 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1654 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1657 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1655 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1658 added: revlog-compression-zstd
1656 added: revlog-compression-zstd
1659
1657
1660 processed revlogs:
1658 processed revlogs:
1661 - all-filelogs
1659 - all-filelogs
1662 - changelog
1660 - changelog
1663 - manifest
1661 - manifest
1664
1662
1665 $ hg debugformat -v
1663 $ hg debugformat -v
1666 format-variant repo config default
1664 format-variant repo config default
1667 fncache: yes yes yes
1665 fncache: yes yes yes
1668 dirstate-v2: no no no
1666 dirstate-v2: no no no
1669 tracked-hint: no no no
1667 tracked-hint: no no no
1670 dotencode: yes yes yes
1668 dotencode: yes yes yes
1671 generaldelta: yes yes yes
1669 generaldelta: yes yes yes
1672 share-safe: yes yes yes
1670 share-safe: yes yes yes
1673 sparserevlog: yes yes yes
1671 sparserevlog: yes yes yes
1674 persistent-nodemap: no no no (no-rust !)
1672 persistent-nodemap: no no no (no-rust !)
1675 persistent-nodemap: yes yes no (rust !)
1673 persistent-nodemap: yes yes no (rust !)
1676 copies-sdc: no no no
1674 copies-sdc: no no no
1677 revlog-v2: no no no
1675 revlog-v2: no no no
1678 changelog-v2: no no no
1676 changelog-v2: no no no
1679 plain-cl-delta: yes yes yes
1677 plain-cl-delta: yes yes yes
1680 compression: zlib zlib zlib (no-zstd !)
1678 compression: zlib zlib zlib (no-zstd !)
1681 compression: zstd zstd zstd (zstd !)
1679 compression: zstd zstd zstd (zstd !)
1682 compression-level: default default default
1680 compression-level: default default default
1683 $ hg debugrequires
1681 $ hg debugrequires
1684 dotencode
1682 dotencode
1685 fncache
1683 fncache
1686 generaldelta
1684 generaldelta
1687 persistent-nodemap (rust !)
1685 persistent-nodemap (rust !)
1688 revlog-compression-zstd
1686 revlog-compression-zstd
1689 revlogv1
1687 revlogv1
1690 share-safe
1688 share-safe
1691 sparserevlog
1689 sparserevlog
1692 store
1690 store
1693
1691
1694 #endif
1692 #endif
1695
1693
1696 Check upgrading to a revlog format supporting sidedata
1694 Check upgrading to a revlog format supporting sidedata
1697 ------------------------------------------------------
1695 ------------------------------------------------------
1698
1696
1699 upgrade
1697 upgrade
1700
1698
1701 $ hg debugsidedata -c 0
1699 $ hg debugsidedata -c 0
1702 $ hg --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1700 $ hg --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1703 upgrade will perform the following actions:
1701 upgrade will perform the following actions:
1704
1702
1705 requirements
1703 requirements
1706 preserved: dotencode, fncache, generaldelta, share-safe, store (no-zstd !)
1704 preserved: dotencode, fncache, generaldelta, share-safe, store (no-zstd !)
1707 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1705 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1708 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1706 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1709 removed: revlogv1
1707 removed: revlogv1
1710 added: exp-revlogv2.2 (zstd !)
1708 added: exp-revlogv2.2 (zstd !)
1711 added: exp-revlogv2.2, sparserevlog (no-zstd !)
1709 added: exp-revlogv2.2, sparserevlog (no-zstd !)
1712
1710
1713 processed revlogs:
1711 processed revlogs:
1714 - all-filelogs
1712 - all-filelogs
1715 - changelog
1713 - changelog
1716 - manifest
1714 - manifest
1717
1715
1718 $ hg debugformat -v
1716 $ hg debugformat -v
1719 format-variant repo config default
1717 format-variant repo config default
1720 fncache: yes yes yes
1718 fncache: yes yes yes
1721 dirstate-v2: no no no
1719 dirstate-v2: no no no
1722 tracked-hint: no no no
1720 tracked-hint: no no no
1723 dotencode: yes yes yes
1721 dotencode: yes yes yes
1724 generaldelta: yes yes yes
1722 generaldelta: yes yes yes
1725 share-safe: yes yes yes
1723 share-safe: yes yes yes
1726 sparserevlog: yes yes yes
1724 sparserevlog: yes yes yes
1727 persistent-nodemap: no no no (no-rust !)
1725 persistent-nodemap: no no no (no-rust !)
1728 persistent-nodemap: yes yes no (rust !)
1726 persistent-nodemap: yes yes no (rust !)
1729 copies-sdc: no no no
1727 copies-sdc: no no no
1730 revlog-v2: yes no no
1728 revlog-v2: yes no no
1731 changelog-v2: no no no
1729 changelog-v2: no no no
1732 plain-cl-delta: yes yes yes
1730 plain-cl-delta: yes yes yes
1733 compression: zlib zlib zlib (no-zstd !)
1731 compression: zlib zlib zlib (no-zstd !)
1734 compression: zstd zstd zstd (zstd !)
1732 compression: zstd zstd zstd (zstd !)
1735 compression-level: default default default
1733 compression-level: default default default
1736 $ hg debugrequires
1734 $ hg debugrequires
1737 dotencode
1735 dotencode
1738 exp-revlogv2.2
1736 exp-revlogv2.2
1739 fncache
1737 fncache
1740 generaldelta
1738 generaldelta
1741 persistent-nodemap (rust !)
1739 persistent-nodemap (rust !)
1742 revlog-compression-zstd (zstd !)
1740 revlog-compression-zstd (zstd !)
1743 share-safe
1741 share-safe
1744 sparserevlog
1742 sparserevlog
1745 store
1743 store
1746 $ hg debugsidedata -c 0
1744 $ hg debugsidedata -c 0
1747 2 sidedata entries
1745 2 sidedata entries
1748 entry-0001 size 4
1746 entry-0001 size 4
1749 entry-0002 size 32
1747 entry-0002 size 32
1750
1748
1751 downgrade
1749 downgrade
1752
1750
1753 $ hg debugupgraderepo --config experimental.revlogv2=no --run --no-backup --quiet
1751 $ hg debugupgraderepo --config experimental.revlogv2=no --run --no-backup --quiet
1754 upgrade will perform the following actions:
1752 upgrade will perform the following actions:
1755
1753
1756 requirements
1754 requirements
1757 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1755 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1758 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1756 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1759 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1757 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1760 removed: exp-revlogv2.2
1758 removed: exp-revlogv2.2
1761 added: revlogv1
1759 added: revlogv1
1762
1760
1763 processed revlogs:
1761 processed revlogs:
1764 - all-filelogs
1762 - all-filelogs
1765 - changelog
1763 - changelog
1766 - manifest
1764 - manifest
1767
1765
1768 $ hg debugformat -v
1766 $ hg debugformat -v
1769 format-variant repo config default
1767 format-variant repo config default
1770 fncache: yes yes yes
1768 fncache: yes yes yes
1771 dirstate-v2: no no no
1769 dirstate-v2: no no no
1772 tracked-hint: no no no
1770 tracked-hint: no no no
1773 dotencode: yes yes yes
1771 dotencode: yes yes yes
1774 generaldelta: yes yes yes
1772 generaldelta: yes yes yes
1775 share-safe: yes yes yes
1773 share-safe: yes yes yes
1776 sparserevlog: yes yes yes
1774 sparserevlog: yes yes yes
1777 persistent-nodemap: no no no (no-rust !)
1775 persistent-nodemap: no no no (no-rust !)
1778 persistent-nodemap: yes yes no (rust !)
1776 persistent-nodemap: yes yes no (rust !)
1779 copies-sdc: no no no
1777 copies-sdc: no no no
1780 revlog-v2: no no no
1778 revlog-v2: no no no
1781 changelog-v2: no no no
1779 changelog-v2: no no no
1782 plain-cl-delta: yes yes yes
1780 plain-cl-delta: yes yes yes
1783 compression: zlib zlib zlib (no-zstd !)
1781 compression: zlib zlib zlib (no-zstd !)
1784 compression: zstd zstd zstd (zstd !)
1782 compression: zstd zstd zstd (zstd !)
1785 compression-level: default default default
1783 compression-level: default default default
1786 $ hg debugrequires
1784 $ hg debugrequires
1787 dotencode
1785 dotencode
1788 fncache
1786 fncache
1789 generaldelta
1787 generaldelta
1790 persistent-nodemap (rust !)
1788 persistent-nodemap (rust !)
1791 revlog-compression-zstd (zstd !)
1789 revlog-compression-zstd (zstd !)
1792 revlogv1
1790 revlogv1
1793 share-safe
1791 share-safe
1794 sparserevlog
1792 sparserevlog
1795 store
1793 store
1796 $ hg debugsidedata -c 0
1794 $ hg debugsidedata -c 0
1797
1795
1798 upgrade from hgrc
1796 upgrade from hgrc
1799
1797
1800 $ cat >> .hg/hgrc << EOF
1798 $ cat >> .hg/hgrc << EOF
1801 > [experimental]
1799 > [experimental]
1802 > revlogv2=enable-unstable-format-and-corrupt-my-data
1800 > revlogv2=enable-unstable-format-and-corrupt-my-data
1803 > EOF
1801 > EOF
1804 $ hg debugupgraderepo --run --no-backup --quiet
1802 $ hg debugupgraderepo --run --no-backup --quiet
1805 upgrade will perform the following actions:
1803 upgrade will perform the following actions:
1806
1804
1807 requirements
1805 requirements
1808 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1806 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1809 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1807 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1810 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1808 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1811 removed: revlogv1
1809 removed: revlogv1
1812 added: exp-revlogv2.2
1810 added: exp-revlogv2.2
1813
1811
1814 processed revlogs:
1812 processed revlogs:
1815 - all-filelogs
1813 - all-filelogs
1816 - changelog
1814 - changelog
1817 - manifest
1815 - manifest
1818
1816
1819 $ hg debugformat -v
1817 $ hg debugformat -v
1820 format-variant repo config default
1818 format-variant repo config default
1821 fncache: yes yes yes
1819 fncache: yes yes yes
1822 dirstate-v2: no no no
1820 dirstate-v2: no no no
1823 tracked-hint: no no no
1821 tracked-hint: no no no
1824 dotencode: yes yes yes
1822 dotencode: yes yes yes
1825 generaldelta: yes yes yes
1823 generaldelta: yes yes yes
1826 share-safe: yes yes yes
1824 share-safe: yes yes yes
1827 sparserevlog: yes yes yes
1825 sparserevlog: yes yes yes
1828 persistent-nodemap: no no no (no-rust !)
1826 persistent-nodemap: no no no (no-rust !)
1829 persistent-nodemap: yes yes no (rust !)
1827 persistent-nodemap: yes yes no (rust !)
1830 copies-sdc: no no no
1828 copies-sdc: no no no
1831 revlog-v2: yes yes no
1829 revlog-v2: yes yes no
1832 changelog-v2: no no no
1830 changelog-v2: no no no
1833 plain-cl-delta: yes yes yes
1831 plain-cl-delta: yes yes yes
1834 compression: zlib zlib zlib (no-zstd !)
1832 compression: zlib zlib zlib (no-zstd !)
1835 compression: zstd zstd zstd (zstd !)
1833 compression: zstd zstd zstd (zstd !)
1836 compression-level: default default default
1834 compression-level: default default default
1837 $ hg debugrequires
1835 $ hg debugrequires
1838 dotencode
1836 dotencode
1839 exp-revlogv2.2
1837 exp-revlogv2.2
1840 fncache
1838 fncache
1841 generaldelta
1839 generaldelta
1842 persistent-nodemap (rust !)
1840 persistent-nodemap (rust !)
1843 revlog-compression-zstd (zstd !)
1841 revlog-compression-zstd (zstd !)
1844 share-safe
1842 share-safe
1845 sparserevlog
1843 sparserevlog
1846 store
1844 store
1847 $ hg debugsidedata -c 0
1845 $ hg debugsidedata -c 0
1848
1846
1849 Demonstrate that nothing to perform upgrade will still run all the way through
1847 Demonstrate that nothing to perform upgrade will still run all the way through
1850
1848
1851 $ hg debugupgraderepo --run
1849 $ hg debugupgraderepo --run
1852 nothing to do
1850 nothing to do
1853
1851
1854 #if no-rust
1852 #if no-rust
1855
1853
1856 $ cat << EOF >> $HGRCPATH
1854 $ cat << EOF >> $HGRCPATH
1857 > [storage]
1855 > [storage]
1858 > dirstate-v2.slow-path = allow
1856 > dirstate-v2.slow-path = allow
1859 > EOF
1857 > EOF
1860
1858
1861 #endif
1859 #endif
1862
1860
1863 Upgrade to dirstate-v2
1861 Upgrade to dirstate-v2
1864
1862
1865 $ hg debugformat -v --config format.use-dirstate-v2=1 | grep dirstate-v2
1863 $ hg debugformat -v --config format.use-dirstate-v2=1 | grep dirstate-v2
1866 dirstate-v2: no yes no
1864 dirstate-v2: no yes no
1867 $ hg debugupgraderepo --config format.use-dirstate-v2=1 --run
1865 $ hg debugupgraderepo --config format.use-dirstate-v2=1 --run
1868 upgrade will perform the following actions:
1866 upgrade will perform the following actions:
1869
1867
1870 requirements
1868 requirements
1871 preserved: * (glob)
1869 preserved: * (glob)
1872 added: dirstate-v2
1870 added: dirstate-v2
1873
1871
1874 dirstate-v2
1872 dirstate-v2
1875 "hg status" will be faster
1873 "hg status" will be faster
1876
1874
1877 no revlogs to process
1875 no revlogs to process
1878
1876
1879 beginning upgrade...
1877 beginning upgrade...
1880 repository locked and read-only
1878 repository locked and read-only
1881 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1879 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1882 (it is safe to interrupt this process any time before data migration completes)
1880 (it is safe to interrupt this process any time before data migration completes)
1883 upgrading to dirstate-v2 from v1
1881 upgrading to dirstate-v2 from v1
1884 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1882 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1885 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1883 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1886 $ ls .hg/upgradebackup.*/dirstate
1884 $ ls .hg/upgradebackup.*/dirstate
1887 .hg/upgradebackup.*/dirstate (glob)
1885 .hg/upgradebackup.*/dirstate (glob)
1888 $ hg debugformat -v | grep dirstate-v2
1886 $ hg debugformat -v | grep dirstate-v2
1889 dirstate-v2: yes no no
1887 dirstate-v2: yes no no
1890 $ hg status
1888 $ hg status
1891 $ dd bs=12 count=1 if=.hg/dirstate 2> /dev/null
1889 $ dd bs=12 count=1 if=.hg/dirstate 2> /dev/null
1892 dirstate-v2
1890 dirstate-v2
1893
1891
1894 Downgrade from dirstate-v2
1892 Downgrade from dirstate-v2
1895
1893
1896 $ hg debugupgraderepo --run
1894 $ hg debugupgraderepo --run
1897 upgrade will perform the following actions:
1895 upgrade will perform the following actions:
1898
1896
1899 requirements
1897 requirements
1900 preserved: * (glob)
1898 preserved: * (glob)
1901 removed: dirstate-v2
1899 removed: dirstate-v2
1902
1900
1903 no revlogs to process
1901 no revlogs to process
1904
1902
1905 beginning upgrade...
1903 beginning upgrade...
1906 repository locked and read-only
1904 repository locked and read-only
1907 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1905 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1908 (it is safe to interrupt this process any time before data migration completes)
1906 (it is safe to interrupt this process any time before data migration completes)
1909 downgrading from dirstate-v2 to v1
1907 downgrading from dirstate-v2 to v1
1910 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1908 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1911 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1909 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1912 $ hg debugformat -v | grep dirstate-v2
1910 $ hg debugformat -v | grep dirstate-v2
1913 dirstate-v2: no no no
1911 dirstate-v2: no no no
1914 $ hg status
1912 $ hg status
1915
1913
1916 $ cd ..
1914 $ cd ..
1917
1915
1918 dirstate-v2: upgrade and downgrade from and empty repository:
1916 dirstate-v2: upgrade and downgrade from and empty repository:
1919 -------------------------------------------------------------
1917 -------------------------------------------------------------
1920
1918
1921 $ hg init --config format.use-dirstate-v2=no dirstate-v2-empty
1919 $ hg init --config format.use-dirstate-v2=no dirstate-v2-empty
1922 $ cd dirstate-v2-empty
1920 $ cd dirstate-v2-empty
1923 $ hg debugformat | grep dirstate-v2
1921 $ hg debugformat | grep dirstate-v2
1924 dirstate-v2: no
1922 dirstate-v2: no
1925
1923
1926 upgrade
1924 upgrade
1927
1925
1928 $ hg debugupgraderepo --run --config format.use-dirstate-v2=yes
1926 $ hg debugupgraderepo --run --config format.use-dirstate-v2=yes
1929 upgrade will perform the following actions:
1927 upgrade will perform the following actions:
1930
1928
1931 requirements
1929 requirements
1932 preserved: * (glob)
1930 preserved: * (glob)
1933 added: dirstate-v2
1931 added: dirstate-v2
1934
1932
1935 dirstate-v2
1933 dirstate-v2
1936 "hg status" will be faster
1934 "hg status" will be faster
1937
1935
1938 no revlogs to process
1936 no revlogs to process
1939
1937
1940 beginning upgrade...
1938 beginning upgrade...
1941 repository locked and read-only
1939 repository locked and read-only
1942 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1940 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1943 (it is safe to interrupt this process any time before data migration completes)
1941 (it is safe to interrupt this process any time before data migration completes)
1944 upgrading to dirstate-v2 from v1
1942 upgrading to dirstate-v2 from v1
1945 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1943 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1946 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1944 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1947 $ hg debugformat | grep dirstate-v2
1945 $ hg debugformat | grep dirstate-v2
1948 dirstate-v2: yes
1946 dirstate-v2: yes
1949
1947
1950 downgrade
1948 downgrade
1951
1949
1952 $ hg debugupgraderepo --run --config format.use-dirstate-v2=no
1950 $ hg debugupgraderepo --run --config format.use-dirstate-v2=no
1953 upgrade will perform the following actions:
1951 upgrade will perform the following actions:
1954
1952
1955 requirements
1953 requirements
1956 preserved: * (glob)
1954 preserved: * (glob)
1957 removed: dirstate-v2
1955 removed: dirstate-v2
1958
1956
1959 no revlogs to process
1957 no revlogs to process
1960
1958
1961 beginning upgrade...
1959 beginning upgrade...
1962 repository locked and read-only
1960 repository locked and read-only
1963 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1961 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1964 (it is safe to interrupt this process any time before data migration completes)
1962 (it is safe to interrupt this process any time before data migration completes)
1965 downgrading from dirstate-v2 to v1
1963 downgrading from dirstate-v2 to v1
1966 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1964 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1967 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1965 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1968 $ hg debugformat | grep dirstate-v2
1966 $ hg debugformat | grep dirstate-v2
1969 dirstate-v2: no
1967 dirstate-v2: no
1970
1968
1971 $ cd ..
1969 $ cd ..
1972
1970
1973 Test automatic upgrade/downgrade
1971 Test automatic upgrade/downgrade
1974 ================================
1972 ================================
1975
1973
1976
1974
1977 For dirstate v2
1975 For dirstate v2
1978 ---------------
1976 ---------------
1979
1977
1980 create an initial repository
1978 create an initial repository
1981
1979
1982 $ hg init auto-upgrade \
1980 $ hg init auto-upgrade \
1983 > --config format.use-dirstate-v2=no \
1981 > --config format.use-dirstate-v2=no \
1984 > --config format.use-dirstate-tracked-hint=yes \
1982 > --config format.use-dirstate-tracked-hint=yes \
1985 > --config format.use-share-safe=no
1983 > --config format.use-share-safe=no
1986 $ hg debugbuilddag -R auto-upgrade --new-file .+5
1984 $ hg debugbuilddag -R auto-upgrade --new-file .+5
1987 $ hg -R auto-upgrade update
1985 $ hg -R auto-upgrade update
1988 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
1986 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
1989 $ hg debugformat -R auto-upgrade | grep dirstate-v2
1987 $ hg debugformat -R auto-upgrade | grep dirstate-v2
1990 dirstate-v2: no
1988 dirstate-v2: no
1991
1989
1992 upgrade it to dirstate-v2 automatically
1990 upgrade it to dirstate-v2 automatically
1993
1991
1994 $ hg status -R auto-upgrade \
1992 $ hg status -R auto-upgrade \
1995 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
1993 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
1996 > --config format.use-dirstate-v2=yes
1994 > --config format.use-dirstate-v2=yes
1997 automatically upgrading repository to the `dirstate-v2` feature
1995 automatically upgrading repository to the `dirstate-v2` feature
1998 (see `hg help config.format.use-dirstate-v2` for details)
1996 (see `hg help config.format.use-dirstate-v2` for details)
1999 $ hg debugformat -R auto-upgrade | grep dirstate-v2
1997 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2000 dirstate-v2: yes
1998 dirstate-v2: yes
2001
1999
2002 downgrade it from dirstate-v2 automatically
2000 downgrade it from dirstate-v2 automatically
2003
2001
2004 $ hg status -R auto-upgrade \
2002 $ hg status -R auto-upgrade \
2005 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2003 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2006 > --config format.use-dirstate-v2=no
2004 > --config format.use-dirstate-v2=no
2007 automatically downgrading repository from the `dirstate-v2` feature
2005 automatically downgrading repository from the `dirstate-v2` feature
2008 (see `hg help config.format.use-dirstate-v2` for details)
2006 (see `hg help config.format.use-dirstate-v2` for details)
2009 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2007 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2010 dirstate-v2: no
2008 dirstate-v2: no
2011
2009
2012
2010
2013 For multiple change at the same time
2011 For multiple change at the same time
2014 ------------------------------------
2012 ------------------------------------
2015
2013
2016 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2014 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2017 dirstate-v2: no
2015 dirstate-v2: no
2018 tracked-hint: yes
2016 tracked-hint: yes
2019 share-safe: no
2017 share-safe: no
2020
2018
2021 $ hg status -R auto-upgrade \
2019 $ hg status -R auto-upgrade \
2022 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2020 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2023 > --config format.use-dirstate-v2=yes \
2021 > --config format.use-dirstate-v2=yes \
2024 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2022 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2025 > --config format.use-dirstate-tracked-hint=no\
2023 > --config format.use-dirstate-tracked-hint=no\
2026 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2024 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2027 > --config format.use-share-safe=yes
2025 > --config format.use-share-safe=yes
2028 automatically upgrading repository to the `dirstate-v2` feature
2026 automatically upgrading repository to the `dirstate-v2` feature
2029 (see `hg help config.format.use-dirstate-v2` for details)
2027 (see `hg help config.format.use-dirstate-v2` for details)
2030 automatically upgrading repository to the `share-safe` feature
2028 automatically upgrading repository to the `share-safe` feature
2031 (see `hg help config.format.use-share-safe` for details)
2029 (see `hg help config.format.use-share-safe` for details)
2032 automatically downgrading repository from the `tracked-hint` feature
2030 automatically downgrading repository from the `tracked-hint` feature
2033 (see `hg help config.format.use-dirstate-tracked-hint` for details)
2031 (see `hg help config.format.use-dirstate-tracked-hint` for details)
2034 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2032 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2035 dirstate-v2: yes
2033 dirstate-v2: yes
2036 tracked-hint: no
2034 tracked-hint: no
2037 share-safe: yes
2035 share-safe: yes
2038
2036
2039 Quiet upgrade and downgrade
2037 Quiet upgrade and downgrade
2040 ---------------------------
2038 ---------------------------
2041
2039
2042
2040
2043 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2041 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2044 dirstate-v2: yes
2042 dirstate-v2: yes
2045 tracked-hint: no
2043 tracked-hint: no
2046 share-safe: yes
2044 share-safe: yes
2047 $ hg status -R auto-upgrade \
2045 $ hg status -R auto-upgrade \
2048 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2046 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2049 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2047 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2050 > --config format.use-dirstate-v2=no \
2048 > --config format.use-dirstate-v2=no \
2051 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2049 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2052 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2050 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2053 > --config format.use-dirstate-tracked-hint=yes \
2051 > --config format.use-dirstate-tracked-hint=yes \
2054 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2052 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2055 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2053 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2056 > --config format.use-share-safe=no
2054 > --config format.use-share-safe=no
2057
2055
2058 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2056 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2059 dirstate-v2: no
2057 dirstate-v2: no
2060 tracked-hint: yes
2058 tracked-hint: yes
2061 share-safe: no
2059 share-safe: no
2062
2060
2063 $ hg status -R auto-upgrade \
2061 $ hg status -R auto-upgrade \
2064 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2062 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2065 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2063 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2066 > --config format.use-dirstate-v2=yes \
2064 > --config format.use-dirstate-v2=yes \
2067 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2065 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2068 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2066 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2069 > --config format.use-dirstate-tracked-hint=no\
2067 > --config format.use-dirstate-tracked-hint=no\
2070 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2068 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2071 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2069 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet=yes \
2072 > --config format.use-share-safe=yes
2070 > --config format.use-share-safe=yes
2073 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2071 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2074 dirstate-v2: yes
2072 dirstate-v2: yes
2075 tracked-hint: no
2073 tracked-hint: no
2076 share-safe: yes
2074 share-safe: yes
2077
2075
2078 Attempting Auto-upgrade on a read-only repository
2076 Attempting Auto-upgrade on a read-only repository
2079 -------------------------------------------------
2077 -------------------------------------------------
2080
2078
2081 $ chmod -R a-w auto-upgrade
2079 $ chmod -R a-w auto-upgrade
2082
2080
2083 $ hg status -R auto-upgrade \
2081 $ hg status -R auto-upgrade \
2084 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2082 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2085 > --config format.use-dirstate-v2=no
2083 > --config format.use-dirstate-v2=no
2086 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2084 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2087 dirstate-v2: yes
2085 dirstate-v2: yes
2088
2086
2089 $ chmod -R u+w auto-upgrade
2087 $ chmod -R u+w auto-upgrade
2090
2088
2091 Attempting Auto-upgrade on a locked repository
2089 Attempting Auto-upgrade on a locked repository
2092 ----------------------------------------------
2090 ----------------------------------------------
2093
2091
2094 $ hg -R auto-upgrade debuglock --set-lock --quiet &
2092 $ hg -R auto-upgrade debuglock --set-lock --quiet &
2095 $ echo $! >> $DAEMON_PIDS
2093 $ echo $! >> $DAEMON_PIDS
2096 $ $RUNTESTDIR/testlib/wait-on-file 10 auto-upgrade/.hg/store/lock
2094 $ $RUNTESTDIR/testlib/wait-on-file 10 auto-upgrade/.hg/store/lock
2097 $ hg status -R auto-upgrade \
2095 $ hg status -R auto-upgrade \
2098 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2096 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2099 > --config format.use-dirstate-v2=no
2097 > --config format.use-dirstate-v2=no
2100 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2098 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2101 dirstate-v2: yes
2099 dirstate-v2: yes
2102
2100
2103 $ killdaemons.py
2101 $ killdaemons.py
General Comments 0
You need to be logged in to leave comments. Login now