narrow: add capabilities for local repos, not just remote peers...
Charles Chamberlain -
r47664:63100115 default
@@ -1,3755 +1,3758 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 sha1nodeconstants,
24 sha1nodeconstants,
25 short,
25 short,
26 )
26 )
27 from .pycompat import (
27 from .pycompat import (
28 delattr,
28 delattr,
29 getattr,
29 getattr,
30 )
30 )
31 from . import (
31 from . import (
32 bookmarks,
32 bookmarks,
33 branchmap,
33 branchmap,
34 bundle2,
34 bundle2,
35 bundlecaches,
35 bundlecaches,
36 changegroup,
36 changegroup,
37 color,
37 color,
38 commit,
38 commit,
39 context,
39 context,
40 dirstate,
40 dirstate,
41 dirstateguard,
41 dirstateguard,
42 discovery,
42 discovery,
43 encoding,
43 encoding,
44 error,
44 error,
45 exchange,
45 exchange,
46 extensions,
46 extensions,
47 filelog,
47 filelog,
48 hook,
48 hook,
49 lock as lockmod,
49 lock as lockmod,
50 match as matchmod,
50 match as matchmod,
51 mergestate as mergestatemod,
51 mergestate as mergestatemod,
52 mergeutil,
52 mergeutil,
53 metadata as metadatamod,
53 metadata as metadatamod,
54 namespaces,
54 namespaces,
55 narrowspec,
55 narrowspec,
56 obsolete,
56 obsolete,
57 pathutil,
57 pathutil,
58 phases,
58 phases,
59 pushkey,
59 pushkey,
60 pycompat,
60 pycompat,
61 rcutil,
61 rcutil,
62 repoview,
62 repoview,
63 requirements as requirementsmod,
63 requirements as requirementsmod,
64 revlog,
64 revlog,
65 revset,
65 revset,
66 revsetlang,
66 revsetlang,
67 scmutil,
67 scmutil,
68 sparse,
68 sparse,
69 store as storemod,
69 store as storemod,
70 subrepoutil,
70 subrepoutil,
71 tags as tagsmod,
71 tags as tagsmod,
72 transaction,
72 transaction,
73 txnutil,
73 txnutil,
74 util,
74 util,
75 vfs as vfsmod,
75 vfs as vfsmod,
76 wireprototypes,
76 )
77 )
77
78
78 from .interfaces import (
79 from .interfaces import (
79 repository,
80 repository,
80 util as interfaceutil,
81 util as interfaceutil,
81 )
82 )
82
83
83 from .utils import (
84 from .utils import (
84 hashutil,
85 hashutil,
85 procutil,
86 procutil,
86 stringutil,
87 stringutil,
87 )
88 )
88
89
89 from .revlogutils import (
90 from .revlogutils import (
90 concurrency_checker as revlogchecker,
91 concurrency_checker as revlogchecker,
91 constants as revlogconst,
92 constants as revlogconst,
92 )
93 )
93
94
94 release = lockmod.release
95 release = lockmod.release
95 urlerr = util.urlerr
96 urlerr = util.urlerr
96 urlreq = util.urlreq
97 urlreq = util.urlreq
97
98
98 # set of (path, vfs-location) tuples. vfs-location is:
99 # set of (path, vfs-location) tuples. vfs-location is:
99 # - 'plain' for vfs relative paths
100 # - 'plain' for vfs relative paths
100 # - '' for svfs relative paths
101 # - '' for svfs relative paths
101 _cachedfiles = set()
102 _cachedfiles = set()
102
103
103
104
104 class _basefilecache(scmutil.filecache):
105 class _basefilecache(scmutil.filecache):
105 """All filecache usage on repo are done for logic that should be unfiltered"""
106 """All filecache usage on repo are done for logic that should be unfiltered"""
106
107
107 def __get__(self, repo, type=None):
108 def __get__(self, repo, type=None):
108 if repo is None:
109 if repo is None:
109 return self
110 return self
110 # proxy to unfiltered __dict__ since filtered repo has no entry
111 # proxy to unfiltered __dict__ since filtered repo has no entry
111 unfi = repo.unfiltered()
112 unfi = repo.unfiltered()
112 try:
113 try:
113 return unfi.__dict__[self.sname]
114 return unfi.__dict__[self.sname]
114 except KeyError:
115 except KeyError:
115 pass
116 pass
116 return super(_basefilecache, self).__get__(unfi, type)
117 return super(_basefilecache, self).__get__(unfi, type)
117
118
118 def set(self, repo, value):
119 def set(self, repo, value):
119 return super(_basefilecache, self).set(repo.unfiltered(), value)
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
120
121
121
122
122 class repofilecache(_basefilecache):
123 class repofilecache(_basefilecache):
123 """filecache for files in .hg but outside of .hg/store"""
124 """filecache for files in .hg but outside of .hg/store"""
124
125
125 def __init__(self, *paths):
126 def __init__(self, *paths):
126 super(repofilecache, self).__init__(*paths)
127 super(repofilecache, self).__init__(*paths)
127 for path in paths:
128 for path in paths:
128 _cachedfiles.add((path, b'plain'))
129 _cachedfiles.add((path, b'plain'))
129
130
130 def join(self, obj, fname):
131 def join(self, obj, fname):
131 return obj.vfs.join(fname)
132 return obj.vfs.join(fname)
132
133
133
134
134 class storecache(_basefilecache):
135 class storecache(_basefilecache):
135 """filecache for files in the store"""
136 """filecache for files in the store"""
136
137
137 def __init__(self, *paths):
138 def __init__(self, *paths):
138 super(storecache, self).__init__(*paths)
139 super(storecache, self).__init__(*paths)
139 for path in paths:
140 for path in paths:
140 _cachedfiles.add((path, b''))
141 _cachedfiles.add((path, b''))
141
142
142 def join(self, obj, fname):
143 def join(self, obj, fname):
143 return obj.sjoin(fname)
144 return obj.sjoin(fname)
144
145
145
146
146 class mixedrepostorecache(_basefilecache):
147 class mixedrepostorecache(_basefilecache):
147 """filecache for a mix files in .hg/store and outside"""
148 """filecache for a mix files in .hg/store and outside"""
148
149
149 def __init__(self, *pathsandlocations):
150 def __init__(self, *pathsandlocations):
150 # scmutil.filecache only uses the path for passing back into our
151 # scmutil.filecache only uses the path for passing back into our
151 # join(), so we can safely pass a list of paths and locations
152 # join(), so we can safely pass a list of paths and locations
152 super(mixedrepostorecache, self).__init__(*pathsandlocations)
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
153 _cachedfiles.update(pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
154
155
155 def join(self, obj, fnameandlocation):
156 def join(self, obj, fnameandlocation):
156 fname, location = fnameandlocation
157 fname, location = fnameandlocation
157 if location == b'plain':
158 if location == b'plain':
158 return obj.vfs.join(fname)
159 return obj.vfs.join(fname)
159 else:
160 else:
160 if location != b'':
161 if location != b'':
161 raise error.ProgrammingError(
162 raise error.ProgrammingError(
162 b'unexpected location: %s' % location
163 b'unexpected location: %s' % location
163 )
164 )
164 return obj.sjoin(fname)
165 return obj.sjoin(fname)
165
166
166
167
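# The three cache classes above are decorators for repository properties: a
# cached value is recomputed only when the backing file under .hg/ (for
# repofilecache) or .hg/store/ (for storecache) changes.  Below is a minimal,
# self-contained sketch of that pattern in plain Python -- it is NOT
# scmutil.filecache (which also tracks file size and inode and accepts several
# paths per property); the class and file names here are illustrative only.
import os


class filecachedprop(object):
    """Cache a computed property; invalidate it when the backing file's
    mtime changes."""

    def __init__(self, path):
        self.path = path

    def __call__(self, func):
        self.func = func
        self.attr = '_cached_' + func.__name__
        return self

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        try:
            mtime = os.stat(os.path.join(obj.root, self.path)).st_mtime
        except OSError:
            mtime = None
        stamp, value = getattr(obj, self.attr, (object(), None))
        if stamp != mtime:
            value = self.func(obj)
            setattr(obj, self.attr, (mtime, value))
        return value


class fakerepo(object):
    def __init__(self, root):
        self.root = root          # would be the .hg/ directory

    @filecachedprop('bookmarks')  # analogous to @repofilecache(b'bookmarks')
    def bookmarks(self):
        with open(os.path.join(self.root, 'bookmarks'), 'rb') as fp:
            return fp.read().splitlines()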
167 def isfilecached(repo, name):
168 def isfilecached(repo, name):
168 """check if a repo has already cached "name" filecache-ed property
169 """check if a repo has already cached "name" filecache-ed property
169
170
170 This returns (cachedobj-or-None, iscached) tuple.
171 This returns (cachedobj-or-None, iscached) tuple.
171 """
172 """
172 cacheentry = repo.unfiltered()._filecache.get(name, None)
173 cacheentry = repo.unfiltered()._filecache.get(name, None)
173 if not cacheentry:
174 if not cacheentry:
174 return None, False
175 return None, False
175 return cacheentry.obj, True
176 return cacheentry.obj, True
176
177
177
178
178 class unfilteredpropertycache(util.propertycache):
179 class unfilteredpropertycache(util.propertycache):
179 """propertycache that apply to unfiltered repo only"""
180 """propertycache that apply to unfiltered repo only"""
180
181
181 def __get__(self, repo, type=None):
182 def __get__(self, repo, type=None):
182 unfi = repo.unfiltered()
183 unfi = repo.unfiltered()
183 if unfi is repo:
184 if unfi is repo:
184 return super(unfilteredpropertycache, self).__get__(unfi)
185 return super(unfilteredpropertycache, self).__get__(unfi)
185 return getattr(unfi, self.name)
186 return getattr(unfi, self.name)
186
187
187
188
188 class filteredpropertycache(util.propertycache):
189 class filteredpropertycache(util.propertycache):
189 """propertycache that must take filtering in account"""
190 """propertycache that must take filtering in account"""
190
191
191 def cachevalue(self, obj, value):
192 def cachevalue(self, obj, value):
192 object.__setattr__(obj, self.name, value)
193 object.__setattr__(obj, self.name, value)
193
194
194
195
195 def hasunfilteredcache(repo, name):
196 def hasunfilteredcache(repo, name):
196 """check if a repo has an unfilteredpropertycache value for <name>"""
197 """check if a repo has an unfilteredpropertycache value for <name>"""
197 return name in vars(repo.unfiltered())
198 return name in vars(repo.unfiltered())
198
199
199
200
200 def unfilteredmethod(orig):
201 def unfilteredmethod(orig):
201 """decorate method that always need to be run on unfiltered version"""
202 """decorate method that always need to be run on unfiltered version"""
202
203
203 @functools.wraps(orig)
204 @functools.wraps(orig)
204 def wrapper(repo, *args, **kwargs):
205 def wrapper(repo, *args, **kwargs):
205 return orig(repo.unfiltered(), *args, **kwargs)
206 return orig(repo.unfiltered(), *args, **kwargs)
206
207
207 return wrapper
208 return wrapper
208
209
209
210
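# Repoview filtering (e.g. the 'visible' or 'served' views) wraps the
# repository in proxy objects that hide some revisions.  The two helpers above
# force cache-related logic back onto the unfiltered object.  A minimal sketch
# of how the decorator is meant to be used follows; the classes and the method
# name are illustrative stand-ins, not Mercurial's own implementations.
import functools


def unfilteredmethod(orig):
    """Run the decorated method against repo.unfiltered()."""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


class baserepo(object):
    def unfiltered(self):
        return self

    @unfilteredmethod
    def invalidatecaches(self):
        # cache bookkeeping must see every revision, so this always runs
        # on the unfiltered repository even when called through a view
        print('invalidating caches on %r' % self)


class filteredview(baserepo):
    """Stand-in for repoview: delegates back to the real repository."""

    def __init__(self, unfi):
        self._unfi = unfi

    def unfiltered(self):
        return self._unfi


# filteredview(baserepo()).invalidatecaches() ends up operating on the
# underlying baserepo instance, not on the view.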
210 moderncaps = {
211 moderncaps = {
211 b'lookup',
212 b'lookup',
212 b'branchmap',
213 b'branchmap',
213 b'pushkey',
214 b'pushkey',
214 b'known',
215 b'known',
215 b'getbundle',
216 b'getbundle',
216 b'unbundle',
217 b'unbundle',
217 }
218 }
218 legacycaps = moderncaps.union({b'changegroupsubset'})
219 legacycaps = moderncaps.union({b'changegroupsubset'})
219
220
220
221
221 @interfaceutil.implementer(repository.ipeercommandexecutor)
222 @interfaceutil.implementer(repository.ipeercommandexecutor)
222 class localcommandexecutor(object):
223 class localcommandexecutor(object):
223 def __init__(self, peer):
224 def __init__(self, peer):
224 self._peer = peer
225 self._peer = peer
225 self._sent = False
226 self._sent = False
226 self._closed = False
227 self._closed = False
227
228
228 def __enter__(self):
229 def __enter__(self):
229 return self
230 return self
230
231
231 def __exit__(self, exctype, excvalue, exctb):
232 def __exit__(self, exctype, excvalue, exctb):
232 self.close()
233 self.close()
233
234
234 def callcommand(self, command, args):
235 def callcommand(self, command, args):
235 if self._sent:
236 if self._sent:
236 raise error.ProgrammingError(
237 raise error.ProgrammingError(
237 b'callcommand() cannot be used after sendcommands()'
238 b'callcommand() cannot be used after sendcommands()'
238 )
239 )
239
240
240 if self._closed:
241 if self._closed:
241 raise error.ProgrammingError(
242 raise error.ProgrammingError(
242 b'callcommand() cannot be used after close()'
243 b'callcommand() cannot be used after close()'
243 )
244 )
244
245
245 # We don't need to support anything fancy. Just call the named
246 # We don't need to support anything fancy. Just call the named
246 # method on the peer and return a resolved future.
247 # method on the peer and return a resolved future.
247 fn = getattr(self._peer, pycompat.sysstr(command))
248 fn = getattr(self._peer, pycompat.sysstr(command))
248
249
249 f = pycompat.futures.Future()
250 f = pycompat.futures.Future()
250
251
251 try:
252 try:
252 result = fn(**pycompat.strkwargs(args))
253 result = fn(**pycompat.strkwargs(args))
253 except Exception:
254 except Exception:
254 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
255 else:
256 else:
256 f.set_result(result)
257 f.set_result(result)
257
258
258 return f
259 return f
259
260
260 def sendcommands(self):
261 def sendcommands(self):
261 self._sent = True
262 self._sent = True
262
263
263 def close(self):
264 def close(self):
264 self._closed = True
265 self._closed = True
265
266
266
267
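# The executor gives local peers the same futures-based calling convention
# that wire peers use, so callers can be written once for both.  A hedged
# usage sketch (assuming `peer` was obtained elsewhere, e.g. from hg.peer();
# b'heads' is a standard command, the helper function name is made up):
def remoteheads(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
        # for localcommandexecutor the future is already resolved, but the
        # interface still requires sendcommands() before reading results
        e.sendcommands()
        return f.result()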
267 @interfaceutil.implementer(repository.ipeercommands)
268 @interfaceutil.implementer(repository.ipeercommands)
268 class localpeer(repository.peer):
269 class localpeer(repository.peer):
269 '''peer for a local repo; reflects only the most recent API'''
270 '''peer for a local repo; reflects only the most recent API'''
270
271
271 def __init__(self, repo, caps=None):
272 def __init__(self, repo, caps=None):
272 super(localpeer, self).__init__()
273 super(localpeer, self).__init__()
273
274
274 if caps is None:
275 if caps is None:
275 caps = moderncaps.copy()
276 caps = moderncaps.copy()
276 self._repo = repo.filtered(b'served')
277 self._repo = repo.filtered(b'served')
277 self.ui = repo.ui
278 self.ui = repo.ui
278
279
279 if repo._wanted_sidedata:
280 if repo._wanted_sidedata:
280 formatted = bundle2.format_remote_wanted_sidedata(repo)
281 formatted = bundle2.format_remote_wanted_sidedata(repo)
281 caps.add(b'exp-wanted-sidedata=' + formatted)
282 caps.add(b'exp-wanted-sidedata=' + formatted)
282
283
283 self._caps = repo._restrictcapabilities(caps)
284 self._caps = repo._restrictcapabilities(caps)
284
285
285 # Begin of _basepeer interface.
286 # Begin of _basepeer interface.
286
287
287 def url(self):
288 def url(self):
288 return self._repo.url()
289 return self._repo.url()
289
290
290 def local(self):
291 def local(self):
291 return self._repo
292 return self._repo
292
293
293 def peer(self):
294 def peer(self):
294 return self
295 return self
295
296
296 def canpush(self):
297 def canpush(self):
297 return True
298 return True
298
299
299 def close(self):
300 def close(self):
300 self._repo.close()
301 self._repo.close()
301
302
302 # End of _basepeer interface.
303 # End of _basepeer interface.
303
304
304 # Begin of _basewirecommands interface.
305 # Begin of _basewirecommands interface.
305
306
306 def branchmap(self):
307 def branchmap(self):
307 return self._repo.branchmap()
308 return self._repo.branchmap()
308
309
309 def capabilities(self):
310 def capabilities(self):
310 return self._caps
311 return self._caps
311
312
312 def clonebundles(self):
313 def clonebundles(self):
313 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
314 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
314
315
315 def debugwireargs(self, one, two, three=None, four=None, five=None):
316 def debugwireargs(self, one, two, three=None, four=None, five=None):
316 """Used to test argument passing over the wire"""
317 """Used to test argument passing over the wire"""
317 return b"%s %s %s %s %s" % (
318 return b"%s %s %s %s %s" % (
318 one,
319 one,
319 two,
320 two,
320 pycompat.bytestr(three),
321 pycompat.bytestr(three),
321 pycompat.bytestr(four),
322 pycompat.bytestr(four),
322 pycompat.bytestr(five),
323 pycompat.bytestr(five),
323 )
324 )
324
325
325 def getbundle(
326 def getbundle(
326 self,
327 self,
327 source,
328 source,
328 heads=None,
329 heads=None,
329 common=None,
330 common=None,
330 bundlecaps=None,
331 bundlecaps=None,
331 remote_sidedata=None,
332 remote_sidedata=None,
332 **kwargs
333 **kwargs
333 ):
334 ):
334 chunks = exchange.getbundlechunks(
335 chunks = exchange.getbundlechunks(
335 self._repo,
336 self._repo,
336 source,
337 source,
337 heads=heads,
338 heads=heads,
338 common=common,
339 common=common,
339 bundlecaps=bundlecaps,
340 bundlecaps=bundlecaps,
340 remote_sidedata=remote_sidedata,
341 remote_sidedata=remote_sidedata,
341 **kwargs
342 **kwargs
342 )[1]
343 )[1]
343 cb = util.chunkbuffer(chunks)
344 cb = util.chunkbuffer(chunks)
344
345
345 if exchange.bundle2requested(bundlecaps):
346 if exchange.bundle2requested(bundlecaps):
346 # When requesting a bundle2, getbundle returns a stream to make the
347 # When requesting a bundle2, getbundle returns a stream to make the
347 # wire level function happier. We need to build a proper object
348 # wire level function happier. We need to build a proper object
348 # from it in local peer.
349 # from it in local peer.
349 return bundle2.getunbundler(self.ui, cb)
350 return bundle2.getunbundler(self.ui, cb)
350 else:
351 else:
351 return changegroup.getunbundler(b'01', cb, None)
352 return changegroup.getunbundler(b'01', cb, None)
352
353
353 def heads(self):
354 def heads(self):
354 return self._repo.heads()
355 return self._repo.heads()
355
356
356 def known(self, nodes):
357 def known(self, nodes):
357 return self._repo.known(nodes)
358 return self._repo.known(nodes)
358
359
359 def listkeys(self, namespace):
360 def listkeys(self, namespace):
360 return self._repo.listkeys(namespace)
361 return self._repo.listkeys(namespace)
361
362
362 def lookup(self, key):
363 def lookup(self, key):
363 return self._repo.lookup(key)
364 return self._repo.lookup(key)
364
365
365 def pushkey(self, namespace, key, old, new):
366 def pushkey(self, namespace, key, old, new):
366 return self._repo.pushkey(namespace, key, old, new)
367 return self._repo.pushkey(namespace, key, old, new)
367
368
368 def stream_out(self):
369 def stream_out(self):
369 raise error.Abort(_(b'cannot perform stream clone against local peer'))
370 raise error.Abort(_(b'cannot perform stream clone against local peer'))
370
371
371 def unbundle(self, bundle, heads, url):
372 def unbundle(self, bundle, heads, url):
372 """apply a bundle on a repo
373 """apply a bundle on a repo
373
374
374 This function handles the repo locking itself."""
375 This function handles the repo locking itself."""
375 try:
376 try:
376 try:
377 try:
377 bundle = exchange.readbundle(self.ui, bundle, None)
378 bundle = exchange.readbundle(self.ui, bundle, None)
378 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
379 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
379 if util.safehasattr(ret, b'getchunks'):
380 if util.safehasattr(ret, b'getchunks'):
380 # This is a bundle20 object, turn it into an unbundler.
381 # This is a bundle20 object, turn it into an unbundler.
381 # This little dance should be dropped eventually when the
382 # This little dance should be dropped eventually when the
382 # API is finally improved.
383 # API is finally improved.
383 stream = util.chunkbuffer(ret.getchunks())
384 stream = util.chunkbuffer(ret.getchunks())
384 ret = bundle2.getunbundler(self.ui, stream)
385 ret = bundle2.getunbundler(self.ui, stream)
385 return ret
386 return ret
386 except Exception as exc:
387 except Exception as exc:
387 # If the exception contains output salvaged from a bundle2
388 # If the exception contains output salvaged from a bundle2
388 # reply, we need to make sure it is printed before continuing
389 # reply, we need to make sure it is printed before continuing
389 # to fail. So we build a bundle2 with such output and consume
390 # to fail. So we build a bundle2 with such output and consume
390 # it directly.
391 # it directly.
391 #
392 #
392 # This is not very elegant but allows a "simple" solution for
393 # This is not very elegant but allows a "simple" solution for
393 # issue4594
394 # issue4594
394 output = getattr(exc, '_bundle2salvagedoutput', ())
395 output = getattr(exc, '_bundle2salvagedoutput', ())
395 if output:
396 if output:
396 bundler = bundle2.bundle20(self._repo.ui)
397 bundler = bundle2.bundle20(self._repo.ui)
397 for out in output:
398 for out in output:
398 bundler.addpart(out)
399 bundler.addpart(out)
399 stream = util.chunkbuffer(bundler.getchunks())
400 stream = util.chunkbuffer(bundler.getchunks())
400 b = bundle2.getunbundler(self.ui, stream)
401 b = bundle2.getunbundler(self.ui, stream)
401 bundle2.processbundle(self._repo, b)
402 bundle2.processbundle(self._repo, b)
402 raise
403 raise
403 except error.PushRaced as exc:
404 except error.PushRaced as exc:
404 raise error.ResponseError(
405 raise error.ResponseError(
405 _(b'push failed:'), stringutil.forcebytestr(exc)
406 _(b'push failed:'), stringutil.forcebytestr(exc)
406 )
407 )
407
408
408 # End of _basewirecommands interface.
409 # End of _basewirecommands interface.
409
410
410 # Begin of peer interface.
411 # Begin of peer interface.
411
412
412 def commandexecutor(self):
413 def commandexecutor(self):
413 return localcommandexecutor(self)
414 return localcommandexecutor(self)
414
415
415 # End of peer interface.
416 # End of peer interface.
416
417
417
418
418 @interfaceutil.implementer(repository.ipeerlegacycommands)
419 @interfaceutil.implementer(repository.ipeerlegacycommands)
419 class locallegacypeer(localpeer):
420 class locallegacypeer(localpeer):
420 """peer extension which implements legacy methods too; used for tests with
421 """peer extension which implements legacy methods too; used for tests with
421 restricted capabilities"""
422 restricted capabilities"""
422
423
423 def __init__(self, repo):
424 def __init__(self, repo):
424 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
425 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
425
426
426 # Begin of baselegacywirecommands interface.
427 # Begin of baselegacywirecommands interface.
427
428
428 def between(self, pairs):
429 def between(self, pairs):
429 return self._repo.between(pairs)
430 return self._repo.between(pairs)
430
431
431 def branches(self, nodes):
432 def branches(self, nodes):
432 return self._repo.branches(nodes)
433 return self._repo.branches(nodes)
433
434
434 def changegroup(self, nodes, source):
435 def changegroup(self, nodes, source):
435 outgoing = discovery.outgoing(
436 outgoing = discovery.outgoing(
436 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
437 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
437 )
438 )
438 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
439 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
439
440
440 def changegroupsubset(self, bases, heads, source):
441 def changegroupsubset(self, bases, heads, source):
441 outgoing = discovery.outgoing(
442 outgoing = discovery.outgoing(
442 self._repo, missingroots=bases, ancestorsof=heads
443 self._repo, missingroots=bases, ancestorsof=heads
443 )
444 )
444 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
445 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
445
446
446 # End of baselegacywirecommands interface.
447 # End of baselegacywirecommands interface.
447
448
448
449
449 # Functions receiving (ui, features) that extensions can register to impact
450 # Functions receiving (ui, features) that extensions can register to impact
450 # the ability to load repositories with custom requirements. Only
451 # the ability to load repositories with custom requirements. Only
451 # functions defined in loaded extensions are called.
452 # functions defined in loaded extensions are called.
452 #
453 #
453 # The function receives a set of requirement strings that the repository
454 # The function receives a set of requirement strings that the repository
454 # is capable of opening. Functions will typically add elements to the
455 # is capable of opening. Functions will typically add elements to the
455 # set to reflect that the extension knows how to handle those requirements.
456 # set to reflect that the extension knows how to handle those requirements.
456 featuresetupfuncs = set()
457 featuresetupfuncs = set()
457
458
458
459
459 def _getsharedvfs(hgvfs, requirements):
460 def _getsharedvfs(hgvfs, requirements):
460 """returns the vfs object pointing to root of shared source
461 """returns the vfs object pointing to root of shared source
461 repo for a shared repository
462 repo for a shared repository
462
463
463 hgvfs is vfs pointing at .hg/ of current repo (shared one)
464 hgvfs is vfs pointing at .hg/ of current repo (shared one)
464 requirements is a set of requirements of current repo (shared one)
465 requirements is a set of requirements of current repo (shared one)
465 """
466 """
466 # The ``shared`` or ``relshared`` requirements indicate the
467 # The ``shared`` or ``relshared`` requirements indicate the
467 # store lives in the path contained in the ``.hg/sharedpath`` file.
468 # store lives in the path contained in the ``.hg/sharedpath`` file.
468 # This is an absolute path for ``shared`` and relative to
469 # This is an absolute path for ``shared`` and relative to
469 # ``.hg/`` for ``relshared``.
470 # ``.hg/`` for ``relshared``.
470 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
471 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
471 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
472 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
472 sharedpath = util.normpath(hgvfs.join(sharedpath))
473 sharedpath = util.normpath(hgvfs.join(sharedpath))
473
474
474 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
475 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
475
476
476 if not sharedvfs.exists():
477 if not sharedvfs.exists():
477 raise error.RepoError(
478 raise error.RepoError(
478 _(b'.hg/sharedpath points to nonexistent directory %s')
479 _(b'.hg/sharedpath points to nonexistent directory %s')
479 % sharedvfs.base
480 % sharedvfs.base
480 )
481 )
481 return sharedvfs
482 return sharedvfs
482
483
483
484
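# For a repository created with 'hg share', .hg/sharedpath names the .hg/
# directory of the source repo -- absolute for the 'shared' requirement,
# relative to this .hg/ for 'relshared'.  A standalone sketch of the same
# resolution using plain os calls (the requirement strings are the historical
# on-disk names; this is not the vfs-based implementation above):
import os


def resolvesharedpath(hgdir, requirements):
    with open(os.path.join(hgdir, 'sharedpath')) as fp:
        shared = fp.read().rstrip('\n')
    if 'relshared' in requirements:
        shared = os.path.normpath(os.path.join(hgdir, shared))
    if not os.path.isdir(shared):
        raise RuntimeError(
            '.hg/sharedpath points to nonexistent directory %s' % shared
        )
    return shared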
484 def _readrequires(vfs, allowmissing):
485 def _readrequires(vfs, allowmissing):
485 """reads the require file present at root of this vfs
486 """reads the require file present at root of this vfs
486 and return a set of requirements
487 and return a set of requirements
487
488
488 If allowmissing is True, we suppress ENOENT if raised"""
489 If allowmissing is True, we suppress ENOENT if raised"""
489 # requires file contains a newline-delimited list of
490 # requires file contains a newline-delimited list of
490 # features/capabilities the opener (us) must have in order to use
491 # features/capabilities the opener (us) must have in order to use
491 # the repository. This file was introduced in Mercurial 0.9.2,
492 # the repository. This file was introduced in Mercurial 0.9.2,
492 # which means very old repositories may not have one. We assume
493 # which means very old repositories may not have one. We assume
493 # a missing file translates to no requirements.
494 # a missing file translates to no requirements.
494 try:
495 try:
495 requirements = set(vfs.read(b'requires').splitlines())
496 requirements = set(vfs.read(b'requires').splitlines())
496 except IOError as e:
497 except IOError as e:
497 if not (allowmissing and e.errno == errno.ENOENT):
498 if not (allowmissing and e.errno == errno.ENOENT):
498 raise
499 raise
499 requirements = set()
500 requirements = set()
500 return requirements
501 return requirements
501
502
502
503
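# .hg/requires (and, for share-safe repos, .hg/store/requires) is just a
# newline-delimited list of requirement strings.  A standalone sketch of
# reading one with plain file I/O follows; entries such as 'store', 'fncache',
# 'dotencode' or 'revlogv1' are typical, but the exact set depends on the
# Mercurial version and configuration that created the repository.
import errno


def readrequires(path, allowmissing=False):
    try:
        with open(path, 'rb') as fp:
            return set(fp.read().splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        return set()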
503 def makelocalrepository(baseui, path, intents=None):
504 def makelocalrepository(baseui, path, intents=None):
504 """Create a local repository object.
505 """Create a local repository object.
505
506
506 Given arguments needed to construct a local repository, this function
507 Given arguments needed to construct a local repository, this function
507 performs various early repository loading functionality (such as
508 performs various early repository loading functionality (such as
508 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
509 the repository can be opened, derives a type suitable for representing
510 the repository can be opened, derives a type suitable for representing
510 that repository, and returns an instance of it.
511 that repository, and returns an instance of it.
511
512
512 The returned object conforms to the ``repository.completelocalrepository``
513 The returned object conforms to the ``repository.completelocalrepository``
513 interface.
514 interface.
514
515
515 The repository type is derived by calling a series of factory functions
516 The repository type is derived by calling a series of factory functions
516 for each aspect/interface of the final repository. These are defined by
517 for each aspect/interface of the final repository. These are defined by
517 ``REPO_INTERFACES``.
518 ``REPO_INTERFACES``.
518
519
519 Each factory function is called to produce a type implementing a specific
520 Each factory function is called to produce a type implementing a specific
520 interface. The cumulative list of returned types will be combined into a
521 interface. The cumulative list of returned types will be combined into a
521 new type and that type will be instantiated to represent the local
522 new type and that type will be instantiated to represent the local
522 repository.
523 repository.
523
524
524 The factory functions each receive various state that may be consulted
525 The factory functions each receive various state that may be consulted
525 as part of deriving a type.
526 as part of deriving a type.
526
527
527 Extensions should wrap these factory functions to customize repository type
528 Extensions should wrap these factory functions to customize repository type
528 creation. Note that an extension's wrapped function may be called even if
529 creation. Note that an extension's wrapped function may be called even if
529 that extension is not loaded for the repo being constructed. Extensions
530 that extension is not loaded for the repo being constructed. Extensions
530 should check if their ``__name__`` appears in the
531 should check if their ``__name__`` appears in the
531 ``extensionmodulenames`` set passed to the factory function and no-op if
532 ``extensionmodulenames`` set passed to the factory function and no-op if
532 not.
533 not.
533 """
534 """
534 ui = baseui.copy()
535 ui = baseui.copy()
535 # Prevent copying repo configuration.
536 # Prevent copying repo configuration.
536 ui.copy = baseui.copy
537 ui.copy = baseui.copy
537
538
538 # Working directory VFS rooted at repository root.
539 # Working directory VFS rooted at repository root.
539 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
540
541
541 # Main VFS for .hg/ directory.
542 # Main VFS for .hg/ directory.
542 hgpath = wdirvfs.join(b'.hg')
543 hgpath = wdirvfs.join(b'.hg')
543 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
544 # Whether this repository is a shared one or not
545 # Whether this repository is a shared one or not
545 shared = False
546 shared = False
546 # If this repository is shared, vfs pointing to shared repo
547 # If this repository is shared, vfs pointing to shared repo
547 sharedvfs = None
548 sharedvfs = None
548
549
549 # The .hg/ path should exist and should be a directory. All other
550 # The .hg/ path should exist and should be a directory. All other
550 # cases are errors.
551 # cases are errors.
551 if not hgvfs.isdir():
552 if not hgvfs.isdir():
552 try:
553 try:
553 hgvfs.stat()
554 hgvfs.stat()
554 except OSError as e:
555 except OSError as e:
555 if e.errno != errno.ENOENT:
556 if e.errno != errno.ENOENT:
556 raise
557 raise
557 except ValueError as e:
558 except ValueError as e:
558 # Can be raised on Python 3.8 when path is invalid.
559 # Can be raised on Python 3.8 when path is invalid.
559 raise error.Abort(
560 raise error.Abort(
560 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
561 )
562 )
562
563
563 raise error.RepoError(_(b'repository %s not found') % path)
564 raise error.RepoError(_(b'repository %s not found') % path)
564
565
565 requirements = _readrequires(hgvfs, True)
566 requirements = _readrequires(hgvfs, True)
566 shared = (
567 shared = (
567 requirementsmod.SHARED_REQUIREMENT in requirements
568 requirementsmod.SHARED_REQUIREMENT in requirements
568 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
569 )
570 )
570 storevfs = None
571 storevfs = None
571 if shared:
572 if shared:
572 # This is a shared repo
573 # This is a shared repo
573 sharedvfs = _getsharedvfs(hgvfs, requirements)
574 sharedvfs = _getsharedvfs(hgvfs, requirements)
574 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
575 else:
576 else:
576 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
577
578
578 # if .hg/requires contains the sharesafe requirement, it means
579 # if .hg/requires contains the sharesafe requirement, it means
579 # there exists a `.hg/store/requires` too and we should read it
580 # there exists a `.hg/store/requires` too and we should read it
580 # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
581 # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
581 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
582 # is not present, refer to checkrequirementscompat() for that
583 # is not present, refer to checkrequirementscompat() for that
583 #
584 #
584 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
585 # repository was shared the old way. We check the share source .hg/requires
586 # repository was shared the old way. We check the share source .hg/requires
586 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
587 # to be reshared
588 # to be reshared
588 hint = _(b"see `hg help config.format.use-share-safe` for more information")
589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
589 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
590
591
591 if (
592 if (
592 shared
593 shared
593 and requirementsmod.SHARESAFE_REQUIREMENT
594 and requirementsmod.SHARESAFE_REQUIREMENT
594 not in _readrequires(sharedvfs, True)
595 not in _readrequires(sharedvfs, True)
595 ):
596 ):
596 mismatch_warn = ui.configbool(
597 mismatch_warn = ui.configbool(
597 b'share', b'safe-mismatch.source-not-safe.warn'
598 b'share', b'safe-mismatch.source-not-safe.warn'
598 )
599 )
599 mismatch_config = ui.config(
600 mismatch_config = ui.config(
600 b'share', b'safe-mismatch.source-not-safe'
601 b'share', b'safe-mismatch.source-not-safe'
601 )
602 )
602 if mismatch_config in (
603 if mismatch_config in (
603 b'downgrade-allow',
604 b'downgrade-allow',
604 b'allow',
605 b'allow',
605 b'downgrade-abort',
606 b'downgrade-abort',
606 ):
607 ):
607 # prevent cyclic import localrepo -> upgrade -> localrepo
608 # prevent cyclic import localrepo -> upgrade -> localrepo
608 from . import upgrade
609 from . import upgrade
609
610
610 upgrade.downgrade_share_to_non_safe(
611 upgrade.downgrade_share_to_non_safe(
611 ui,
612 ui,
612 hgvfs,
613 hgvfs,
613 sharedvfs,
614 sharedvfs,
614 requirements,
615 requirements,
615 mismatch_config,
616 mismatch_config,
616 mismatch_warn,
617 mismatch_warn,
617 )
618 )
618 elif mismatch_config == b'abort':
619 elif mismatch_config == b'abort':
619 raise error.Abort(
620 raise error.Abort(
620 _(b"share source does not support share-safe requirement"),
621 _(b"share source does not support share-safe requirement"),
621 hint=hint,
622 hint=hint,
622 )
623 )
623 else:
624 else:
624 raise error.Abort(
625 raise error.Abort(
625 _(
626 _(
626 b"share-safe mismatch with source.\nUnrecognized"
627 b"share-safe mismatch with source.\nUnrecognized"
627 b" value '%s' of `share.safe-mismatch.source-not-safe`"
628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
628 b" set."
629 b" set."
629 )
630 )
630 % mismatch_config,
631 % mismatch_config,
631 hint=hint,
632 hint=hint,
632 )
633 )
633 else:
634 else:
634 requirements |= _readrequires(storevfs, False)
635 requirements |= _readrequires(storevfs, False)
635 elif shared:
636 elif shared:
636 sourcerequires = _readrequires(sharedvfs, False)
637 sourcerequires = _readrequires(sharedvfs, False)
637 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
638 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
639 mismatch_warn = ui.configbool(
640 mismatch_warn = ui.configbool(
640 b'share', b'safe-mismatch.source-safe.warn'
641 b'share', b'safe-mismatch.source-safe.warn'
641 )
642 )
642 if mismatch_config in (
643 if mismatch_config in (
643 b'upgrade-allow',
644 b'upgrade-allow',
644 b'allow',
645 b'allow',
645 b'upgrade-abort',
646 b'upgrade-abort',
646 ):
647 ):
647 # prevent cyclic import localrepo -> upgrade -> localrepo
648 # prevent cyclic import localrepo -> upgrade -> localrepo
648 from . import upgrade
649 from . import upgrade
649
650
650 upgrade.upgrade_share_to_safe(
651 upgrade.upgrade_share_to_safe(
651 ui,
652 ui,
652 hgvfs,
653 hgvfs,
653 storevfs,
654 storevfs,
654 requirements,
655 requirements,
655 mismatch_config,
656 mismatch_config,
656 mismatch_warn,
657 mismatch_warn,
657 )
658 )
658 elif mismatch_config == b'abort':
659 elif mismatch_config == b'abort':
659 raise error.Abort(
660 raise error.Abort(
660 _(
661 _(
661 b'version mismatch: source uses share-safe'
662 b'version mismatch: source uses share-safe'
662 b' functionality while the current share does not'
663 b' functionality while the current share does not'
663 ),
664 ),
664 hint=hint,
665 hint=hint,
665 )
666 )
666 else:
667 else:
667 raise error.Abort(
668 raise error.Abort(
668 _(
669 _(
669 b"share-safe mismatch with source.\nUnrecognized"
670 b"share-safe mismatch with source.\nUnrecognized"
670 b" value '%s' of `share.safe-mismatch.source-safe` set."
671 b" value '%s' of `share.safe-mismatch.source-safe` set."
671 )
672 )
672 % mismatch_config,
673 % mismatch_config,
673 hint=hint,
674 hint=hint,
674 )
675 )
675
676
676 # The .hg/hgrc file may load extensions or contain config options
677 # The .hg/hgrc file may load extensions or contain config options
677 # that influence repository construction. Attempt to load it and
678 # that influence repository construction. Attempt to load it and
678 # process any new extensions that it may have pulled in.
679 # process any new extensions that it may have pulled in.
679 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
680 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
681 extensions.loadall(ui)
682 extensions.loadall(ui)
682 extensions.populateui(ui)
683 extensions.populateui(ui)
683
684
684 # Set of module names of extensions loaded for this repository.
685 # Set of module names of extensions loaded for this repository.
685 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
686
687
687 supportedrequirements = gathersupportedrequirements(ui)
688 supportedrequirements = gathersupportedrequirements(ui)
688
689
689 # We first validate the requirements are known.
690 # We first validate the requirements are known.
690 ensurerequirementsrecognized(requirements, supportedrequirements)
691 ensurerequirementsrecognized(requirements, supportedrequirements)
691
692
692 # Then we validate that the known set is reasonable to use together.
693 # Then we validate that the known set is reasonable to use together.
693 ensurerequirementscompatible(ui, requirements)
694 ensurerequirementscompatible(ui, requirements)
694
695
695 # TODO there are unhandled edge cases related to opening repositories with
696 # TODO there are unhandled edge cases related to opening repositories with
696 # shared storage. If storage is shared, we should also test for requirements
697 # shared storage. If storage is shared, we should also test for requirements
697 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
698 # that repo, as that repo may load extensions needed to open it. This is a
699 # that repo, as that repo may load extensions needed to open it. This is a
699 # bit complicated because we don't want the other hgrc to overwrite settings
700 # bit complicated because we don't want the other hgrc to overwrite settings
700 # in this hgrc.
701 # in this hgrc.
701 #
702 #
702 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
703 # file when sharing repos. But if a requirement is added after the share is
704 # file when sharing repos. But if a requirement is added after the share is
704 # performed, thereby introducing a new requirement for the opener, we may
705 # performed, thereby introducing a new requirement for the opener, we may
705 # not see that and could encounter a run-time error interacting with
706 # not see that and could encounter a run-time error interacting with
706 # that shared store since it has an unknown-to-us requirement.
707 # that shared store since it has an unknown-to-us requirement.
707
708
708 # At this point, we know we should be capable of opening the repository.
709 # At this point, we know we should be capable of opening the repository.
709 # Now get on with doing that.
710 # Now get on with doing that.
710
711
711 features = set()
712 features = set()
712
713
713 # The "store" part of the repository holds versioned data. How it is
714 # The "store" part of the repository holds versioned data. How it is
714 # accessed is determined by various requirements. If `shared` or
715 # accessed is determined by various requirements. If `shared` or
716 # `relshared` requirements are present, this indicates the current repository
717 # `relshared` requirements are present, this indicates the current repository
717 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
718 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
717 if shared:
718 if shared:
718 storebasepath = sharedvfs.base
719 storebasepath = sharedvfs.base
719 cachepath = sharedvfs.join(b'cache')
720 cachepath = sharedvfs.join(b'cache')
720 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
721 else:
722 else:
722 storebasepath = hgvfs.base
723 storebasepath = hgvfs.base
723 cachepath = hgvfs.join(b'cache')
724 cachepath = hgvfs.join(b'cache')
724 wcachepath = hgvfs.join(b'wcache')
725 wcachepath = hgvfs.join(b'wcache')
725
726
726 # The store has changed over time and the exact layout is dictated by
727 # The store has changed over time and the exact layout is dictated by
727 # requirements. The store interface abstracts differences across all
728 # requirements. The store interface abstracts differences across all
728 # of them.
729 # of them.
729 store = makestore(
730 store = makestore(
730 requirements,
731 requirements,
731 storebasepath,
732 storebasepath,
732 lambda base: vfsmod.vfs(base, cacheaudited=True),
733 lambda base: vfsmod.vfs(base, cacheaudited=True),
733 )
734 )
734 hgvfs.createmode = store.createmode
735 hgvfs.createmode = store.createmode
735
736
736 storevfs = store.vfs
737 storevfs = store.vfs
737 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
738
739
739 # The cache vfs is used to manage cache files.
740 # The cache vfs is used to manage cache files.
740 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
741 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
741 cachevfs.createmode = store.createmode
742 cachevfs.createmode = store.createmode
742 # The cache vfs is used to manage cache files related to the working copy
743 # The cache vfs is used to manage cache files related to the working copy
743 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
744 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
744 wcachevfs.createmode = store.createmode
745 wcachevfs.createmode = store.createmode
745
746
746 # Now resolve the type for the repository object. We do this by repeatedly
747 # Now resolve the type for the repository object. We do this by repeatedly
747 # calling a factory function to produce types for specific aspects of the
748 # calling a factory function to produce types for specific aspects of the
748 # repo's operation. The aggregate returned types are used as base classes
749 # repo's operation. The aggregate returned types are used as base classes
749 # for a dynamically-derived type, which will represent our new repository.
750 # for a dynamically-derived type, which will represent our new repository.
750
751
751 bases = []
752 bases = []
752 extrastate = {}
753 extrastate = {}
753
754
754 for iface, fn in REPO_INTERFACES:
755 for iface, fn in REPO_INTERFACES:
755 # We pass all potentially useful state to give extensions tons of
756 # We pass all potentially useful state to give extensions tons of
756 # flexibility.
757 # flexibility.
757 typ = fn()(
758 typ = fn()(
758 ui=ui,
759 ui=ui,
759 intents=intents,
760 intents=intents,
760 requirements=requirements,
761 requirements=requirements,
761 features=features,
762 features=features,
762 wdirvfs=wdirvfs,
763 wdirvfs=wdirvfs,
763 hgvfs=hgvfs,
764 hgvfs=hgvfs,
764 store=store,
765 store=store,
765 storevfs=storevfs,
766 storevfs=storevfs,
766 storeoptions=storevfs.options,
767 storeoptions=storevfs.options,
767 cachevfs=cachevfs,
768 cachevfs=cachevfs,
768 wcachevfs=wcachevfs,
769 wcachevfs=wcachevfs,
769 extensionmodulenames=extensionmodulenames,
770 extensionmodulenames=extensionmodulenames,
770 extrastate=extrastate,
771 extrastate=extrastate,
771 baseclasses=bases,
772 baseclasses=bases,
772 )
773 )
773
774
774 if not isinstance(typ, type):
775 if not isinstance(typ, type):
775 raise error.ProgrammingError(
776 raise error.ProgrammingError(
776 b'unable to construct type for %s' % iface
777 b'unable to construct type for %s' % iface
777 )
778 )
778
779
779 bases.append(typ)
780 bases.append(typ)
780
781
781 # type() allows you to use characters in type names that wouldn't be
782 # type() allows you to use characters in type names that wouldn't be
782 # recognized as Python symbols in source code. We abuse that to add
783 # recognized as Python symbols in source code. We abuse that to add
783 # rich information about our constructed repo.
784 # rich information about our constructed repo.
784 name = pycompat.sysstr(
785 name = pycompat.sysstr(
785 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
786 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
786 )
787 )
787
788
788 cls = type(name, tuple(bases), {})
789 cls = type(name, tuple(bases), {})
789
790
790 return cls(
791 return cls(
791 baseui=baseui,
792 baseui=baseui,
792 ui=ui,
793 ui=ui,
793 origroot=path,
794 origroot=path,
794 wdirvfs=wdirvfs,
795 wdirvfs=wdirvfs,
795 hgvfs=hgvfs,
796 hgvfs=hgvfs,
796 requirements=requirements,
797 requirements=requirements,
797 supportedrequirements=supportedrequirements,
798 supportedrequirements=supportedrequirements,
798 sharedpath=storebasepath,
799 sharedpath=storebasepath,
799 store=store,
800 store=store,
800 cachevfs=cachevfs,
801 cachevfs=cachevfs,
801 wcachevfs=wcachevfs,
802 wcachevfs=wcachevfs,
802 features=features,
803 features=features,
803 intents=intents,
804 intents=intents,
804 )
805 )
805
806
806
807
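# makelocalrepository() assembles the final repository class dynamically: each
# entry in REPO_INTERFACES contributes one base class and the pieces are
# combined with type().  A self-contained sketch of that composition mechanism
# follows (all factory and class names here are illustrative, not the real
# entries of REPO_INTERFACES):
def makefilestorage(requirements, **kwargs):
    class revlogfilestorage(object):
        def file(self, path):
            return 'filelog for %s' % path

    return revlogfilestorage


def makemain(requirements, **kwargs):
    class baserepository(object):
        def __init__(self, root):
            self.root = root

    return baserepository


FAKE_REPO_INTERFACES = [
    ('ifilestorage', lambda: makefilestorage),
    ('icompletelocalrepository', lambda: makemain),
]


def buildrepotype(root, requirements):
    bases = []
    for iface, fn in FAKE_REPO_INTERFACES:
        typ = fn()(requirements=requirements, baseclasses=bases)
        bases.append(typ)
    # same trick as above: encode path and requirements into the type name
    name = 'derivedrepo:%s<%s>' % (root, ','.join(sorted(requirements)))
    return type(name, tuple(bases), {})


repo_cls = buildrepotype('/tmp/repo', {'store', 'revlogv1'})
repo = repo_cls('/tmp/repo')  # instance of the dynamically derived type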
807 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
808 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
808 """Load hgrc files/content into a ui instance.
809 """Load hgrc files/content into a ui instance.
809
810
810 This is called during repository opening to load any additional
811 This is called during repository opening to load any additional
811 config files or settings relevant to the current repository.
812 config files or settings relevant to the current repository.
812
813
813 Returns a bool indicating whether any additional configs were loaded.
814 Returns a bool indicating whether any additional configs were loaded.
814
815
815 Extensions should monkeypatch this function to modify how per-repo
816 Extensions should monkeypatch this function to modify how per-repo
816 configs are loaded. For example, an extension may wish to pull in
817 configs are loaded. For example, an extension may wish to pull in
817 configs from alternate files or sources.
818 configs from alternate files or sources.
818
819
819 sharedvfs is a vfs object pointing to the source repo if the current one is a
820 sharedvfs is a vfs object pointing to the source repo if the current one is a
820 shared one
821 shared one
821 """
822 """
822 if not rcutil.use_repo_hgrc():
823 if not rcutil.use_repo_hgrc():
823 return False
824 return False
824
825
825 ret = False
826 ret = False
826 # first load config from shared source if we have to
827 # first load config from shared source if we have to
827 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
828 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
828 try:
829 try:
829 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
830 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
830 ret = True
831 ret = True
831 except IOError:
832 except IOError:
832 pass
833 pass
833
834
834 try:
835 try:
835 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
836 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
836 ret = True
837 ret = True
837 except IOError:
838 except IOError:
838 pass
839 pass
839
840
840 try:
841 try:
841 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
842 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
842 ret = True
843 ret = True
843 except IOError:
844 except IOError:
844 pass
845 pass
845
846
846 return ret
847 return ret
847
848
848
849
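# The docstring above explicitly invites extensions to monkeypatch this
# function.  A hedged sketch of doing that from an extension module with
# extensions.wrapfunction (the extra config file name used here is made up;
# the IOError handling mirrors the pattern above):
from mercurial import extensions, localrepo


def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
    loaded = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
    try:
        # hypothetical extra per-repo config layered on top of .hg/hgrc
        ui.readconfig(hgvfs.join(b'hgrc-site-overrides'), root=wdirvfs.base)
        loaded = True
    except IOError:
        pass
    return loaded


def uisetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)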
849 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
850 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
850 """Perform additional actions after .hg/hgrc is loaded.
851 """Perform additional actions after .hg/hgrc is loaded.
851
852
852 This function is called during repository loading immediately after
853 This function is called during repository loading immediately after
853 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
854 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
854
855
855 The function can be used to validate configs, automatically add
856 The function can be used to validate configs, automatically add
856 options (including extensions) based on requirements, etc.
857 options (including extensions) based on requirements, etc.
857 """
858 """
858
859
859 # Map of requirements to list of extensions to load automatically when
860 # Map of requirements to list of extensions to load automatically when
860 # requirement is present.
861 # requirement is present.
861 autoextensions = {
862 autoextensions = {
862 b'git': [b'git'],
863 b'git': [b'git'],
863 b'largefiles': [b'largefiles'],
864 b'largefiles': [b'largefiles'],
864 b'lfs': [b'lfs'],
865 b'lfs': [b'lfs'],
865 }
866 }
866
867
867 for requirement, names in sorted(autoextensions.items()):
868 for requirement, names in sorted(autoextensions.items()):
868 if requirement not in requirements:
869 if requirement not in requirements:
869 continue
870 continue
870
871
871 for name in names:
872 for name in names:
872 if not ui.hasconfig(b'extensions', name):
873 if not ui.hasconfig(b'extensions', name):
873 ui.setconfig(b'extensions', name, b'', source=b'autoload')
874 ui.setconfig(b'extensions', name, b'', source=b'autoload')
874
875
875
876
876 def gathersupportedrequirements(ui):
877 def gathersupportedrequirements(ui):
877 """Determine the complete set of recognized requirements."""
878 """Determine the complete set of recognized requirements."""
878 # Start with all requirements supported by this file.
879 # Start with all requirements supported by this file.
879 supported = set(localrepository._basesupported)
880 supported = set(localrepository._basesupported)
880
881
881 # Execute ``featuresetupfuncs`` entries if they belong to an extension
882 # Execute ``featuresetupfuncs`` entries if they belong to an extension
882 # relevant to this ui instance.
883 # relevant to this ui instance.
883 modules = {m.__name__ for n, m in extensions.extensions(ui)}
884 modules = {m.__name__ for n, m in extensions.extensions(ui)}
884
885
885 for fn in featuresetupfuncs:
886 for fn in featuresetupfuncs:
886 if fn.__module__ in modules:
887 if fn.__module__ in modules:
887 fn(ui, supported)
888 fn(ui, supported)
888
889
889 # Add derived requirements from registered compression engines.
890 # Add derived requirements from registered compression engines.
890 for name in util.compengines:
891 for name in util.compengines:
891 engine = util.compengines[name]
892 engine = util.compengines[name]
892 if engine.available() and engine.revlogheader():
893 if engine.available() and engine.revlogheader():
893 supported.add(b'exp-compression-%s' % name)
894 supported.add(b'exp-compression-%s' % name)
894 if engine.name() == b'zstd':
895 if engine.name() == b'zstd':
895 supported.add(b'revlog-compression-zstd')
896 supported.add(b'revlog-compression-zstd')
896
897
897 return supported
898 return supported
898
899
899
900
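# Extensions that introduce their own requirement strings register a callback
# in featuresetupfuncs so that repositories carrying that requirement can
# still be opened.  A hedged sketch of the registration from an extension
# (the requirement name is made up):
from mercurial import localrepo


def featuresetup(ui, features):
    features.add(b'exp-myextension-storage-v1')


def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)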
900 def ensurerequirementsrecognized(requirements, supported):
901 def ensurerequirementsrecognized(requirements, supported):
901 """Validate that a set of local requirements is recognized.
902 """Validate that a set of local requirements is recognized.
902
903
903 Receives a set of requirements. Raises an ``error.RepoError`` if there
904 Receives a set of requirements. Raises an ``error.RepoError`` if there
904 exists any requirement in that set that currently loaded code doesn't
905 exists any requirement in that set that currently loaded code doesn't
905 recognize.
906 recognize.
906
907
907 Returns a set of supported requirements.
908 Returns a set of supported requirements.
908 """
909 """
909 missing = set()
910 missing = set()
910
911
911 for requirement in requirements:
912 for requirement in requirements:
912 if requirement in supported:
913 if requirement in supported:
913 continue
914 continue
914
915
915 if not requirement or not requirement[0:1].isalnum():
916 if not requirement or not requirement[0:1].isalnum():
916 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
917 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
917
918
918 missing.add(requirement)
919 missing.add(requirement)
919
920
920 if missing:
921 if missing:
921 raise error.RequirementError(
922 raise error.RequirementError(
922 _(b'repository requires features unknown to this Mercurial: %s')
923 _(b'repository requires features unknown to this Mercurial: %s')
923 % b' '.join(sorted(missing)),
924 % b' '.join(sorted(missing)),
924 hint=_(
925 hint=_(
925 b'see https://mercurial-scm.org/wiki/MissingRequirement '
926 b'see https://mercurial-scm.org/wiki/MissingRequirement '
926 b'for more information'
927 b'for more information'
927 ),
928 ),
928 )
929 )
929
930
930
931
931 def ensurerequirementscompatible(ui, requirements):
932 def ensurerequirementscompatible(ui, requirements):
932 """Validates that a set of recognized requirements is mutually compatible.
933 """Validates that a set of recognized requirements is mutually compatible.
933
934
934 Some requirements may not be compatible with others or require
935 Some requirements may not be compatible with others or require
935 config options that aren't enabled. This function is called during
936 config options that aren't enabled. This function is called during
936 repository opening to ensure that the set of requirements needed
937 repository opening to ensure that the set of requirements needed
937 to open a repository is sane and compatible with config options.
938 to open a repository is sane and compatible with config options.
938
939
939 Extensions can monkeypatch this function to perform additional
940 Extensions can monkeypatch this function to perform additional
940 checking.
941 checking.
941
942
942 ``error.RepoError`` should be raised on failure.
943 ``error.RepoError`` should be raised on failure.
943 """
944 """
944 if (
945 if (
945 requirementsmod.SPARSE_REQUIREMENT in requirements
946 requirementsmod.SPARSE_REQUIREMENT in requirements
946 and not sparse.enabled
947 and not sparse.enabled
947 ):
948 ):
948 raise error.RepoError(
949 raise error.RepoError(
949 _(
950 _(
950 b'repository is using sparse feature but '
951 b'repository is using sparse feature but '
951 b'sparse is not enabled; enable the '
952 b'sparse is not enabled; enable the '
952 b'"sparse" extensions to access'
953 b'"sparse" extensions to access'
953 )
954 )
954 )
955 )
955
956
956
957
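Because the docstring above explicitly allows extensions to monkeypatch this hook, a minimal hypothetical wrapper could look like the sketch below; the extension name, config knob and requirement string are all invented for illustration.

# hypothetical extension snippet; 'myext' and b'exp-myfeature' are made up
from mercurial import error, extensions, localrepo

def _checkcompat(orig, ui, requirements):
    orig(ui, requirements)  # keep the stock checks (e.g. the sparse one above)
    if b'exp-myfeature' in requirements and not ui.configbool(b'myext', b'enable'):
        raise error.RepoError(b'repository requires the myext extension')

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'ensurerequirementscompatible', _checkcompat)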
957 def makestore(requirements, path, vfstype):
958 def makestore(requirements, path, vfstype):
958 """Construct a storage object for a repository."""
959 """Construct a storage object for a repository."""
959 if requirementsmod.STORE_REQUIREMENT in requirements:
960 if requirementsmod.STORE_REQUIREMENT in requirements:
960 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
961 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
961 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
962 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
962 return storemod.fncachestore(path, vfstype, dotencode)
963 return storemod.fncachestore(path, vfstype, dotencode)
963
964
964 return storemod.encodedstore(path, vfstype)
965 return storemod.encodedstore(path, vfstype)
965
966
966 return storemod.basicstore(path, vfstype)
967 return storemod.basicstore(path, vfstype)
967
968
968
969
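As an aside, the branching above can be exercised on its own; the path below is a throwaway placeholder and the requirement constants are the ones referenced in the code.

# illustrative only; constructing a store object does not create the repo
from mercurial import localrepo, requirements as requirementsmod, vfs as vfsmod

reqs = {
    requirementsmod.STORE_REQUIREMENT,
    requirementsmod.FNCACHE_REQUIREMENT,
    requirementsmod.DOTENCODE_REQUIREMENT,
}
store = localrepo.makestore(reqs, b'/tmp/example/.hg', vfsmod.vfs)
print(type(store).__name__)  # fncachestore
print(type(localrepo.makestore(set(), b'/tmp/example/.hg', vfsmod.vfs)).__name__)
# -> basicstore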
969 def resolvestorevfsoptions(ui, requirements, features):
970 def resolvestorevfsoptions(ui, requirements, features):
970 """Resolve the options to pass to the store vfs opener.
971 """Resolve the options to pass to the store vfs opener.
971
972
972 The returned dict is used to influence behavior of the storage layer.
973 The returned dict is used to influence behavior of the storage layer.
973 """
974 """
974 options = {}
975 options = {}
975
976
976 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
977 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
977 options[b'treemanifest'] = True
978 options[b'treemanifest'] = True
978
979
979 # experimental config: format.manifestcachesize
980 # experimental config: format.manifestcachesize
980 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
981 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
981 if manifestcachesize is not None:
982 if manifestcachesize is not None:
982 options[b'manifestcachesize'] = manifestcachesize
983 options[b'manifestcachesize'] = manifestcachesize
983
984
984 # In the absence of another requirement superseding a revlog-related
985 # In the absence of another requirement superseding a revlog-related
985 # requirement, we have to assume the repo is using revlog version 0.
986 # requirement, we have to assume the repo is using revlog version 0.
986 # This revlog format is super old and we don't bother trying to parse
987 # This revlog format is super old and we don't bother trying to parse
987 # opener options for it because those options wouldn't do anything
988 # opener options for it because those options wouldn't do anything
988 # meaningful on such old repos.
989 # meaningful on such old repos.
989 if (
990 if (
990 requirementsmod.REVLOGV1_REQUIREMENT in requirements
991 requirementsmod.REVLOGV1_REQUIREMENT in requirements
991 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
992 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
992 ):
993 ):
993 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
994 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
994 else: # explicitly mark repo as using revlogv0
995 else: # explicitly mark repo as using revlogv0
995 options[b'revlogv0'] = True
996 options[b'revlogv0'] = True
996
997
997 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
998 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
998 options[b'copies-storage'] = b'changeset-sidedata'
999 options[b'copies-storage'] = b'changeset-sidedata'
999 else:
1000 else:
1000 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1001 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1001 copiesextramode = (b'changeset-only', b'compatibility')
1002 copiesextramode = (b'changeset-only', b'compatibility')
1002 if writecopiesto in copiesextramode:
1003 if writecopiesto in copiesextramode:
1003 options[b'copies-storage'] = b'extra'
1004 options[b'copies-storage'] = b'extra'
1004
1005
1005 return options
1006 return options
1006
1007
1007
1008
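For a quick look at what this resolution produces (illustrative, with a default ui; the exact keys depend on configuration defaults):

from mercurial import localrepo, requirements as requirementsmod, ui as uimod

ui = uimod.ui.load()
reqs = {
    requirementsmod.REVLOGV1_REQUIREMENT,
    requirementsmod.GENERALDELTA_REQUIREMENT,
}
opts = localrepo.resolvestorevfsoptions(ui, reqs, features=set())
print(opts[b'revlogv1'], opts[b'generaldelta'])   # True True
print(b'copies-storage' in opts)  # depends on experimental.copies.write-to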
1008 def resolverevlogstorevfsoptions(ui, requirements, features):
1009 def resolverevlogstorevfsoptions(ui, requirements, features):
1009 """Resolve opener options specific to revlogs."""
1010 """Resolve opener options specific to revlogs."""
1010
1011
1011 options = {}
1012 options = {}
1012 options[b'flagprocessors'] = {}
1013 options[b'flagprocessors'] = {}
1013
1014
1014 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1015 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1015 options[b'revlogv1'] = True
1016 options[b'revlogv1'] = True
1016 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1017 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1017 options[b'revlogv2'] = True
1018 options[b'revlogv2'] = True
1018
1019
1019 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1020 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1020 options[b'generaldelta'] = True
1021 options[b'generaldelta'] = True
1021
1022
1022 # experimental config: format.chunkcachesize
1023 # experimental config: format.chunkcachesize
1023 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1024 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1024 if chunkcachesize is not None:
1025 if chunkcachesize is not None:
1025 options[b'chunkcachesize'] = chunkcachesize
1026 options[b'chunkcachesize'] = chunkcachesize
1026
1027
1027 deltabothparents = ui.configbool(
1028 deltabothparents = ui.configbool(
1028 b'storage', b'revlog.optimize-delta-parent-choice'
1029 b'storage', b'revlog.optimize-delta-parent-choice'
1029 )
1030 )
1030 options[b'deltabothparents'] = deltabothparents
1031 options[b'deltabothparents'] = deltabothparents
1031
1032
1032 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1033 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1033 lazydeltabase = False
1034 lazydeltabase = False
1034 if lazydelta:
1035 if lazydelta:
1035 lazydeltabase = ui.configbool(
1036 lazydeltabase = ui.configbool(
1036 b'storage', b'revlog.reuse-external-delta-parent'
1037 b'storage', b'revlog.reuse-external-delta-parent'
1037 )
1038 )
1038 if lazydeltabase is None:
1039 if lazydeltabase is None:
1039 lazydeltabase = not scmutil.gddeltaconfig(ui)
1040 lazydeltabase = not scmutil.gddeltaconfig(ui)
1040 options[b'lazydelta'] = lazydelta
1041 options[b'lazydelta'] = lazydelta
1041 options[b'lazydeltabase'] = lazydeltabase
1042 options[b'lazydeltabase'] = lazydeltabase
1042
1043
1043 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1044 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1044 if 0 <= chainspan:
1045 if 0 <= chainspan:
1045 options[b'maxdeltachainspan'] = chainspan
1046 options[b'maxdeltachainspan'] = chainspan
1046
1047
1047 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1048 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1048 if mmapindexthreshold is not None:
1049 if mmapindexthreshold is not None:
1049 options[b'mmapindexthreshold'] = mmapindexthreshold
1050 options[b'mmapindexthreshold'] = mmapindexthreshold
1050
1051
1051 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1052 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1052 srdensitythres = float(
1053 srdensitythres = float(
1053 ui.config(b'experimental', b'sparse-read.density-threshold')
1054 ui.config(b'experimental', b'sparse-read.density-threshold')
1054 )
1055 )
1055 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1056 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1056 options[b'with-sparse-read'] = withsparseread
1057 options[b'with-sparse-read'] = withsparseread
1057 options[b'sparse-read-density-threshold'] = srdensitythres
1058 options[b'sparse-read-density-threshold'] = srdensitythres
1058 options[b'sparse-read-min-gap-size'] = srmingapsize
1059 options[b'sparse-read-min-gap-size'] = srmingapsize
1059
1060
1060 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1061 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1061 options[b'sparse-revlog'] = sparserevlog
1062 options[b'sparse-revlog'] = sparserevlog
1062 if sparserevlog:
1063 if sparserevlog:
1063 options[b'generaldelta'] = True
1064 options[b'generaldelta'] = True
1064
1065
1065 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1066 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1066 options[b'side-data'] = sidedata
1067 options[b'side-data'] = sidedata
1067
1068
1068 maxchainlen = None
1069 maxchainlen = None
1069 if sparserevlog:
1070 if sparserevlog:
1070 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1071 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1071 # experimental config: format.maxchainlen
1072 # experimental config: format.maxchainlen
1072 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1073 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1073 if maxchainlen is not None:
1074 if maxchainlen is not None:
1074 options[b'maxchainlen'] = maxchainlen
1075 options[b'maxchainlen'] = maxchainlen
1075
1076
1076 for r in requirements:
1077 for r in requirements:
1077 # we allow multiple compression engine requirements to co-exist because
1078 # we allow multiple compression engine requirements to co-exist because
1078 # strictly speaking, revlog seems to support mixed compression styles.
1079 # strictly speaking, revlog seems to support mixed compression styles.
1079 #
1080 #
1080 # The compression used for new entries will be "the last one"
1081 # The compression used for new entries will be "the last one"
1081 prefix = r.startswith
1082 prefix = r.startswith
1082 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1083 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1083 options[b'compengine'] = r.split(b'-', 2)[2]
1084 options[b'compengine'] = r.split(b'-', 2)[2]
1084
1085
1085 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1086 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1086 if options[b'zlib.level'] is not None:
1087 if options[b'zlib.level'] is not None:
1087 if not (0 <= options[b'zlib.level'] <= 9):
1088 if not (0 <= options[b'zlib.level'] <= 9):
1088 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1089 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1089 raise error.Abort(msg % options[b'zlib.level'])
1090 raise error.Abort(msg % options[b'zlib.level'])
1090 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1091 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1091 if options[b'zstd.level'] is not None:
1092 if options[b'zstd.level'] is not None:
1092 if not (0 <= options[b'zstd.level'] <= 22):
1093 if not (0 <= options[b'zstd.level'] <= 22):
1093 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1094 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1094 raise error.Abort(msg % options[b'zstd.level'])
1095 raise error.Abort(msg % options[b'zstd.level'])
1095
1096
1096 if requirementsmod.NARROW_REQUIREMENT in requirements:
1097 if requirementsmod.NARROW_REQUIREMENT in requirements:
1097 options[b'enableellipsis'] = True
1098 options[b'enableellipsis'] = True
1098
1099
1099 if ui.configbool(b'experimental', b'rust.index'):
1100 if ui.configbool(b'experimental', b'rust.index'):
1100 options[b'rust.index'] = True
1101 options[b'rust.index'] = True
1101 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1102 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1102 slow_path = ui.config(
1103 slow_path = ui.config(
1103 b'storage', b'revlog.persistent-nodemap.slow-path'
1104 b'storage', b'revlog.persistent-nodemap.slow-path'
1104 )
1105 )
1105 if slow_path not in (b'allow', b'warn', b'abort'):
1106 if slow_path not in (b'allow', b'warn', b'abort'):
1106 default = ui.config_default(
1107 default = ui.config_default(
1107 b'storage', b'revlog.persistent-nodemap.slow-path'
1108 b'storage', b'revlog.persistent-nodemap.slow-path'
1108 )
1109 )
1109 msg = _(
1110 msg = _(
1110 b'unknown value for config '
1111 b'unknown value for config '
1111 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1112 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1112 )
1113 )
1113 ui.warn(msg % slow_path)
1114 ui.warn(msg % slow_path)
1114 if not ui.quiet:
1115 if not ui.quiet:
1115 ui.warn(_(b'falling back to default value: %s\n') % default)
1116 ui.warn(_(b'falling back to default value: %s\n') % default)
1116 slow_path = default
1117 slow_path = default
1117
1118
1118 msg = _(
1119 msg = _(
1119 b"accessing `persistent-nodemap` repository without associated "
1120 b"accessing `persistent-nodemap` repository without associated "
1120 b"fast implementation."
1121 b"fast implementation."
1121 )
1122 )
1122 hint = _(
1123 hint = _(
1123 b"check `hg help config.format.use-persistent-nodemap` "
1124 b"check `hg help config.format.use-persistent-nodemap` "
1124 b"for details"
1125 b"for details"
1125 )
1126 )
1126 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1127 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1127 if slow_path == b'warn':
1128 if slow_path == b'warn':
1128 msg = b"warning: " + msg + b'\n'
1129 msg = b"warning: " + msg + b'\n'
1129 ui.warn(msg)
1130 ui.warn(msg)
1130 if not ui.quiet:
1131 if not ui.quiet:
1131 hint = b'(' + hint + b')\n'
1132 hint = b'(' + hint + b')\n'
1132 ui.warn(hint)
1133 ui.warn(hint)
1133 if slow_path == b'abort':
1134 if slow_path == b'abort':
1134 raise error.Abort(msg, hint=hint)
1135 raise error.Abort(msg, hint=hint)
1135 options[b'persistent-nodemap'] = True
1136 options[b'persistent-nodemap'] = True
1136 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1137 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1137 options[b'persistent-nodemap.mmap'] = True
1138 options[b'persistent-nodemap.mmap'] = True
1138 if ui.configbool(b'devel', b'persistent-nodemap'):
1139 if ui.configbool(b'devel', b'persistent-nodemap'):
1139 options[b'devel-force-nodemap'] = True
1140 options[b'devel-force-nodemap'] = True
1140
1141
1141 return options
1142 return options
1142
1143
1143
1144
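One detail worth spelling out from the requirements loop above: the compression engine name is simply everything after the second dash, so when several compression requirements coexist the last one seen wins. A tiny standalone check of that parsing:

# mirrors the r.split(b'-', 2)[2] expression used above
for r in (b'exp-compression-none', b'revlog-compression-zstd'):
    print(r.split(b'-', 2)[2])
# -> none
# -> zstd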
1144 def makemain(**kwargs):
1145 def makemain(**kwargs):
1145 """Produce a type conforming to ``ilocalrepositorymain``."""
1146 """Produce a type conforming to ``ilocalrepositorymain``."""
1146 return localrepository
1147 return localrepository
1147
1148
1148
1149
1149 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1150 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1150 class revlogfilestorage(object):
1151 class revlogfilestorage(object):
1151 """File storage when using revlogs."""
1152 """File storage when using revlogs."""
1152
1153
1153 def file(self, path):
1154 def file(self, path):
1154 if path.startswith(b'/'):
1155 if path.startswith(b'/'):
1155 path = path[1:]
1156 path = path[1:]
1156
1157
1157 return filelog.filelog(self.svfs, path)
1158 return filelog.filelog(self.svfs, path)
1158
1159
1159
1160
1160 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1161 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1161 class revlognarrowfilestorage(object):
1162 class revlognarrowfilestorage(object):
1162 """File storage when using revlogs and narrow files."""
1163 """File storage when using revlogs and narrow files."""
1163
1164
1164 def file(self, path):
1165 def file(self, path):
1165 if path.startswith(b'/'):
1166 if path.startswith(b'/'):
1166 path = path[1:]
1167 path = path[1:]
1167
1168
1168 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1169 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1169
1170
1170
1171
1171 def makefilestorage(requirements, features, **kwargs):
1172 def makefilestorage(requirements, features, **kwargs):
1172 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1173 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1173 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1174 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1174 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1175 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1175
1176
1176 if requirementsmod.NARROW_REQUIREMENT in requirements:
1177 if requirementsmod.NARROW_REQUIREMENT in requirements:
1177 return revlognarrowfilestorage
1178 return revlognarrowfilestorage
1178 else:
1179 else:
1179 return revlogfilestorage
1180 return revlogfilestorage
1180
1181
1181
1182
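Seen in isolation (illustrative only), the factory above picks the mixin purely from the requirements set and records the supported features as a side effect:

from mercurial import localrepo, requirements as requirementsmod
from mercurial.interfaces import repository

features = set()
cls = localrepo.makefilestorage({requirementsmod.NARROW_REQUIREMENT}, features)
print(cls.__name__)  # revlognarrowfilestorage
print(repository.REPO_FEATURE_STREAM_CLONE in features)  # True
print(localrepo.makefilestorage(set(), set()).__name__)  # revlogfilestorage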
1182 # List of repository interfaces and factory functions for them. Each
1183 # List of repository interfaces and factory functions for them. Each
1183 # will be called in order during ``makelocalrepository()`` to iteratively
1184 # will be called in order during ``makelocalrepository()`` to iteratively
1184 # derive the final type for a local repository instance. We capture the
1185 # derive the final type for a local repository instance. We capture the
1185 # function as a lambda so we don't hold a reference and the module-level
1186 # function as a lambda so we don't hold a reference and the module-level
1186 # functions can be wrapped.
1187 # functions can be wrapped.
1187 REPO_INTERFACES = [
1188 REPO_INTERFACES = [
1188 (repository.ilocalrepositorymain, lambda: makemain),
1189 (repository.ilocalrepositorymain, lambda: makemain),
1189 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1190 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1190 ]
1191 ]
1191
1192
1192
1193
1193 @interfaceutil.implementer(repository.ilocalrepositorymain)
1194 @interfaceutil.implementer(repository.ilocalrepositorymain)
1194 class localrepository(object):
1195 class localrepository(object):
1195 """Main class for representing local repositories.
1196 """Main class for representing local repositories.
1196
1197
1197 All local repositories are instances of this class.
1198 All local repositories are instances of this class.
1198
1199
1199 Constructed on its own, instances of this class are not usable as
1200 Constructed on its own, instances of this class are not usable as
1200 repository objects. To obtain a usable repository object, call
1201 repository objects. To obtain a usable repository object, call
1201 ``hg.repository()``, ``localrepo.instance()``, or
1202 ``hg.repository()``, ``localrepo.instance()``, or
1202 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1203 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1203 ``instance()`` adds support for creating new repositories.
1204 ``instance()`` adds support for creating new repositories.
1204 ``hg.repository()`` adds more extension integration, including calling
1205 ``hg.repository()`` adds more extension integration, including calling
1205 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1206 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1206 used.
1207 used.
1207 """
1208 """
1208
1209
1209 # obsolete experimental requirements:
1210 # obsolete experimental requirements:
1210 # - manifestv2: An experimental new manifest format that allowed
1211 # - manifestv2: An experimental new manifest format that allowed
1211 # for stem compression of long paths. Experiment ended up not
1212 # for stem compression of long paths. Experiment ended up not
1212 # being successful (repository sizes went up due to worse delta
1213 # being successful (repository sizes went up due to worse delta
1213 # chains), and the code was deleted in 4.6.
1214 # chains), and the code was deleted in 4.6.
1214 supportedformats = {
1215 supportedformats = {
1215 requirementsmod.REVLOGV1_REQUIREMENT,
1216 requirementsmod.REVLOGV1_REQUIREMENT,
1216 requirementsmod.GENERALDELTA_REQUIREMENT,
1217 requirementsmod.GENERALDELTA_REQUIREMENT,
1217 requirementsmod.TREEMANIFEST_REQUIREMENT,
1218 requirementsmod.TREEMANIFEST_REQUIREMENT,
1218 requirementsmod.COPIESSDC_REQUIREMENT,
1219 requirementsmod.COPIESSDC_REQUIREMENT,
1219 requirementsmod.REVLOGV2_REQUIREMENT,
1220 requirementsmod.REVLOGV2_REQUIREMENT,
1220 requirementsmod.SIDEDATA_REQUIREMENT,
1221 requirementsmod.SIDEDATA_REQUIREMENT,
1221 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1222 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1222 requirementsmod.NODEMAP_REQUIREMENT,
1223 requirementsmod.NODEMAP_REQUIREMENT,
1223 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1224 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1224 requirementsmod.SHARESAFE_REQUIREMENT,
1225 requirementsmod.SHARESAFE_REQUIREMENT,
1225 }
1226 }
1226 _basesupported = supportedformats | {
1227 _basesupported = supportedformats | {
1227 requirementsmod.STORE_REQUIREMENT,
1228 requirementsmod.STORE_REQUIREMENT,
1228 requirementsmod.FNCACHE_REQUIREMENT,
1229 requirementsmod.FNCACHE_REQUIREMENT,
1229 requirementsmod.SHARED_REQUIREMENT,
1230 requirementsmod.SHARED_REQUIREMENT,
1230 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1231 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1231 requirementsmod.DOTENCODE_REQUIREMENT,
1232 requirementsmod.DOTENCODE_REQUIREMENT,
1232 requirementsmod.SPARSE_REQUIREMENT,
1233 requirementsmod.SPARSE_REQUIREMENT,
1233 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1234 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1234 }
1235 }
1235
1236
1236 # list of prefixes for files which can be written without 'wlock'
1237 # list of prefixes for files which can be written without 'wlock'
1237 # Extensions should extend this list when needed
1238 # Extensions should extend this list when needed
1238 _wlockfreeprefix = {
1239 _wlockfreeprefix = {
1239 # We might consider requiring 'wlock' for the next
1240 # We might consider requiring 'wlock' for the next
1240 # two, but pretty much all the existing code assumes
1241 # two, but pretty much all the existing code assumes
1241 # wlock is not needed so we keep them excluded for
1242 # wlock is not needed so we keep them excluded for
1242 # now.
1243 # now.
1243 b'hgrc',
1244 b'hgrc',
1244 b'requires',
1245 b'requires',
1245 # XXX cache is a complicated business; someone
1246 # XXX cache is a complicated business; someone
1246 # should investigate this in depth at some point
1247 # should investigate this in depth at some point
1247 b'cache/',
1248 b'cache/',
1248 # XXX shouldn't be dirstate covered by the wlock?
1249 # XXX shouldn't be dirstate covered by the wlock?
1249 b'dirstate',
1250 b'dirstate',
1250 # XXX bisect was still a bit too messy at the time
1251 # XXX bisect was still a bit too messy at the time
1251 # this changeset was introduced. Someone should fix
1252 # this changeset was introduced. Someone should fix
1252 # the remaining bit and drop this line
1253 # the remaining bit and drop this line
1253 b'bisect.state',
1254 b'bisect.state',
1254 }
1255 }
1255
1256
1256 def __init__(
1257 def __init__(
1257 self,
1258 self,
1258 baseui,
1259 baseui,
1259 ui,
1260 ui,
1260 origroot,
1261 origroot,
1261 wdirvfs,
1262 wdirvfs,
1262 hgvfs,
1263 hgvfs,
1263 requirements,
1264 requirements,
1264 supportedrequirements,
1265 supportedrequirements,
1265 sharedpath,
1266 sharedpath,
1266 store,
1267 store,
1267 cachevfs,
1268 cachevfs,
1268 wcachevfs,
1269 wcachevfs,
1269 features,
1270 features,
1270 intents=None,
1271 intents=None,
1271 ):
1272 ):
1272 """Create a new local repository instance.
1273 """Create a new local repository instance.
1273
1274
1274 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1275 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1275 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1276 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1276 object.
1277 object.
1277
1278
1278 Arguments:
1279 Arguments:
1279
1280
1280 baseui
1281 baseui
1281 ``ui.ui`` instance that ``ui`` argument was based off of.
1282 ``ui.ui`` instance that ``ui`` argument was based off of.
1282
1283
1283 ui
1284 ui
1284 ``ui.ui`` instance for use by the repository.
1285 ``ui.ui`` instance for use by the repository.
1285
1286
1286 origroot
1287 origroot
1287 ``bytes`` path to working directory root of this repository.
1288 ``bytes`` path to working directory root of this repository.
1288
1289
1289 wdirvfs
1290 wdirvfs
1290 ``vfs.vfs`` rooted at the working directory.
1291 ``vfs.vfs`` rooted at the working directory.
1291
1292
1292 hgvfs
1293 hgvfs
1293 ``vfs.vfs`` rooted at .hg/
1294 ``vfs.vfs`` rooted at .hg/
1294
1295
1295 requirements
1296 requirements
1296 ``set`` of bytestrings representing repository opening requirements.
1297 ``set`` of bytestrings representing repository opening requirements.
1297
1298
1298 supportedrequirements
1299 supportedrequirements
1299 ``set`` of bytestrings representing repository requirements that we
1300 ``set`` of bytestrings representing repository requirements that we
1300 know how to open. May be a superset of ``requirements``.
1301 know how to open. May be a superset of ``requirements``.
1301
1302
1302 sharedpath
1303 sharedpath
1303 ``bytes`` defining the path to the storage base directory. Points to a
1304 ``bytes`` defining the path to the storage base directory. Points to a
1304 ``.hg/`` directory somewhere.
1305 ``.hg/`` directory somewhere.
1305
1306
1306 store
1307 store
1307 ``store.basicstore`` (or derived) instance providing access to
1308 ``store.basicstore`` (or derived) instance providing access to
1308 versioned storage.
1309 versioned storage.
1309
1310
1310 cachevfs
1311 cachevfs
1311 ``vfs.vfs`` used for cache files.
1312 ``vfs.vfs`` used for cache files.
1312
1313
1313 wcachevfs
1314 wcachevfs
1314 ``vfs.vfs`` used for cache files related to the working copy.
1315 ``vfs.vfs`` used for cache files related to the working copy.
1315
1316
1316 features
1317 features
1317 ``set`` of bytestrings defining features/capabilities of this
1318 ``set`` of bytestrings defining features/capabilities of this
1318 instance.
1319 instance.
1319
1320
1320 intents
1321 intents
1321 ``set`` of system strings indicating what this repo will be used
1322 ``set`` of system strings indicating what this repo will be used
1322 for.
1323 for.
1323 """
1324 """
1324 self.baseui = baseui
1325 self.baseui = baseui
1325 self.ui = ui
1326 self.ui = ui
1326 self.origroot = origroot
1327 self.origroot = origroot
1327 # vfs rooted at working directory.
1328 # vfs rooted at working directory.
1328 self.wvfs = wdirvfs
1329 self.wvfs = wdirvfs
1329 self.root = wdirvfs.base
1330 self.root = wdirvfs.base
1330 # vfs rooted at .hg/. Used to access most non-store paths.
1331 # vfs rooted at .hg/. Used to access most non-store paths.
1331 self.vfs = hgvfs
1332 self.vfs = hgvfs
1332 self.path = hgvfs.base
1333 self.path = hgvfs.base
1333 self.requirements = requirements
1334 self.requirements = requirements
1334 self.nodeconstants = sha1nodeconstants
1335 self.nodeconstants = sha1nodeconstants
1335 self.nullid = self.nodeconstants.nullid
1336 self.nullid = self.nodeconstants.nullid
1336 self.supported = supportedrequirements
1337 self.supported = supportedrequirements
1337 self.sharedpath = sharedpath
1338 self.sharedpath = sharedpath
1338 self.store = store
1339 self.store = store
1339 self.cachevfs = cachevfs
1340 self.cachevfs = cachevfs
1340 self.wcachevfs = wcachevfs
1341 self.wcachevfs = wcachevfs
1341 self.features = features
1342 self.features = features
1342
1343
1343 self.filtername = None
1344 self.filtername = None
1344
1345
1345 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1346 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1346 b'devel', b'check-locks'
1347 b'devel', b'check-locks'
1347 ):
1348 ):
1348 self.vfs.audit = self._getvfsward(self.vfs.audit)
1349 self.vfs.audit = self._getvfsward(self.vfs.audit)
1349 # A list of callback to shape the phase if no data were found.
1350 # A list of callback to shape the phase if no data were found.
1350 # Callback are in the form: func(repo, roots) --> processed root.
1351 # Callback are in the form: func(repo, roots) --> processed root.
1351 # This list it to be filled by extension during repo setup
1352 # This list it to be filled by extension during repo setup
1352 self._phasedefaults = []
1353 self._phasedefaults = []
1353
1354
1354 color.setup(self.ui)
1355 color.setup(self.ui)
1355
1356
1356 self.spath = self.store.path
1357 self.spath = self.store.path
1357 self.svfs = self.store.vfs
1358 self.svfs = self.store.vfs
1358 self.sjoin = self.store.join
1359 self.sjoin = self.store.join
1359 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1360 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1360 b'devel', b'check-locks'
1361 b'devel', b'check-locks'
1361 ):
1362 ):
1362 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1363 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1363 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1364 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1364 else: # standard vfs
1365 else: # standard vfs
1365 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1366 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1366
1367
1367 self._dirstatevalidatewarned = False
1368 self._dirstatevalidatewarned = False
1368
1369
1369 self._branchcaches = branchmap.BranchMapCache()
1370 self._branchcaches = branchmap.BranchMapCache()
1370 self._revbranchcache = None
1371 self._revbranchcache = None
1371 self._filterpats = {}
1372 self._filterpats = {}
1372 self._datafilters = {}
1373 self._datafilters = {}
1373 self._transref = self._lockref = self._wlockref = None
1374 self._transref = self._lockref = self._wlockref = None
1374
1375
1375 # A cache for various files under .hg/ that tracks file changes,
1376 # A cache for various files under .hg/ that tracks file changes,
1376 # (used by the filecache decorator)
1377 # (used by the filecache decorator)
1377 #
1378 #
1378 # Maps a property name to its util.filecacheentry
1379 # Maps a property name to its util.filecacheentry
1379 self._filecache = {}
1380 self._filecache = {}
1380
1381
1381 # hold sets of revisions to be filtered
1382 # hold sets of revisions to be filtered
1382 # should be cleared when something might have changed the filter value:
1383 # should be cleared when something might have changed the filter value:
1383 # - new changesets,
1384 # - new changesets,
1384 # - phase change,
1385 # - phase change,
1385 # - new obsolescence marker,
1386 # - new obsolescence marker,
1386 # - working directory parent change,
1387 # - working directory parent change,
1387 # - bookmark changes
1388 # - bookmark changes
1388 self.filteredrevcache = {}
1389 self.filteredrevcache = {}
1389
1390
1390 # post-dirstate-status hooks
1391 # post-dirstate-status hooks
1391 self._postdsstatus = []
1392 self._postdsstatus = []
1392
1393
1393 # generic mapping between names and nodes
1394 # generic mapping between names and nodes
1394 self.names = namespaces.namespaces()
1395 self.names = namespaces.namespaces()
1395
1396
1396 # Key to signature value.
1397 # Key to signature value.
1397 self._sparsesignaturecache = {}
1398 self._sparsesignaturecache = {}
1398 # Signature to cached matcher instance.
1399 # Signature to cached matcher instance.
1399 self._sparsematchercache = {}
1400 self._sparsematchercache = {}
1400
1401
1401 self._extrafilterid = repoview.extrafilter(ui)
1402 self._extrafilterid = repoview.extrafilter(ui)
1402
1403
1403 self.filecopiesmode = None
1404 self.filecopiesmode = None
1404 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1405 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1405 self.filecopiesmode = b'changeset-sidedata'
1406 self.filecopiesmode = b'changeset-sidedata'
1406
1407
1407 self._wanted_sidedata = set()
1408 self._wanted_sidedata = set()
1408 self._sidedata_computers = {}
1409 self._sidedata_computers = {}
1409 metadatamod.set_sidedata_spec_for_repo(self)
1410 metadatamod.set_sidedata_spec_for_repo(self)
1410
1411
1411 def _getvfsward(self, origfunc):
1412 def _getvfsward(self, origfunc):
1412 """build a ward for self.vfs"""
1413 """build a ward for self.vfs"""
1413 rref = weakref.ref(self)
1414 rref = weakref.ref(self)
1414
1415
1415 def checkvfs(path, mode=None):
1416 def checkvfs(path, mode=None):
1416 ret = origfunc(path, mode=mode)
1417 ret = origfunc(path, mode=mode)
1417 repo = rref()
1418 repo = rref()
1418 if (
1419 if (
1419 repo is None
1420 repo is None
1420 or not util.safehasattr(repo, b'_wlockref')
1421 or not util.safehasattr(repo, b'_wlockref')
1421 or not util.safehasattr(repo, b'_lockref')
1422 or not util.safehasattr(repo, b'_lockref')
1422 ):
1423 ):
1423 return
1424 return
1424 if mode in (None, b'r', b'rb'):
1425 if mode in (None, b'r', b'rb'):
1425 return
1426 return
1426 if path.startswith(repo.path):
1427 if path.startswith(repo.path):
1427 # truncate name relative to the repository (.hg)
1428 # truncate name relative to the repository (.hg)
1428 path = path[len(repo.path) + 1 :]
1429 path = path[len(repo.path) + 1 :]
1429 if path.startswith(b'cache/'):
1430 if path.startswith(b'cache/'):
1430 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1431 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1431 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1432 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1432 # path prefixes covered by 'lock'
1433 # path prefixes covered by 'lock'
1433 vfs_path_prefixes = (
1434 vfs_path_prefixes = (
1434 b'journal.',
1435 b'journal.',
1435 b'undo.',
1436 b'undo.',
1436 b'strip-backup/',
1437 b'strip-backup/',
1437 b'cache/',
1438 b'cache/',
1438 )
1439 )
1439 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1440 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1440 if repo._currentlock(repo._lockref) is None:
1441 if repo._currentlock(repo._lockref) is None:
1441 repo.ui.develwarn(
1442 repo.ui.develwarn(
1442 b'write with no lock: "%s"' % path,
1443 b'write with no lock: "%s"' % path,
1443 stacklevel=3,
1444 stacklevel=3,
1444 config=b'check-locks',
1445 config=b'check-locks',
1445 )
1446 )
1446 elif repo._currentlock(repo._wlockref) is None:
1447 elif repo._currentlock(repo._wlockref) is None:
1447 # rest of vfs files are covered by 'wlock'
1448 # rest of vfs files are covered by 'wlock'
1448 #
1449 #
1449 # exclude special files
1450 # exclude special files
1450 for prefix in self._wlockfreeprefix:
1451 for prefix in self._wlockfreeprefix:
1451 if path.startswith(prefix):
1452 if path.startswith(prefix):
1452 return
1453 return
1453 repo.ui.develwarn(
1454 repo.ui.develwarn(
1454 b'write with no wlock: "%s"' % path,
1455 b'write with no wlock: "%s"' % path,
1455 stacklevel=3,
1456 stacklevel=3,
1456 config=b'check-locks',
1457 config=b'check-locks',
1457 )
1458 )
1458 return ret
1459 return ret
1459
1460
1460 return checkvfs
1461 return checkvfs
1461
1462
1462 def _getsvfsward(self, origfunc):
1463 def _getsvfsward(self, origfunc):
1463 """build a ward for self.svfs"""
1464 """build a ward for self.svfs"""
1464 rref = weakref.ref(self)
1465 rref = weakref.ref(self)
1465
1466
1466 def checksvfs(path, mode=None):
1467 def checksvfs(path, mode=None):
1467 ret = origfunc(path, mode=mode)
1468 ret = origfunc(path, mode=mode)
1468 repo = rref()
1469 repo = rref()
1469 if repo is None or not util.safehasattr(repo, b'_lockref'):
1470 if repo is None or not util.safehasattr(repo, b'_lockref'):
1470 return
1471 return
1471 if mode in (None, b'r', b'rb'):
1472 if mode in (None, b'r', b'rb'):
1472 return
1473 return
1473 if path.startswith(repo.sharedpath):
1474 if path.startswith(repo.sharedpath):
1474 # truncate name relative to the repository (.hg)
1475 # truncate name relative to the repository (.hg)
1475 path = path[len(repo.sharedpath) + 1 :]
1476 path = path[len(repo.sharedpath) + 1 :]
1476 if repo._currentlock(repo._lockref) is None:
1477 if repo._currentlock(repo._lockref) is None:
1477 repo.ui.develwarn(
1478 repo.ui.develwarn(
1478 b'write with no lock: "%s"' % path, stacklevel=4
1479 b'write with no lock: "%s"' % path, stacklevel=4
1479 )
1480 )
1480 return ret
1481 return ret
1481
1482
1482 return checksvfs
1483 return checksvfs
1483
1484
1484 def close(self):
1485 def close(self):
1485 self._writecaches()
1486 self._writecaches()
1486
1487
1487 def _writecaches(self):
1488 def _writecaches(self):
1488 if self._revbranchcache:
1489 if self._revbranchcache:
1489 self._revbranchcache.write()
1490 self._revbranchcache.write()
1490
1491
1491 def _restrictcapabilities(self, caps):
1492 def _restrictcapabilities(self, caps):
1492 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1493 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1493 caps = set(caps)
1494 caps = set(caps)
1494 capsblob = bundle2.encodecaps(
1495 capsblob = bundle2.encodecaps(
1495 bundle2.getrepocaps(self, role=b'client')
1496 bundle2.getrepocaps(self, role=b'client')
1496 )
1497 )
1497 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1498 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1499 if self.ui.configbool(b'experimental', b'narrow'):
1500 caps.add(wireprototypes.NARROWCAP)
1498 return caps
1501 return caps
1499
1502
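The two lines added above are the behavioural core of this change: when ``experimental.narrow`` is enabled, the capability set of a local peer now includes the narrow capability, so narrow-aware code no longer needs a remote server to see it. A hedged sketch of the observable effect (the repository path is a placeholder):

from mercurial import hg, ui as uimod, wireprototypes

ui = uimod.ui.load()
ui.setconfig(b'experimental', b'narrow', b'yes')
repo = hg.repository(ui, b'/path/to/some/repo')  # placeholder path
peer = repo.peer()  # localpeer; its caps pass through _restrictcapabilities()
print(wireprototypes.NARROWCAP in peer.capabilities())  # expected: True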
1500 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1503 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1501 # self -> auditor -> self._checknested -> self
1504 # self -> auditor -> self._checknested -> self
1502
1505
1503 @property
1506 @property
1504 def auditor(self):
1507 def auditor(self):
1505 # This is only used by context.workingctx.match in order to
1508 # This is only used by context.workingctx.match in order to
1506 # detect files in subrepos.
1509 # detect files in subrepos.
1507 return pathutil.pathauditor(self.root, callback=self._checknested)
1510 return pathutil.pathauditor(self.root, callback=self._checknested)
1508
1511
1509 @property
1512 @property
1510 def nofsauditor(self):
1513 def nofsauditor(self):
1511 # This is only used by context.basectx.match in order to detect
1514 # This is only used by context.basectx.match in order to detect
1512 # files in subrepos.
1515 # files in subrepos.
1513 return pathutil.pathauditor(
1516 return pathutil.pathauditor(
1514 self.root, callback=self._checknested, realfs=False, cached=True
1517 self.root, callback=self._checknested, realfs=False, cached=True
1515 )
1518 )
1516
1519
1517 def _checknested(self, path):
1520 def _checknested(self, path):
1518 """Determine if path is a legal nested repository."""
1521 """Determine if path is a legal nested repository."""
1519 if not path.startswith(self.root):
1522 if not path.startswith(self.root):
1520 return False
1523 return False
1521 subpath = path[len(self.root) + 1 :]
1524 subpath = path[len(self.root) + 1 :]
1522 normsubpath = util.pconvert(subpath)
1525 normsubpath = util.pconvert(subpath)
1523
1526
1524 # XXX: Checking against the current working copy is wrong in
1527 # XXX: Checking against the current working copy is wrong in
1525 # the sense that it can reject things like
1528 # the sense that it can reject things like
1526 #
1529 #
1527 # $ hg cat -r 10 sub/x.txt
1530 # $ hg cat -r 10 sub/x.txt
1528 #
1531 #
1529 # if sub/ is no longer a subrepository in the working copy
1532 # if sub/ is no longer a subrepository in the working copy
1530 # parent revision.
1533 # parent revision.
1531 #
1534 #
1532 # However, it can of course also allow things that would have
1535 # However, it can of course also allow things that would have
1533 # been rejected before, such as the above cat command if sub/
1536 # been rejected before, such as the above cat command if sub/
1534 # is a subrepository now, but was a normal directory before.
1537 # is a subrepository now, but was a normal directory before.
1535 # The old path auditor would have rejected by mistake since it
1538 # The old path auditor would have rejected by mistake since it
1536 # panics when it sees sub/.hg/.
1539 # panics when it sees sub/.hg/.
1537 #
1540 #
1538 # All in all, checking against the working copy seems sensible
1541 # All in all, checking against the working copy seems sensible
1539 # since we want to prevent access to nested repositories on
1542 # since we want to prevent access to nested repositories on
1540 # the filesystem *now*.
1543 # the filesystem *now*.
1541 ctx = self[None]
1544 ctx = self[None]
1542 parts = util.splitpath(subpath)
1545 parts = util.splitpath(subpath)
1543 while parts:
1546 while parts:
1544 prefix = b'/'.join(parts)
1547 prefix = b'/'.join(parts)
1545 if prefix in ctx.substate:
1548 if prefix in ctx.substate:
1546 if prefix == normsubpath:
1549 if prefix == normsubpath:
1547 return True
1550 return True
1548 else:
1551 else:
1549 sub = ctx.sub(prefix)
1552 sub = ctx.sub(prefix)
1550 return sub.checknested(subpath[len(prefix) + 1 :])
1553 return sub.checknested(subpath[len(prefix) + 1 :])
1551 else:
1554 else:
1552 parts.pop()
1555 parts.pop()
1553 return False
1556 return False
1554
1557
1555 def peer(self):
1558 def peer(self):
1556 return localpeer(self) # not cached to avoid reference cycle
1559 return localpeer(self) # not cached to avoid reference cycle
1557
1560
1558 def unfiltered(self):
1561 def unfiltered(self):
1559 """Return unfiltered version of the repository
1562 """Return unfiltered version of the repository
1560
1563
1561 Intended to be overwritten by filtered repo."""
1564 Intended to be overwritten by filtered repo."""
1562 return self
1565 return self
1563
1566
1564 def filtered(self, name, visibilityexceptions=None):
1567 def filtered(self, name, visibilityexceptions=None):
1565 """Return a filtered version of a repository
1568 """Return a filtered version of a repository
1566
1569
1567 The `name` parameter is the identifier of the requested view. This
1570 The `name` parameter is the identifier of the requested view. This
1568 will return a repoview object set "exactly" to the specified view.
1571 will return a repoview object set "exactly" to the specified view.
1569
1572
1570 This function does not apply recursive filtering to a repository. For
1573 This function does not apply recursive filtering to a repository. For
1571 example calling `repo.filtered("served")` will return a repoview using
1574 example calling `repo.filtered("served")` will return a repoview using
1572 the "served" view, regardless of the initial view used by `repo`.
1575 the "served" view, regardless of the initial view used by `repo`.
1573
1576
1574 In other words, there is always only one level of `repoview` "filtering".
1577 In other words, there is always only one level of `repoview` "filtering".
1575 """
1578 """
1576 if self._extrafilterid is not None and b'%' not in name:
1579 if self._extrafilterid is not None and b'%' not in name:
1577 name = name + b'%' + self._extrafilterid
1580 name = name + b'%' + self._extrafilterid
1578
1581
1579 cls = repoview.newtype(self.unfiltered().__class__)
1582 cls = repoview.newtype(self.unfiltered().__class__)
1580 return cls(self, name, visibilityexceptions)
1583 return cls(self, name, visibilityexceptions)
1581
1584
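A small usage sketch of the view mechanism described in the docstring above (the path is a placeholder); note that requesting a view from an already filtered repo replaces the filter rather than stacking another level:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/some/repo')  # placeholder
served = repo.filtered(b'served')
print(served.filtername)                        # b'served'
print(served.filtered(b'visible').filtername)   # b'visible', not nested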
1582 @mixedrepostorecache(
1585 @mixedrepostorecache(
1583 (b'bookmarks', b'plain'),
1586 (b'bookmarks', b'plain'),
1584 (b'bookmarks.current', b'plain'),
1587 (b'bookmarks.current', b'plain'),
1585 (b'bookmarks', b''),
1588 (b'bookmarks', b''),
1586 (b'00changelog.i', b''),
1589 (b'00changelog.i', b''),
1587 )
1590 )
1588 def _bookmarks(self):
1591 def _bookmarks(self):
1589 # Since the multiple files involved in the transaction cannot be
1592 # Since the multiple files involved in the transaction cannot be
1590 # written atomically (with current repository format), there is a race
1593 # written atomically (with current repository format), there is a race
1591 # condition here.
1594 # condition here.
1592 #
1595 #
1593 # 1) changelog content A is read
1596 # 1) changelog content A is read
1594 # 2) outside transaction update changelog to content B
1597 # 2) outside transaction update changelog to content B
1595 # 3) outside transaction update bookmark file referring to content B
1598 # 3) outside transaction update bookmark file referring to content B
1596 # 4) bookmarks file content is read and filtered against changelog-A
1599 # 4) bookmarks file content is read and filtered against changelog-A
1597 #
1600 #
1598 # When this happens, bookmarks against nodes missing from A are dropped.
1601 # When this happens, bookmarks against nodes missing from A are dropped.
1599 #
1602 #
1600 # Having this happen during a read is not great, but it becomes worse
1603 # Having this happen during a read is not great, but it becomes worse
1601 # when it happens during a write, because the bookmarks to the "unknown"
1604 # when it happens during a write, because the bookmarks to the "unknown"
1602 # nodes will be dropped for good. However, writes happen within locks.
1605 # nodes will be dropped for good. However, writes happen within locks.
1603 # This locking makes it possible to have a race-free, consistent read.
1606 # This locking makes it possible to have a race-free, consistent read.
1604 # For this purpose, data read from disk before locking is
1607 # For this purpose, data read from disk before locking is
1605 # "invalidated" right after the locks are taken. These invalidations are
1608 # "invalidated" right after the locks are taken. These invalidations are
1606 # "light": the `filecache` mechanism keeps the data in memory and will
1609 # "light": the `filecache` mechanism keeps the data in memory and will
1607 # reuse it if the underlying files did not change. Not parsing the
1610 # reuse it if the underlying files did not change. Not parsing the
1608 # same data multiple times helps performance.
1611 # same data multiple times helps performance.
1609 #
1612 #
1610 # Unfortunately, in the case described above, the files tracked by the
1613 # Unfortunately, in the case described above, the files tracked by the
1611 # bookmarks file cache might not have changed, but the in-memory
1614 # bookmarks file cache might not have changed, but the in-memory
1612 # content is still "wrong" because we used an older changelog content
1615 # content is still "wrong" because we used an older changelog content
1613 # to process the on-disk data. So after locking, the changelog would be
1616 # to process the on-disk data. So after locking, the changelog would be
1614 # refreshed but `_bookmarks` would be preserved.
1617 # refreshed but `_bookmarks` would be preserved.
1615 # Adding `00changelog.i` to the list of tracked files is not
1618 # Adding `00changelog.i` to the list of tracked files is not
1616 # enough, because at the time we build the content for `_bookmarks` in
1619 # enough, because at the time we build the content for `_bookmarks` in
1617 # (4), the changelog file has already diverged from the content used
1620 # (4), the changelog file has already diverged from the content used
1618 # for loading `changelog` in (1)
1621 # for loading `changelog` in (1)
1619 #
1622 #
1620 # To prevent the issue, we force the changelog to be explicitly
1623 # To prevent the issue, we force the changelog to be explicitly
1621 # reloaded while computing `_bookmarks`. The data race can still happen
1624 # reloaded while computing `_bookmarks`. The data race can still happen
1622 # without the lock (with a narrower window), but it would no longer go
1625 # without the lock (with a narrower window), but it would no longer go
1623 # undetected during the lock time refresh.
1626 # undetected during the lock time refresh.
1624 #
1627 #
1625 # The new schedule is as follows:
1628 # The new schedule is as follows:
1626 #
1629 #
1627 # 1) filecache logic detects that `_bookmarks` needs to be computed
1630 # 1) filecache logic detects that `_bookmarks` needs to be computed
1628 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1631 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1629 # 3) We force `changelog` filecache to be tested
1632 # 3) We force `changelog` filecache to be tested
1630 # 4) cachestat for `changelog` is captured (for changelog)
1633 # 4) cachestat for `changelog` is captured (for changelog)
1631 # 5) `_bookmarks` is computed and cached
1634 # 5) `_bookmarks` is computed and cached
1632 #
1635 #
1633 # The step in (3) ensures we have a changelog at least as recent as the
1636 # The step in (3) ensures we have a changelog at least as recent as the
1634 # cache stat computed in (1). As a result at locking time:
1637 # cache stat computed in (1). As a result at locking time:
1635 # * if the changelog did not change since (1) -> we can reuse the data
1638 # * if the changelog did not change since (1) -> we can reuse the data
1636 # * otherwise -> the bookmarks get refreshed.
1639 # * otherwise -> the bookmarks get refreshed.
1637 self._refreshchangelog()
1640 self._refreshchangelog()
1638 return bookmarks.bmstore(self)
1641 return bookmarks.bmstore(self)
1639
1642
1640 def _refreshchangelog(self):
1643 def _refreshchangelog(self):
1641 """make sure the in memory changelog match the on-disk one"""
1644 """make sure the in memory changelog match the on-disk one"""
1642 if 'changelog' in vars(self) and self.currenttransaction() is None:
1645 if 'changelog' in vars(self) and self.currenttransaction() is None:
1643 del self.changelog
1646 del self.changelog
1644
1647
1645 @property
1648 @property
1646 def _activebookmark(self):
1649 def _activebookmark(self):
1647 return self._bookmarks.active
1650 return self._bookmarks.active
1648
1651
1649 # _phasesets depend on changelog. what we need is to call
1652 # _phasesets depend on changelog. what we need is to call
1650 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1653 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1651 # can't be easily expressed in filecache mechanism.
1654 # can't be easily expressed in filecache mechanism.
1652 @storecache(b'phaseroots', b'00changelog.i')
1655 @storecache(b'phaseroots', b'00changelog.i')
1653 def _phasecache(self):
1656 def _phasecache(self):
1654 return phases.phasecache(self, self._phasedefaults)
1657 return phases.phasecache(self, self._phasedefaults)
1655
1658
1656 @storecache(b'obsstore')
1659 @storecache(b'obsstore')
1657 def obsstore(self):
1660 def obsstore(self):
1658 return obsolete.makestore(self.ui, self)
1661 return obsolete.makestore(self.ui, self)
1659
1662
1660 @storecache(b'00changelog.i')
1663 @storecache(b'00changelog.i')
1661 def changelog(self):
1664 def changelog(self):
1662 # load dirstate before changelog to avoid race see issue6303
1665 # load dirstate before changelog to avoid race see issue6303
1663 self.dirstate.prefetch_parents()
1666 self.dirstate.prefetch_parents()
1664 return self.store.changelog(
1667 return self.store.changelog(
1665 txnutil.mayhavepending(self.root),
1668 txnutil.mayhavepending(self.root),
1666 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1669 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1667 )
1670 )
1668
1671
1669 @storecache(b'00manifest.i')
1672 @storecache(b'00manifest.i')
1670 def manifestlog(self):
1673 def manifestlog(self):
1671 return self.store.manifestlog(self, self._storenarrowmatch)
1674 return self.store.manifestlog(self, self._storenarrowmatch)
1672
1675
1673 @repofilecache(b'dirstate')
1676 @repofilecache(b'dirstate')
1674 def dirstate(self):
1677 def dirstate(self):
1675 return self._makedirstate()
1678 return self._makedirstate()
1676
1679
1677 def _makedirstate(self):
1680 def _makedirstate(self):
1678 """Extension point for wrapping the dirstate per-repo."""
1681 """Extension point for wrapping the dirstate per-repo."""
1679 sparsematchfn = lambda: sparse.matcher(self)
1682 sparsematchfn = lambda: sparse.matcher(self)
1680
1683
1681 return dirstate.dirstate(
1684 return dirstate.dirstate(
1682 self.vfs,
1685 self.vfs,
1683 self.ui,
1686 self.ui,
1684 self.root,
1687 self.root,
1685 self._dirstatevalidate,
1688 self._dirstatevalidate,
1686 sparsematchfn,
1689 sparsematchfn,
1687 self.nodeconstants,
1690 self.nodeconstants,
1688 )
1691 )
1689
1692
1690 def _dirstatevalidate(self, node):
1693 def _dirstatevalidate(self, node):
1691 try:
1694 try:
1692 self.changelog.rev(node)
1695 self.changelog.rev(node)
1693 return node
1696 return node
1694 except error.LookupError:
1697 except error.LookupError:
1695 if not self._dirstatevalidatewarned:
1698 if not self._dirstatevalidatewarned:
1696 self._dirstatevalidatewarned = True
1699 self._dirstatevalidatewarned = True
1697 self.ui.warn(
1700 self.ui.warn(
1698 _(b"warning: ignoring unknown working parent %s!\n")
1701 _(b"warning: ignoring unknown working parent %s!\n")
1699 % short(node)
1702 % short(node)
1700 )
1703 )
1701 return nullid
1704 return nullid
1702
1705
1703 @storecache(narrowspec.FILENAME)
1706 @storecache(narrowspec.FILENAME)
1704 def narrowpats(self):
1707 def narrowpats(self):
1705 """matcher patterns for this repository's narrowspec
1708 """matcher patterns for this repository's narrowspec
1706
1709
1707 A tuple of (includes, excludes).
1710 A tuple of (includes, excludes).
1708 """
1711 """
1709 return narrowspec.load(self)
1712 return narrowspec.load(self)
1710
1713
1711 @storecache(narrowspec.FILENAME)
1714 @storecache(narrowspec.FILENAME)
1712 def _storenarrowmatch(self):
1715 def _storenarrowmatch(self):
1713 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1716 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1714 return matchmod.always()
1717 return matchmod.always()
1715 include, exclude = self.narrowpats
1718 include, exclude = self.narrowpats
1716 return narrowspec.match(self.root, include=include, exclude=exclude)
1719 return narrowspec.match(self.root, include=include, exclude=exclude)
1717
1720
1718 @storecache(narrowspec.FILENAME)
1721 @storecache(narrowspec.FILENAME)
1719 def _narrowmatch(self):
1722 def _narrowmatch(self):
1720 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1723 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1721 return matchmod.always()
1724 return matchmod.always()
1722 narrowspec.checkworkingcopynarrowspec(self)
1725 narrowspec.checkworkingcopynarrowspec(self)
1723 include, exclude = self.narrowpats
1726 include, exclude = self.narrowpats
1724 return narrowspec.match(self.root, include=include, exclude=exclude)
1727 return narrowspec.match(self.root, include=include, exclude=exclude)
1725
1728
1726 def narrowmatch(self, match=None, includeexact=False):
1729 def narrowmatch(self, match=None, includeexact=False):
1727 """matcher corresponding the the repo's narrowspec
1730 """matcher corresponding the the repo's narrowspec
1728
1731
1729 If `match` is given, then that will be intersected with the narrow
1732 If `match` is given, then that will be intersected with the narrow
1730 matcher.
1733 matcher.
1731
1734
1732 If `includeexact` is True, then any exact matches from `match` will
1735 If `includeexact` is True, then any exact matches from `match` will
1733 be included even if they're outside the narrowspec.
1736 be included even if they're outside the narrowspec.
1734 """
1737 """
1735 if match:
1738 if match:
1736 if includeexact and not self._narrowmatch.always():
1739 if includeexact and not self._narrowmatch.always():
1737 # do not exclude explicitly-specified paths so that they can
1740 # do not exclude explicitly-specified paths so that they can
1738 # be warned later on
1741 # be warned later on
1739 em = matchmod.exact(match.files())
1742 em = matchmod.exact(match.files())
1740 nm = matchmod.unionmatcher([self._narrowmatch, em])
1743 nm = matchmod.unionmatcher([self._narrowmatch, em])
1741 return matchmod.intersectmatchers(match, nm)
1744 return matchmod.intersectmatchers(match, nm)
1742 return matchmod.intersectmatchers(match, self._narrowmatch)
1745 return matchmod.intersectmatchers(match, self._narrowmatch)
1743 return self._narrowmatch
1746 return self._narrowmatch
1744
1747
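The composition in narrowmatch() above is plain matcher algebra: intersect the caller's match with the narrow matcher, and, when includeexact is set, first union the narrow matcher with an exact matcher over the caller's files. A minimal standalone sketch of that logic, modelling matchers as simple predicates (the helper names below are illustrative only and are not part of mercurial.match):

    def intersect(m1, m2):
        return lambda path: m1(path) and m2(path)

    def union(m1, m2):
        return lambda path: m1(path) or m2(path)

    narrow = lambda path: path.startswith(b'src/')   # stands in for the narrowspec
    wanted = lambda path: path.endswith(b'.py')      # stands in for the caller's match
    exact_paths = {b'tools/check.py'}                # explicitly named files

    # includeexact=True: exact paths survive even outside the narrowspec so the
    # caller can warn about them later instead of silently dropping them.
    effective = intersect(wanted, union(narrow, lambda p: p in exact_paths))

    assert effective(b'src/foo.py')        # inside the narrowspec and wanted
    assert not effective(b'src/data.bin')  # inside the narrowspec, not wanted
    assert effective(b'tools/check.py')    # outside the narrowspec, but exact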
1745 def setnarrowpats(self, newincludes, newexcludes):
1748 def setnarrowpats(self, newincludes, newexcludes):
1746 narrowspec.save(self, newincludes, newexcludes)
1749 narrowspec.save(self, newincludes, newexcludes)
1747 self.invalidate(clearfilecache=True)
1750 self.invalidate(clearfilecache=True)
1748
1751
1749 @unfilteredpropertycache
1752 @unfilteredpropertycache
1750 def _quick_access_changeid_null(self):
1753 def _quick_access_changeid_null(self):
1751 return {
1754 return {
1752 b'null': (nullrev, nullid),
1755 b'null': (nullrev, nullid),
1753 nullrev: (nullrev, nullid),
1756 nullrev: (nullrev, nullid),
1754 nullid: (nullrev, nullid),
1757 nullid: (nullrev, nullid),
1755 }
1758 }
1756
1759
1757 @unfilteredpropertycache
1760 @unfilteredpropertycache
1758 def _quick_access_changeid_wc(self):
1761 def _quick_access_changeid_wc(self):
1759 # also fast path access to the working copy parents
1762 # also fast path access to the working copy parents
1760 # however, only do it for filters that ensure wc is visible.
1763 # however, only do it for filters that ensure wc is visible.
1761 quick = self._quick_access_changeid_null.copy()
1764 quick = self._quick_access_changeid_null.copy()
1762 cl = self.unfiltered().changelog
1765 cl = self.unfiltered().changelog
1763 for node in self.dirstate.parents():
1766 for node in self.dirstate.parents():
1764 if node == nullid:
1767 if node == nullid:
1765 continue
1768 continue
1766 rev = cl.index.get_rev(node)
1769 rev = cl.index.get_rev(node)
1767 if rev is None:
1770 if rev is None:
1768 # unknown working copy parent case:
1771 # unknown working copy parent case:
1769 #
1772 #
1770 # skip the fast path and let higher code deal with it
1773 # skip the fast path and let higher code deal with it
1771 continue
1774 continue
1772 pair = (rev, node)
1775 pair = (rev, node)
1773 quick[rev] = pair
1776 quick[rev] = pair
1774 quick[node] = pair
1777 quick[node] = pair
1775 # also add the parents of the parents
1778 # also add the parents of the parents
1776 for r in cl.parentrevs(rev):
1779 for r in cl.parentrevs(rev):
1777 if r == nullrev:
1780 if r == nullrev:
1778 continue
1781 continue
1779 n = cl.node(r)
1782 n = cl.node(r)
1780 pair = (r, n)
1783 pair = (r, n)
1781 quick[r] = pair
1784 quick[r] = pair
1782 quick[n] = pair
1785 quick[n] = pair
1783 p1node = self.dirstate.p1()
1786 p1node = self.dirstate.p1()
1784 if p1node != nullid:
1787 if p1node != nullid:
1785 quick[b'.'] = quick[p1node]
1788 quick[b'.'] = quick[p1node]
1786 return quick
1789 return quick
1787
1790
1788 @unfilteredmethod
1791 @unfilteredmethod
1789 def _quick_access_changeid_invalidate(self):
1792 def _quick_access_changeid_invalidate(self):
1790 if '_quick_access_changeid_wc' in vars(self):
1793 if '_quick_access_changeid_wc' in vars(self):
1791 del self.__dict__['_quick_access_changeid_wc']
1794 del self.__dict__['_quick_access_changeid_wc']
1792
1795
1793 @property
1796 @property
1794 def _quick_access_changeid(self):
1797 def _quick_access_changeid(self):
1795 """an helper dictionnary for __getitem__ calls
1798 """an helper dictionnary for __getitem__ calls
1796
1799
1797 This contains a list of symbol we can recognise right away without
1800 This contains a list of symbol we can recognise right away without
1798 further processing.
1801 further processing.
1799 """
1802 """
1800 if self.filtername in repoview.filter_has_wc:
1803 if self.filtername in repoview.filter_has_wc:
1801 return self._quick_access_changeid_wc
1804 return self._quick_access_changeid_wc
1802 return self._quick_access_changeid_null
1805 return self._quick_access_changeid_null
1803
1806
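The two properties above key the same (rev, node) pair under several aliases (b'null', b'.', the revision number and the node) so that __getitem__ can resolve the common lookups without touching the revset machinery. A rough standalone illustration of that table, using toy data in place of the real changelog accessors:

    def build_quick_access(wc_parents, parentrevs, node_of):
        nullrev, nullid = -1, b'\0' * 20
        quick = {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }
        for rev, node in wc_parents:
            quick[rev] = quick[node] = (rev, node)
            for r in parentrevs(rev):  # also cache the grandparents
                if r != nullrev:
                    quick[r] = quick[node_of(r)] = (r, node_of(r))
        if wc_parents:
            quick[b'.'] = quick[wc_parents[0][1]]  # '.' aliases the first parent
        return quick

    # toy linear history: rev 0 <- rev 1, where rev 1 is the working copy parent
    nodes = {0: b'n0' * 10, 1: b'n1' * 10}
    table = build_quick_access(
        [(1, nodes[1])],
        parentrevs=lambda rev: [rev - 1] if rev > 0 else [-1],
        node_of=nodes.__getitem__,
    )
    assert table[b'.'] == (1, nodes[1])
    assert table[0] == (0, nodes[0])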
1804 def __getitem__(self, changeid):
1807 def __getitem__(self, changeid):
1805 # dealing with special cases
1808 # dealing with special cases
1806 if changeid is None:
1809 if changeid is None:
1807 return context.workingctx(self)
1810 return context.workingctx(self)
1808 if isinstance(changeid, context.basectx):
1811 if isinstance(changeid, context.basectx):
1809 return changeid
1812 return changeid
1810
1813
1811 # dealing with multiple revisions
1814 # dealing with multiple revisions
1812 if isinstance(changeid, slice):
1815 if isinstance(changeid, slice):
1813 # wdirrev isn't contiguous so the slice shouldn't include it
1816 # wdirrev isn't contiguous so the slice shouldn't include it
1814 return [
1817 return [
1815 self[i]
1818 self[i]
1816 for i in pycompat.xrange(*changeid.indices(len(self)))
1819 for i in pycompat.xrange(*changeid.indices(len(self)))
1817 if i not in self.changelog.filteredrevs
1820 if i not in self.changelog.filteredrevs
1818 ]
1821 ]
1819
1822
1820 # dealing with some special values
1823 # dealing with some special values
1821 quick_access = self._quick_access_changeid.get(changeid)
1824 quick_access = self._quick_access_changeid.get(changeid)
1822 if quick_access is not None:
1825 if quick_access is not None:
1823 rev, node = quick_access
1826 rev, node = quick_access
1824 return context.changectx(self, rev, node, maybe_filtered=False)
1827 return context.changectx(self, rev, node, maybe_filtered=False)
1825 if changeid == b'tip':
1828 if changeid == b'tip':
1826 node = self.changelog.tip()
1829 node = self.changelog.tip()
1827 rev = self.changelog.rev(node)
1830 rev = self.changelog.rev(node)
1828 return context.changectx(self, rev, node)
1831 return context.changectx(self, rev, node)
1829
1832
1830 # dealing with arbitrary values
1833 # dealing with arbitrary values
1831 try:
1834 try:
1832 if isinstance(changeid, int):
1835 if isinstance(changeid, int):
1833 node = self.changelog.node(changeid)
1836 node = self.changelog.node(changeid)
1834 rev = changeid
1837 rev = changeid
1835 elif changeid == b'.':
1838 elif changeid == b'.':
1836 # this is a hack to delay/avoid loading obsmarkers
1839 # this is a hack to delay/avoid loading obsmarkers
1837 # when we know that '.' won't be hidden
1840 # when we know that '.' won't be hidden
1838 node = self.dirstate.p1()
1841 node = self.dirstate.p1()
1839 rev = self.unfiltered().changelog.rev(node)
1842 rev = self.unfiltered().changelog.rev(node)
1840 elif len(changeid) == 20:
1843 elif len(changeid) == 20:
1841 try:
1844 try:
1842 node = changeid
1845 node = changeid
1843 rev = self.changelog.rev(changeid)
1846 rev = self.changelog.rev(changeid)
1844 except error.FilteredLookupError:
1847 except error.FilteredLookupError:
1845 changeid = hex(changeid) # for the error message
1848 changeid = hex(changeid) # for the error message
1846 raise
1849 raise
1847 except LookupError:
1850 except LookupError:
1848 # check if it might have come from damaged dirstate
1851 # check if it might have come from damaged dirstate
1849 #
1852 #
1850 # XXX we could avoid the unfiltered if we had a recognizable
1853 # XXX we could avoid the unfiltered if we had a recognizable
1851 # exception for filtered changeset access
1854 # exception for filtered changeset access
1852 if (
1855 if (
1853 self.local()
1856 self.local()
1854 and changeid in self.unfiltered().dirstate.parents()
1857 and changeid in self.unfiltered().dirstate.parents()
1855 ):
1858 ):
1856 msg = _(b"working directory has unknown parent '%s'!")
1859 msg = _(b"working directory has unknown parent '%s'!")
1857 raise error.Abort(msg % short(changeid))
1860 raise error.Abort(msg % short(changeid))
1858 changeid = hex(changeid) # for the error message
1861 changeid = hex(changeid) # for the error message
1859 raise
1862 raise
1860
1863
1861 elif len(changeid) == 40:
1864 elif len(changeid) == 40:
1862 node = bin(changeid)
1865 node = bin(changeid)
1863 rev = self.changelog.rev(node)
1866 rev = self.changelog.rev(node)
1864 else:
1867 else:
1865 raise error.ProgrammingError(
1868 raise error.ProgrammingError(
1866 b"unsupported changeid '%s' of type %s"
1869 b"unsupported changeid '%s' of type %s"
1867 % (changeid, pycompat.bytestr(type(changeid)))
1870 % (changeid, pycompat.bytestr(type(changeid)))
1868 )
1871 )
1869
1872
1870 return context.changectx(self, rev, node)
1873 return context.changectx(self, rev, node)
1871
1874
1872 except (error.FilteredIndexError, error.FilteredLookupError):
1875 except (error.FilteredIndexError, error.FilteredLookupError):
1873 raise error.FilteredRepoLookupError(
1876 raise error.FilteredRepoLookupError(
1874 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1877 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1875 )
1878 )
1876 except (IndexError, LookupError):
1879 except (IndexError, LookupError):
1877 raise error.RepoLookupError(
1880 raise error.RepoLookupError(
1878 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1881 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1879 )
1882 )
1880 except error.WdirUnsupported:
1883 except error.WdirUnsupported:
1881 return context.workingctx(self)
1884 return context.workingctx(self)
1882
1885
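The long try/except above is essentially a dispatch on the shape of changeid. A compact standalone sketch of that decision, with a toy classifier instead of the real changelog lookups:

    def classify_changeid(changeid):
        if changeid is None:
            return 'working directory'
        if isinstance(changeid, slice):
            return 'range of revisions'
        if isinstance(changeid, int):
            return 'revision number'
        if isinstance(changeid, bytes) and len(changeid) == 20:
            return 'binary node id'
        if isinstance(changeid, bytes) and len(changeid) == 40:
            return 'hex node id'
        return "symbol ('tip', '.', 'null', ...)"

    assert classify_changeid(None) == 'working directory'
    assert classify_changeid(5) == 'revision number'
    assert classify_changeid(b'0' * 40) == 'hex node id'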
1883 def __contains__(self, changeid):
1886 def __contains__(self, changeid):
1884 """True if the given changeid exists"""
1887 """True if the given changeid exists"""
1885 try:
1888 try:
1886 self[changeid]
1889 self[changeid]
1887 return True
1890 return True
1888 except error.RepoLookupError:
1891 except error.RepoLookupError:
1889 return False
1892 return False
1890
1893
1891 def __nonzero__(self):
1894 def __nonzero__(self):
1892 return True
1895 return True
1893
1896
1894 __bool__ = __nonzero__
1897 __bool__ = __nonzero__
1895
1898
1896 def __len__(self):
1899 def __len__(self):
1897 # no need to pay the cost of repoview.changelog
1900 # no need to pay the cost of repoview.changelog
1898 unfi = self.unfiltered()
1901 unfi = self.unfiltered()
1899 return len(unfi.changelog)
1902 return len(unfi.changelog)
1900
1903
1901 def __iter__(self):
1904 def __iter__(self):
1902 return iter(self.changelog)
1905 return iter(self.changelog)
1903
1906
1904 def revs(self, expr, *args):
1907 def revs(self, expr, *args):
1905 """Find revisions matching a revset.
1908 """Find revisions matching a revset.
1906
1909
1907 The revset is specified as a string ``expr`` that may contain
1910 The revset is specified as a string ``expr`` that may contain
1908 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1911 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1909
1912
1910 Revset aliases from the configuration are not expanded. To expand
1913 Revset aliases from the configuration are not expanded. To expand
1911 user aliases, consider calling ``scmutil.revrange()`` or
1914 user aliases, consider calling ``scmutil.revrange()`` or
1912 ``repo.anyrevs([expr], user=True)``.
1915 ``repo.anyrevs([expr], user=True)``.
1913
1916
1914 Returns a smartset.abstractsmartset, which is a list-like interface
1917 Returns a smartset.abstractsmartset, which is a list-like interface
1915 that contains integer revisions.
1918 that contains integer revisions.
1916 """
1919 """
1917 tree = revsetlang.spectree(expr, *args)
1920 tree = revsetlang.spectree(expr, *args)
1918 return revset.makematcher(tree)(self)
1921 return revset.makematcher(tree)(self)
1919
1922
1920 def set(self, expr, *args):
1923 def set(self, expr, *args):
1921 """Find revisions matching a revset and emit changectx instances.
1924 """Find revisions matching a revset and emit changectx instances.
1922
1925
1923 This is a convenience wrapper around ``revs()`` that iterates the
1926 This is a convenience wrapper around ``revs()`` that iterates the
1924 result and is a generator of changectx instances.
1927 result and is a generator of changectx instances.
1925
1928
1926 Revset aliases from the configuration are not expanded. To expand
1929 Revset aliases from the configuration are not expanded. To expand
1927 user aliases, consider calling ``scmutil.revrange()``.
1930 user aliases, consider calling ``scmutil.revrange()``.
1928 """
1931 """
1929 for r in self.revs(expr, *args):
1932 for r in self.revs(expr, *args):
1930 yield self[r]
1933 yield self[r]
1931
1934
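A hedged usage sketch of the %-formatting accepted by revs() and set(); it assumes `repo` is an already opened localrepository and `somebranch` a branch name, and the revsets themselves are only illustrative:

    # %d and %s are escaped by revsetlang.formatspec before evaluation
    for rev in repo.revs(b'ancestors(%d) and branch(%s)', 42, somebranch):
        pass  # integer revisions

    # set() yields changectx objects instead of integers
    for ctx in repo.set(b'heads(branch(%s))', somebranch):
        pass  # e.g. inspect ctx.hex(), ctx.description(), ...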
1932 def anyrevs(self, specs, user=False, localalias=None):
1935 def anyrevs(self, specs, user=False, localalias=None):
1933 """Find revisions matching one of the given revsets.
1936 """Find revisions matching one of the given revsets.
1934
1937
1935 Revset aliases from the configuration are not expanded by default. To
1938 Revset aliases from the configuration are not expanded by default. To
1936 expand user aliases, specify ``user=True``. To provide some local
1939 expand user aliases, specify ``user=True``. To provide some local
1937 definitions overriding user aliases, set ``localalias`` to
1940 definitions overriding user aliases, set ``localalias`` to
1938 ``{name: definitionstring}``.
1941 ``{name: definitionstring}``.
1939 """
1942 """
1940 if specs == [b'null']:
1943 if specs == [b'null']:
1941 return revset.baseset([nullrev])
1944 return revset.baseset([nullrev])
1942 if specs == [b'.']:
1945 if specs == [b'.']:
1943 quick_data = self._quick_access_changeid.get(b'.')
1946 quick_data = self._quick_access_changeid.get(b'.')
1944 if quick_data is not None:
1947 if quick_data is not None:
1945 return revset.baseset([quick_data[0]])
1948 return revset.baseset([quick_data[0]])
1946 if user:
1949 if user:
1947 m = revset.matchany(
1950 m = revset.matchany(
1948 self.ui,
1951 self.ui,
1949 specs,
1952 specs,
1950 lookup=revset.lookupfn(self),
1953 lookup=revset.lookupfn(self),
1951 localalias=localalias,
1954 localalias=localalias,
1952 )
1955 )
1953 else:
1956 else:
1954 m = revset.matchany(None, specs, localalias=localalias)
1957 m = revset.matchany(None, specs, localalias=localalias)
1955 return m(self)
1958 return m(self)
1956
1959
1957 def url(self):
1960 def url(self):
1958 return b'file:' + self.root
1961 return b'file:' + self.root
1959
1962
1960 def hook(self, name, throw=False, **args):
1963 def hook(self, name, throw=False, **args):
1961 """Call a hook, passing this repo instance.
1964 """Call a hook, passing this repo instance.
1962
1965
1963 This is a convenience method to aid invoking hooks. Extensions likely
1966 This is a convenience method to aid invoking hooks. Extensions likely
1964 won't call this unless they have registered a custom hook or are
1967 won't call this unless they have registered a custom hook or are
1965 replacing code that is expected to call a hook.
1968 replacing code that is expected to call a hook.
1966 """
1969 """
1967 return hook.hook(self.ui, self, name, throw, **args)
1970 return hook.hook(self.ui, self, name, throw, **args)
1968
1971
1969 @filteredpropertycache
1972 @filteredpropertycache
1970 def _tagscache(self):
1973 def _tagscache(self):
1971 """Returns a tagscache object that contains various tags related
1974 """Returns a tagscache object that contains various tags related
1972 caches."""
1975 caches."""
1973
1976
1974 # This simplifies its cache management by having one decorated
1977 # This simplifies its cache management by having one decorated
1975 # function (this one) and the rest simply fetch things from it.
1978 # function (this one) and the rest simply fetch things from it.
1976 class tagscache(object):
1979 class tagscache(object):
1977 def __init__(self):
1980 def __init__(self):
1978 # These two define the set of tags for this repository. tags
1981 # These two define the set of tags for this repository. tags
1979 # maps tag name to node; tagtypes maps tag name to 'global' or
1982 # maps tag name to node; tagtypes maps tag name to 'global' or
1980 # 'local'. (Global tags are defined by .hgtags across all
1983 # 'local'. (Global tags are defined by .hgtags across all
1981 # heads, and local tags are defined in .hg/localtags.)
1984 # heads, and local tags are defined in .hg/localtags.)
1982 # They constitute the in-memory cache of tags.
1985 # They constitute the in-memory cache of tags.
1983 self.tags = self.tagtypes = None
1986 self.tags = self.tagtypes = None
1984
1987
1985 self.nodetagscache = self.tagslist = None
1988 self.nodetagscache = self.tagslist = None
1986
1989
1987 cache = tagscache()
1990 cache = tagscache()
1988 cache.tags, cache.tagtypes = self._findtags()
1991 cache.tags, cache.tagtypes = self._findtags()
1989
1992
1990 return cache
1993 return cache
1991
1994
1992 def tags(self):
1995 def tags(self):
1993 '''return a mapping of tag to node'''
1996 '''return a mapping of tag to node'''
1994 t = {}
1997 t = {}
1995 if self.changelog.filteredrevs:
1998 if self.changelog.filteredrevs:
1996 tags, tt = self._findtags()
1999 tags, tt = self._findtags()
1997 else:
2000 else:
1998 tags = self._tagscache.tags
2001 tags = self._tagscache.tags
1999 rev = self.changelog.rev
2002 rev = self.changelog.rev
2000 for k, v in pycompat.iteritems(tags):
2003 for k, v in pycompat.iteritems(tags):
2001 try:
2004 try:
2002 # ignore tags to unknown nodes
2005 # ignore tags to unknown nodes
2003 rev(v)
2006 rev(v)
2004 t[k] = v
2007 t[k] = v
2005 except (error.LookupError, ValueError):
2008 except (error.LookupError, ValueError):
2006 pass
2009 pass
2007 return t
2010 return t
2008
2011
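tags() above drops tags pointing at nodes the (possibly filtered) changelog does not know about and always adds b'tip'. A standalone sketch of that filtering, with toy dictionaries in place of the real caches:

    def visible_tags(alltags, known_nodes, tipnode):
        t = {name: node for name, node in alltags.items() if node in known_nodes}
        t[b'tip'] = tipnode
        return t

    known = {b'n1', b'n2'}
    assert visible_tags({b'v1.0': b'n1', b'stale': b'gone'}, known, b'n2') == {
        b'v1.0': b'n1',
        b'tip': b'n2',
    }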
2009 def _findtags(self):
2012 def _findtags(self):
2010 """Do the hard work of finding tags. Return a pair of dicts
2013 """Do the hard work of finding tags. Return a pair of dicts
2011 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2014 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2012 maps tag name to a string like \'global\' or \'local\'.
2015 maps tag name to a string like \'global\' or \'local\'.
2013 Subclasses or extensions are free to add their own tags, but
2016 Subclasses or extensions are free to add their own tags, but
2014 should be aware that the returned dicts will be retained for the
2017 should be aware that the returned dicts will be retained for the
2015 duration of the localrepo object."""
2018 duration of the localrepo object."""
2016
2019
2017 # XXX what tagtype should subclasses/extensions use? Currently
2020 # XXX what tagtype should subclasses/extensions use? Currently
2018 # mq and bookmarks add tags, but do not set the tagtype at all.
2021 # mq and bookmarks add tags, but do not set the tagtype at all.
2019 # Should each extension invent its own tag type? Should there
2022 # Should each extension invent its own tag type? Should there
2020 # be one tagtype for all such "virtual" tags? Or is the status
2023 # be one tagtype for all such "virtual" tags? Or is the status
2021 # quo fine?
2024 # quo fine?
2022
2025
2023 # map tag name to (node, hist)
2026 # map tag name to (node, hist)
2024 alltags = tagsmod.findglobaltags(self.ui, self)
2027 alltags = tagsmod.findglobaltags(self.ui, self)
2025 # map tag name to tag type
2028 # map tag name to tag type
2026 tagtypes = {tag: b'global' for tag in alltags}
2029 tagtypes = {tag: b'global' for tag in alltags}
2027
2030
2028 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2031 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2029
2032
2030 # Build the return dicts. Have to re-encode tag names because
2033 # Build the return dicts. Have to re-encode tag names because
2031 # the tags module always uses UTF-8 (in order not to lose info
2034 # the tags module always uses UTF-8 (in order not to lose info
2032 # writing to the cache), but the rest of Mercurial wants them in
2035 # writing to the cache), but the rest of Mercurial wants them in
2033 # local encoding.
2036 # local encoding.
2034 tags = {}
2037 tags = {}
2035 for (name, (node, hist)) in pycompat.iteritems(alltags):
2038 for (name, (node, hist)) in pycompat.iteritems(alltags):
2036 if node != nullid:
2039 if node != nullid:
2037 tags[encoding.tolocal(name)] = node
2040 tags[encoding.tolocal(name)] = node
2038 tags[b'tip'] = self.changelog.tip()
2041 tags[b'tip'] = self.changelog.tip()
2039 tagtypes = {
2042 tagtypes = {
2040 encoding.tolocal(name): value
2043 encoding.tolocal(name): value
2041 for (name, value) in pycompat.iteritems(tagtypes)
2044 for (name, value) in pycompat.iteritems(tagtypes)
2042 }
2045 }
2043 return (tags, tagtypes)
2046 return (tags, tagtypes)
2044
2047
2045 def tagtype(self, tagname):
2048 def tagtype(self, tagname):
2046 """
2049 """
2047 return the type of the given tag. result can be:
2050 return the type of the given tag. result can be:
2048
2051
2049 'local' : a local tag
2052 'local' : a local tag
2050 'global' : a global tag
2053 'global' : a global tag
2051 None : tag does not exist
2054 None : tag does not exist
2052 """
2055 """
2053
2056
2054 return self._tagscache.tagtypes.get(tagname)
2057 return self._tagscache.tagtypes.get(tagname)
2055
2058
2056 def tagslist(self):
2059 def tagslist(self):
2057 '''return a list of tags ordered by revision'''
2060 '''return a list of tags ordered by revision'''
2058 if not self._tagscache.tagslist:
2061 if not self._tagscache.tagslist:
2059 l = []
2062 l = []
2060 for t, n in pycompat.iteritems(self.tags()):
2063 for t, n in pycompat.iteritems(self.tags()):
2061 l.append((self.changelog.rev(n), t, n))
2064 l.append((self.changelog.rev(n), t, n))
2062 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2065 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2063
2066
2064 return self._tagscache.tagslist
2067 return self._tagscache.tagslist
2065
2068
2066 def nodetags(self, node):
2069 def nodetags(self, node):
2067 '''return the tags associated with a node'''
2070 '''return the tags associated with a node'''
2068 if not self._tagscache.nodetagscache:
2071 if not self._tagscache.nodetagscache:
2069 nodetagscache = {}
2072 nodetagscache = {}
2070 for t, n in pycompat.iteritems(self._tagscache.tags):
2073 for t, n in pycompat.iteritems(self._tagscache.tags):
2071 nodetagscache.setdefault(n, []).append(t)
2074 nodetagscache.setdefault(n, []).append(t)
2072 for tags in pycompat.itervalues(nodetagscache):
2075 for tags in pycompat.itervalues(nodetagscache):
2073 tags.sort()
2076 tags.sort()
2074 self._tagscache.nodetagscache = nodetagscache
2077 self._tagscache.nodetagscache = nodetagscache
2075 return self._tagscache.nodetagscache.get(node, [])
2078 return self._tagscache.nodetagscache.get(node, [])
2076
2079
2077 def nodebookmarks(self, node):
2080 def nodebookmarks(self, node):
2078 """return the list of bookmarks pointing to the specified node"""
2081 """return the list of bookmarks pointing to the specified node"""
2079 return self._bookmarks.names(node)
2082 return self._bookmarks.names(node)
2080
2083
2081 def branchmap(self):
2084 def branchmap(self):
2082 """returns a dictionary {branch: [branchheads]} with branchheads
2085 """returns a dictionary {branch: [branchheads]} with branchheads
2083 ordered by increasing revision number"""
2086 ordered by increasing revision number"""
2084 return self._branchcaches[self]
2087 return self._branchcaches[self]
2085
2088
2086 @unfilteredmethod
2089 @unfilteredmethod
2087 def revbranchcache(self):
2090 def revbranchcache(self):
2088 if not self._revbranchcache:
2091 if not self._revbranchcache:
2089 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2092 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2090 return self._revbranchcache
2093 return self._revbranchcache
2091
2094
2092 def register_changeset(self, rev, changelogrevision):
2095 def register_changeset(self, rev, changelogrevision):
2093 self.revbranchcache().setdata(rev, changelogrevision)
2096 self.revbranchcache().setdata(rev, changelogrevision)
2094
2097
2095 def branchtip(self, branch, ignoremissing=False):
2098 def branchtip(self, branch, ignoremissing=False):
2096 """return the tip node for a given branch
2099 """return the tip node for a given branch
2097
2100
2098 If ignoremissing is True, then this method will not raise an error.
2101 If ignoremissing is True, then this method will not raise an error.
2099 This is helpful for callers that only expect None for a missing branch
2102 This is helpful for callers that only expect None for a missing branch
2100 (e.g. namespace).
2103 (e.g. namespace).
2101
2104
2102 """
2105 """
2103 try:
2106 try:
2104 return self.branchmap().branchtip(branch)
2107 return self.branchmap().branchtip(branch)
2105 except KeyError:
2108 except KeyError:
2106 if not ignoremissing:
2109 if not ignoremissing:
2107 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2110 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2108 else:
2111 else:
2109 pass
2112 pass
2110
2113
2111 def lookup(self, key):
2114 def lookup(self, key):
2112 node = scmutil.revsymbol(self, key).node()
2115 node = scmutil.revsymbol(self, key).node()
2113 if node is None:
2116 if node is None:
2114 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2117 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2115 return node
2118 return node
2116
2119
2117 def lookupbranch(self, key):
2120 def lookupbranch(self, key):
2118 if self.branchmap().hasbranch(key):
2121 if self.branchmap().hasbranch(key):
2119 return key
2122 return key
2120
2123
2121 return scmutil.revsymbol(self, key).branch()
2124 return scmutil.revsymbol(self, key).branch()
2122
2125
2123 def known(self, nodes):
2126 def known(self, nodes):
2124 cl = self.changelog
2127 cl = self.changelog
2125 get_rev = cl.index.get_rev
2128 get_rev = cl.index.get_rev
2126 filtered = cl.filteredrevs
2129 filtered = cl.filteredrevs
2127 result = []
2130 result = []
2128 for n in nodes:
2131 for n in nodes:
2129 r = get_rev(n)
2132 r = get_rev(n)
2130 resp = not (r is None or r in filtered)
2133 resp = not (r is None or r in filtered)
2131 result.append(resp)
2134 result.append(resp)
2132 return result
2135 return result
2133
2136
2134 def local(self):
2137 def local(self):
2135 return self
2138 return self
2136
2139
2137 def publishing(self):
2140 def publishing(self):
2138 # it's safe (and desirable) to trust the publish flag unconditionally
2141 # it's safe (and desirable) to trust the publish flag unconditionally
2139 # so that we don't finalize changes shared between users via ssh or nfs
2142 # so that we don't finalize changes shared between users via ssh or nfs
2140 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2143 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2141
2144
2142 def cancopy(self):
2145 def cancopy(self):
2143 # so statichttprepo's override of local() works
2146 # so statichttprepo's override of local() works
2144 if not self.local():
2147 if not self.local():
2145 return False
2148 return False
2146 if not self.publishing():
2149 if not self.publishing():
2147 return True
2150 return True
2148 # if publishing we can't copy if there is filtered content
2151 # if publishing we can't copy if there is filtered content
2149 return not self.filtered(b'visible').changelog.filteredrevs
2152 return not self.filtered(b'visible').changelog.filteredrevs
2150
2153
2151 def shared(self):
2154 def shared(self):
2152 '''the type of shared repository (None if not shared)'''
2155 '''the type of shared repository (None if not shared)'''
2153 if self.sharedpath != self.path:
2156 if self.sharedpath != self.path:
2154 return b'store'
2157 return b'store'
2155 return None
2158 return None
2156
2159
2157 def wjoin(self, f, *insidef):
2160 def wjoin(self, f, *insidef):
2158 return self.vfs.reljoin(self.root, f, *insidef)
2161 return self.vfs.reljoin(self.root, f, *insidef)
2159
2162
2160 def setparents(self, p1, p2=nullid):
2163 def setparents(self, p1, p2=nullid):
2161 self[None].setparents(p1, p2)
2164 self[None].setparents(p1, p2)
2162 self._quick_access_changeid_invalidate()
2165 self._quick_access_changeid_invalidate()
2163
2166
2164 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2167 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2165 """changeid must be a changeset revision, if specified.
2168 """changeid must be a changeset revision, if specified.
2166 fileid can be a file revision or node."""
2169 fileid can be a file revision or node."""
2167 return context.filectx(
2170 return context.filectx(
2168 self, path, changeid, fileid, changectx=changectx
2171 self, path, changeid, fileid, changectx=changectx
2169 )
2172 )
2170
2173
2171 def getcwd(self):
2174 def getcwd(self):
2172 return self.dirstate.getcwd()
2175 return self.dirstate.getcwd()
2173
2176
2174 def pathto(self, f, cwd=None):
2177 def pathto(self, f, cwd=None):
2175 return self.dirstate.pathto(f, cwd)
2178 return self.dirstate.pathto(f, cwd)
2176
2179
2177 def _loadfilter(self, filter):
2180 def _loadfilter(self, filter):
2178 if filter not in self._filterpats:
2181 if filter not in self._filterpats:
2179 l = []
2182 l = []
2180 for pat, cmd in self.ui.configitems(filter):
2183 for pat, cmd in self.ui.configitems(filter):
2181 if cmd == b'!':
2184 if cmd == b'!':
2182 continue
2185 continue
2183 mf = matchmod.match(self.root, b'', [pat])
2186 mf = matchmod.match(self.root, b'', [pat])
2184 fn = None
2187 fn = None
2185 params = cmd
2188 params = cmd
2186 for name, filterfn in pycompat.iteritems(self._datafilters):
2189 for name, filterfn in pycompat.iteritems(self._datafilters):
2187 if cmd.startswith(name):
2190 if cmd.startswith(name):
2188 fn = filterfn
2191 fn = filterfn
2189 params = cmd[len(name) :].lstrip()
2192 params = cmd[len(name) :].lstrip()
2190 break
2193 break
2191 if not fn:
2194 if not fn:
2192 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2195 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2193 fn.__name__ = 'commandfilter'
2196 fn.__name__ = 'commandfilter'
2194 # Wrap old filters not supporting keyword arguments
2197 # Wrap old filters not supporting keyword arguments
2195 if not pycompat.getargspec(fn)[2]:
2198 if not pycompat.getargspec(fn)[2]:
2196 oldfn = fn
2199 oldfn = fn
2197 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2200 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2198 fn.__name__ = 'compat-' + oldfn.__name__
2201 fn.__name__ = 'compat-' + oldfn.__name__
2199 l.append((mf, fn, params))
2202 l.append((mf, fn, params))
2200 self._filterpats[filter] = l
2203 self._filterpats[filter] = l
2201 return self._filterpats[filter]
2204 return self._filterpats[filter]
2202
2205
2203 def _filter(self, filterpats, filename, data):
2206 def _filter(self, filterpats, filename, data):
2204 for mf, fn, cmd in filterpats:
2207 for mf, fn, cmd in filterpats:
2205 if mf(filename):
2208 if mf(filename):
2206 self.ui.debug(
2209 self.ui.debug(
2207 b"filtering %s through %s\n"
2210 b"filtering %s through %s\n"
2208 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2211 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2209 )
2212 )
2210 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2213 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2211 break
2214 break
2212
2215
2213 return data
2216 return data
2214
2217
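In _filter() above, only the first pattern matching the filename is applied. A standalone sketch of that dispatch, with plain predicates standing in for match objects and purely illustrative filter functions:

    filterpats = [
        (lambda fn: fn.endswith(b'.txt'),
         lambda data, cmd, **kw: data.replace(b'\r\n', b'\n'), b'dos2unix'),
        (lambda fn: fn.endswith(b'.bin'),
         lambda data, cmd, **kw: data, b'noop'),
    ]

    def run_filters(filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):          # first matching pattern wins
                return fn(data, cmd)
        return data

    assert run_filters(filterpats, b'notes.txt', b'a\r\nb\r\n') == b'a\nb\n'
    assert run_filters(filterpats, b'image.png', b'raw') == b'raw'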
2215 @unfilteredpropertycache
2218 @unfilteredpropertycache
2216 def _encodefilterpats(self):
2219 def _encodefilterpats(self):
2217 return self._loadfilter(b'encode')
2220 return self._loadfilter(b'encode')
2218
2221
2219 @unfilteredpropertycache
2222 @unfilteredpropertycache
2220 def _decodefilterpats(self):
2223 def _decodefilterpats(self):
2221 return self._loadfilter(b'decode')
2224 return self._loadfilter(b'decode')
2222
2225
2223 def adddatafilter(self, name, filter):
2226 def adddatafilter(self, name, filter):
2224 self._datafilters[name] = filter
2227 self._datafilters[name] = filter
2225
2228
2226 def wread(self, filename):
2229 def wread(self, filename):
2227 if self.wvfs.islink(filename):
2230 if self.wvfs.islink(filename):
2228 data = self.wvfs.readlink(filename)
2231 data = self.wvfs.readlink(filename)
2229 else:
2232 else:
2230 data = self.wvfs.read(filename)
2233 data = self.wvfs.read(filename)
2231 return self._filter(self._encodefilterpats, filename, data)
2234 return self._filter(self._encodefilterpats, filename, data)
2232
2235
2233 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2236 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2234 """write ``data`` into ``filename`` in the working directory
2237 """write ``data`` into ``filename`` in the working directory
2235
2238
2236 This returns the length of the written (maybe decoded) data.
2239 This returns the length of the written (maybe decoded) data.
2237 """
2240 """
2238 data = self._filter(self._decodefilterpats, filename, data)
2241 data = self._filter(self._decodefilterpats, filename, data)
2239 if b'l' in flags:
2242 if b'l' in flags:
2240 self.wvfs.symlink(data, filename)
2243 self.wvfs.symlink(data, filename)
2241 else:
2244 else:
2242 self.wvfs.write(
2245 self.wvfs.write(
2243 filename, data, backgroundclose=backgroundclose, **kwargs
2246 filename, data, backgroundclose=backgroundclose, **kwargs
2244 )
2247 )
2245 if b'x' in flags:
2248 if b'x' in flags:
2246 self.wvfs.setflags(filename, False, True)
2249 self.wvfs.setflags(filename, False, True)
2247 else:
2250 else:
2248 self.wvfs.setflags(filename, False, False)
2251 self.wvfs.setflags(filename, False, False)
2249 return len(data)
2252 return len(data)
2250
2253
2251 def wwritedata(self, filename, data):
2254 def wwritedata(self, filename, data):
2252 return self._filter(self._decodefilterpats, filename, data)
2255 return self._filter(self._decodefilterpats, filename, data)
2253
2256
2254 def currenttransaction(self):
2257 def currenttransaction(self):
2255 """return the current transaction or None if non exists"""
2258 """return the current transaction or None if non exists"""
2256 if self._transref:
2259 if self._transref:
2257 tr = self._transref()
2260 tr = self._transref()
2258 else:
2261 else:
2259 tr = None
2262 tr = None
2260
2263
2261 if tr and tr.running():
2264 if tr and tr.running():
2262 return tr
2265 return tr
2263 return None
2266 return None
2264
2267
2265 def transaction(self, desc, report=None):
2268 def transaction(self, desc, report=None):
2266 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2269 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2267 b'devel', b'check-locks'
2270 b'devel', b'check-locks'
2268 ):
2271 ):
2269 if self._currentlock(self._lockref) is None:
2272 if self._currentlock(self._lockref) is None:
2270 raise error.ProgrammingError(b'transaction requires locking')
2273 raise error.ProgrammingError(b'transaction requires locking')
2271 tr = self.currenttransaction()
2274 tr = self.currenttransaction()
2272 if tr is not None:
2275 if tr is not None:
2273 return tr.nest(name=desc)
2276 return tr.nest(name=desc)
2274
2277
2275 # abort here if the journal already exists
2278 # abort here if the journal already exists
2276 if self.svfs.exists(b"journal"):
2279 if self.svfs.exists(b"journal"):
2277 raise error.RepoError(
2280 raise error.RepoError(
2278 _(b"abandoned transaction found"),
2281 _(b"abandoned transaction found"),
2279 hint=_(b"run 'hg recover' to clean up transaction"),
2282 hint=_(b"run 'hg recover' to clean up transaction"),
2280 )
2283 )
2281
2284
2282 idbase = b"%.40f#%f" % (random.random(), time.time())
2285 idbase = b"%.40f#%f" % (random.random(), time.time())
2283 ha = hex(hashutil.sha1(idbase).digest())
2286 ha = hex(hashutil.sha1(idbase).digest())
2284 txnid = b'TXN:' + ha
2287 txnid = b'TXN:' + ha
2285 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2288 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2286
2289
2287 self._writejournal(desc)
2290 self._writejournal(desc)
2288 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2291 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2289 if report:
2292 if report:
2290 rp = report
2293 rp = report
2291 else:
2294 else:
2292 rp = self.ui.warn
2295 rp = self.ui.warn
2293 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2296 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2294 # we must avoid cyclic reference between repo and transaction.
2297 # we must avoid cyclic reference between repo and transaction.
2295 reporef = weakref.ref(self)
2298 reporef = weakref.ref(self)
2296 # Code to track tag movement
2299 # Code to track tag movement
2297 #
2300 #
2298 # Since tags are all handled as file content, it is actually quite hard
2301 # Since tags are all handled as file content, it is actually quite hard
2299 # to track these movements from a code perspective. So we fall back to
2302 # to track these movements from a code perspective. So we fall back to
2300 # tracking at the repository level. One could envision tracking changes
2303 # tracking at the repository level. One could envision tracking changes
2301 # to the '.hgtags' file through changegroup apply, but that fails to
2304 # to the '.hgtags' file through changegroup apply, but that fails to
2302 # cope with cases where a transaction exposes new heads without a
2305 # cope with cases where a transaction exposes new heads without a
2303 # changegroup being involved (e.g. phase movement).
2306 # changegroup being involved (e.g. phase movement).
2304 #
2307 #
2305 # For now, we gate the feature behind a flag since it likely comes
2308 # For now, we gate the feature behind a flag since it likely comes
2306 # with performance impacts. The current code runs more often than needed
2309 # with performance impacts. The current code runs more often than needed
2307 # and does not use caches as much as it could. The current focus is on
2310 # and does not use caches as much as it could. The current focus is on
2308 # the behavior of the feature so we disable it by default. The flag
2311 # the behavior of the feature so we disable it by default. The flag
2309 # will be removed when we are happy with the performance impact.
2312 # will be removed when we are happy with the performance impact.
2310 #
2313 #
2311 # Once this feature is no longer experimental move the following
2314 # Once this feature is no longer experimental move the following
2312 # documentation to the appropriate help section:
2315 # documentation to the appropriate help section:
2313 #
2316 #
2314 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2317 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2315 # tags (new or changed or deleted tags). In addition the details of
2318 # tags (new or changed or deleted tags). In addition the details of
2316 # these changes are made available in a file at:
2319 # these changes are made available in a file at:
2317 # ``REPOROOT/.hg/changes/tags.changes``.
2320 # ``REPOROOT/.hg/changes/tags.changes``.
2318 # Make sure you check for HG_TAG_MOVED before reading that file as it
2321 # Make sure you check for HG_TAG_MOVED before reading that file as it
2319 # might exist from a previous transaction even if no tag were touched
2322 # might exist from a previous transaction even if no tag were touched
2320 # in this one. Changes are recorded in a line-based format::
2323 # in this one. Changes are recorded in a line-based format::
2321 #
2324 #
2322 # <action> <hex-node> <tag-name>\n
2325 # <action> <hex-node> <tag-name>\n
2323 #
2326 #
2324 # Actions are defined as follows:
2327 # Actions are defined as follows:
2325 # "-R": tag is removed,
2328 # "-R": tag is removed,
2326 # "+A": tag is added,
2329 # "+A": tag is added,
2327 # "-M": tag is moved (old value),
2330 # "-M": tag is moved (old value),
2328 # "+M": tag is moved (new value),
2331 # "+M": tag is moved (new value),
2329 tracktags = lambda x: None
2332 tracktags = lambda x: None
2330 # experimental config: experimental.hook-track-tags
2333 # experimental config: experimental.hook-track-tags
2331 shouldtracktags = self.ui.configbool(
2334 shouldtracktags = self.ui.configbool(
2332 b'experimental', b'hook-track-tags'
2335 b'experimental', b'hook-track-tags'
2333 )
2336 )
2334 if desc != b'strip' and shouldtracktags:
2337 if desc != b'strip' and shouldtracktags:
2335 oldheads = self.changelog.headrevs()
2338 oldheads = self.changelog.headrevs()
2336
2339
2337 def tracktags(tr2):
2340 def tracktags(tr2):
2338 repo = reporef()
2341 repo = reporef()
2339 assert repo is not None # help pytype
2342 assert repo is not None # help pytype
2340 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2343 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2341 newheads = repo.changelog.headrevs()
2344 newheads = repo.changelog.headrevs()
2342 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2345 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2343 # note: we compare lists here.
2346 # note: we compare lists here.
2344 # As we do it only once, building a set would not be cheaper
2347 # As we do it only once, building a set would not be cheaper
2345 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2348 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2346 if changes:
2349 if changes:
2347 tr2.hookargs[b'tag_moved'] = b'1'
2350 tr2.hookargs[b'tag_moved'] = b'1'
2348 with repo.vfs(
2351 with repo.vfs(
2349 b'changes/tags.changes', b'w', atomictemp=True
2352 b'changes/tags.changes', b'w', atomictemp=True
2350 ) as changesfile:
2353 ) as changesfile:
2351 # note: we do not register the file with the transaction
2354 # note: we do not register the file with the transaction
2352 # because we need it to still exist when the transaction
2355 # because we need it to still exist when the transaction
2353 # is closed (for txnclose hooks)
2356 # is closed (for txnclose hooks)
2354 tagsmod.writediff(changesfile, changes)
2357 tagsmod.writediff(changesfile, changes)
2355
2358
2356 def validate(tr2):
2359 def validate(tr2):
2357 """will run pre-closing hooks"""
2360 """will run pre-closing hooks"""
2358 # XXX the transaction API is a bit lacking here so we take a hacky
2361 # XXX the transaction API is a bit lacking here so we take a hacky
2359 # path for now
2362 # path for now
2360 #
2363 #
2361 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2364 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2362 # dict is copied before these run. In addition we need the data
2365 # dict is copied before these run. In addition we need the data
2363 # available to in-memory hooks too.
2366 # available to in-memory hooks too.
2364 #
2367 #
2365 # Moreover, we also need to make sure this runs before txnclose
2368 # Moreover, we also need to make sure this runs before txnclose
2366 # hooks and there is no "pending" mechanism that would execute
2369 # hooks and there is no "pending" mechanism that would execute
2367 # logic only if hooks are about to run.
2370 # logic only if hooks are about to run.
2368 #
2371 #
2369 # Fixing this limitation of the transaction is also needed to track
2372 # Fixing this limitation of the transaction is also needed to track
2370 # other families of changes (bookmarks, phases, obsolescence).
2373 # other families of changes (bookmarks, phases, obsolescence).
2371 #
2374 #
2372 # This will have to be fixed before we remove the experimental
2375 # This will have to be fixed before we remove the experimental
2373 # gating.
2376 # gating.
2374 tracktags(tr2)
2377 tracktags(tr2)
2375 repo = reporef()
2378 repo = reporef()
2376 assert repo is not None # help pytype
2379 assert repo is not None # help pytype
2377
2380
2378 singleheadopt = (b'experimental', b'single-head-per-branch')
2381 singleheadopt = (b'experimental', b'single-head-per-branch')
2379 singlehead = repo.ui.configbool(*singleheadopt)
2382 singlehead = repo.ui.configbool(*singleheadopt)
2380 if singlehead:
2383 if singlehead:
2381 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2384 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2382 accountclosed = singleheadsub.get(
2385 accountclosed = singleheadsub.get(
2383 b"account-closed-heads", False
2386 b"account-closed-heads", False
2384 )
2387 )
2385 if singleheadsub.get(b"public-changes-only", False):
2388 if singleheadsub.get(b"public-changes-only", False):
2386 filtername = b"immutable"
2389 filtername = b"immutable"
2387 else:
2390 else:
2388 filtername = b"visible"
2391 filtername = b"visible"
2389 scmutil.enforcesinglehead(
2392 scmutil.enforcesinglehead(
2390 repo, tr2, desc, accountclosed, filtername
2393 repo, tr2, desc, accountclosed, filtername
2391 )
2394 )
2392 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2395 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2393 for name, (old, new) in sorted(
2396 for name, (old, new) in sorted(
2394 tr.changes[b'bookmarks'].items()
2397 tr.changes[b'bookmarks'].items()
2395 ):
2398 ):
2396 args = tr.hookargs.copy()
2399 args = tr.hookargs.copy()
2397 args.update(bookmarks.preparehookargs(name, old, new))
2400 args.update(bookmarks.preparehookargs(name, old, new))
2398 repo.hook(
2401 repo.hook(
2399 b'pretxnclose-bookmark',
2402 b'pretxnclose-bookmark',
2400 throw=True,
2403 throw=True,
2401 **pycompat.strkwargs(args)
2404 **pycompat.strkwargs(args)
2402 )
2405 )
2403 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2406 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2404 cl = repo.unfiltered().changelog
2407 cl = repo.unfiltered().changelog
2405 for revs, (old, new) in tr.changes[b'phases']:
2408 for revs, (old, new) in tr.changes[b'phases']:
2406 for rev in revs:
2409 for rev in revs:
2407 args = tr.hookargs.copy()
2410 args = tr.hookargs.copy()
2408 node = hex(cl.node(rev))
2411 node = hex(cl.node(rev))
2409 args.update(phases.preparehookargs(node, old, new))
2412 args.update(phases.preparehookargs(node, old, new))
2410 repo.hook(
2413 repo.hook(
2411 b'pretxnclose-phase',
2414 b'pretxnclose-phase',
2412 throw=True,
2415 throw=True,
2413 **pycompat.strkwargs(args)
2416 **pycompat.strkwargs(args)
2414 )
2417 )
2415
2418
2416 repo.hook(
2419 repo.hook(
2417 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2420 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2418 )
2421 )
2419
2422
2420 def releasefn(tr, success):
2423 def releasefn(tr, success):
2421 repo = reporef()
2424 repo = reporef()
2422 if repo is None:
2425 if repo is None:
2423 # If the repo has been GC'd (and this release function is being
2426 # If the repo has been GC'd (and this release function is being
2424 # called from transaction.__del__), there's not much we can do,
2427 # called from transaction.__del__), there's not much we can do,
2425 # so just leave the unfinished transaction there and let the
2428 # so just leave the unfinished transaction there and let the
2426 # user run `hg recover`.
2429 # user run `hg recover`.
2427 return
2430 return
2428 if success:
2431 if success:
2429 # this should be explicitly invoked here, because
2432 # this should be explicitly invoked here, because
2430 # in-memory changes aren't written out when closing the
2433 # in-memory changes aren't written out when closing the
2431 # transaction, if tr.addfilegenerator (via
2434 # transaction, if tr.addfilegenerator (via
2432 # dirstate.write or so) isn't invoked while the
2435 # dirstate.write or so) isn't invoked while the
2433 # transaction is running
2436 # transaction is running
2434 repo.dirstate.write(None)
2437 repo.dirstate.write(None)
2435 else:
2438 else:
2436 # discard all changes (including ones already written
2439 # discard all changes (including ones already written
2437 # out) in this transaction
2440 # out) in this transaction
2438 narrowspec.restorebackup(self, b'journal.narrowspec')
2441 narrowspec.restorebackup(self, b'journal.narrowspec')
2439 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2442 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2440 repo.dirstate.restorebackup(None, b'journal.dirstate')
2443 repo.dirstate.restorebackup(None, b'journal.dirstate')
2441
2444
2442 repo.invalidate(clearfilecache=True)
2445 repo.invalidate(clearfilecache=True)
2443
2446
2444 tr = transaction.transaction(
2447 tr = transaction.transaction(
2445 rp,
2448 rp,
2446 self.svfs,
2449 self.svfs,
2447 vfsmap,
2450 vfsmap,
2448 b"journal",
2451 b"journal",
2449 b"undo",
2452 b"undo",
2450 aftertrans(renames),
2453 aftertrans(renames),
2451 self.store.createmode,
2454 self.store.createmode,
2452 validator=validate,
2455 validator=validate,
2453 releasefn=releasefn,
2456 releasefn=releasefn,
2454 checkambigfiles=_cachedfiles,
2457 checkambigfiles=_cachedfiles,
2455 name=desc,
2458 name=desc,
2456 )
2459 )
2457 tr.changes[b'origrepolen'] = len(self)
2460 tr.changes[b'origrepolen'] = len(self)
2458 tr.changes[b'obsmarkers'] = set()
2461 tr.changes[b'obsmarkers'] = set()
2459 tr.changes[b'phases'] = []
2462 tr.changes[b'phases'] = []
2460 tr.changes[b'bookmarks'] = {}
2463 tr.changes[b'bookmarks'] = {}
2461
2464
2462 tr.hookargs[b'txnid'] = txnid
2465 tr.hookargs[b'txnid'] = txnid
2463 tr.hookargs[b'txnname'] = desc
2466 tr.hookargs[b'txnname'] = desc
2464 tr.hookargs[b'changes'] = tr.changes
2467 tr.hookargs[b'changes'] = tr.changes
2465 # note: writing the fncache only during finalize means that the file is
2468 # note: writing the fncache only during finalize means that the file is
2466 # outdated when running hooks. As the fncache is used for streaming clones,
2469 # outdated when running hooks. As the fncache is used for streaming clones,
2467 # this is not expected to break anything that happens during the hooks.
2470 # this is not expected to break anything that happens during the hooks.
2468 tr.addfinalize(b'flush-fncache', self.store.write)
2471 tr.addfinalize(b'flush-fncache', self.store.write)
2469
2472
2470 def txnclosehook(tr2):
2473 def txnclosehook(tr2):
2471 """To be run if transaction is successful, will schedule a hook run"""
2474 """To be run if transaction is successful, will schedule a hook run"""
2472 # Don't reference tr2 in hook() so we don't hold a reference.
2475 # Don't reference tr2 in hook() so we don't hold a reference.
2473 # This reduces memory consumption when there are multiple
2476 # This reduces memory consumption when there are multiple
2474 # transactions per lock. This can likely go away if issue5045
2477 # transactions per lock. This can likely go away if issue5045
2475 # fixes the function accumulation.
2478 # fixes the function accumulation.
2476 hookargs = tr2.hookargs
2479 hookargs = tr2.hookargs
2477
2480
2478 def hookfunc(unused_success):
2481 def hookfunc(unused_success):
2479 repo = reporef()
2482 repo = reporef()
2480 assert repo is not None # help pytype
2483 assert repo is not None # help pytype
2481
2484
2482 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2485 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2483 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2486 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2484 for name, (old, new) in bmchanges:
2487 for name, (old, new) in bmchanges:
2485 args = tr.hookargs.copy()
2488 args = tr.hookargs.copy()
2486 args.update(bookmarks.preparehookargs(name, old, new))
2489 args.update(bookmarks.preparehookargs(name, old, new))
2487 repo.hook(
2490 repo.hook(
2488 b'txnclose-bookmark',
2491 b'txnclose-bookmark',
2489 throw=False,
2492 throw=False,
2490 **pycompat.strkwargs(args)
2493 **pycompat.strkwargs(args)
2491 )
2494 )
2492
2495
2493 if hook.hashook(repo.ui, b'txnclose-phase'):
2496 if hook.hashook(repo.ui, b'txnclose-phase'):
2494 cl = repo.unfiltered().changelog
2497 cl = repo.unfiltered().changelog
2495 phasemv = sorted(
2498 phasemv = sorted(
2496 tr.changes[b'phases'], key=lambda r: r[0][0]
2499 tr.changes[b'phases'], key=lambda r: r[0][0]
2497 )
2500 )
2498 for revs, (old, new) in phasemv:
2501 for revs, (old, new) in phasemv:
2499 for rev in revs:
2502 for rev in revs:
2500 args = tr.hookargs.copy()
2503 args = tr.hookargs.copy()
2501 node = hex(cl.node(rev))
2504 node = hex(cl.node(rev))
2502 args.update(phases.preparehookargs(node, old, new))
2505 args.update(phases.preparehookargs(node, old, new))
2503 repo.hook(
2506 repo.hook(
2504 b'txnclose-phase',
2507 b'txnclose-phase',
2505 throw=False,
2508 throw=False,
2506 **pycompat.strkwargs(args)
2509 **pycompat.strkwargs(args)
2507 )
2510 )
2508
2511
2509 repo.hook(
2512 repo.hook(
2510 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2513 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2511 )
2514 )
2512
2515
2513 repo = reporef()
2516 repo = reporef()
2514 assert repo is not None # help pytype
2517 assert repo is not None # help pytype
2515 repo._afterlock(hookfunc)
2518 repo._afterlock(hookfunc)
2516
2519
2517 tr.addfinalize(b'txnclose-hook', txnclosehook)
2520 tr.addfinalize(b'txnclose-hook', txnclosehook)
2518 # Include a leading "-" to make it happen before the transaction summary
2521 # Include a leading "-" to make it happen before the transaction summary
2519 # reports registered via scmutil.registersummarycallback() whose names
2522 # reports registered via scmutil.registersummarycallback() whose names
2520 # are 00-txnreport etc. That way, the caches will be warm when the
2523 # are 00-txnreport etc. That way, the caches will be warm when the
2521 # callbacks run.
2524 # callbacks run.
2522 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2525 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2523
2526
2524 def txnaborthook(tr2):
2527 def txnaborthook(tr2):
2525 """To be run if transaction is aborted"""
2528 """To be run if transaction is aborted"""
2526 repo = reporef()
2529 repo = reporef()
2527 assert repo is not None # help pytype
2530 assert repo is not None # help pytype
2528 repo.hook(
2531 repo.hook(
2529 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2532 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2530 )
2533 )
2531
2534
2532 tr.addabort(b'txnabort-hook', txnaborthook)
2535 tr.addabort(b'txnabort-hook', txnaborthook)
2533 # avoid eager cache invalidation. in-memory data should be identical
2536 # avoid eager cache invalidation. in-memory data should be identical
2534 # to stored data if transaction has no error.
2537 # to stored data if transaction has no error.
2535 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2538 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2536 self._transref = weakref.ref(tr)
2539 self._transref = weakref.ref(tr)
2537 scmutil.registersummarycallback(self, tr, desc)
2540 scmutil.registersummarycallback(self, tr, desc)
2538 return tr
2541 return tr
2539
2542
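As the check at the top of transaction() enforces, the store lock must already be held, and a transaction that is already running is simply nested. A hedged usage sketch (assuming `repo` is an open localrepository; the transaction name is arbitrary):

    with repo.wlock(), repo.lock():
        with repo.transaction(b'my-operation') as tr:
            # mutate the store through APIs that register their files with `tr`
            pass
    # a clean exit closes the transaction and turns journal.* files into undo.*;
    # an exception aborts it and the journal is rolled back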
2540 def _journalfiles(self):
2543 def _journalfiles(self):
2541 return (
2544 return (
2542 (self.svfs, b'journal'),
2545 (self.svfs, b'journal'),
2543 (self.svfs, b'journal.narrowspec'),
2546 (self.svfs, b'journal.narrowspec'),
2544 (self.vfs, b'journal.narrowspec.dirstate'),
2547 (self.vfs, b'journal.narrowspec.dirstate'),
2545 (self.vfs, b'journal.dirstate'),
2548 (self.vfs, b'journal.dirstate'),
2546 (self.vfs, b'journal.branch'),
2549 (self.vfs, b'journal.branch'),
2547 (self.vfs, b'journal.desc'),
2550 (self.vfs, b'journal.desc'),
2548 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2551 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2549 (self.svfs, b'journal.phaseroots'),
2552 (self.svfs, b'journal.phaseroots'),
2550 )
2553 )
2551
2554
2552 def undofiles(self):
2555 def undofiles(self):
2553 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2556 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2554
2557
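undofiles() maps each journal file to its undo counterpart via undoname(), which is defined elsewhere in this module. A tiny stand-in sketch of the naming scheme it implements (not the real helper):

    def undoname_sketch(path):
        # b'journal.dirstate' -> b'undo.dirstate'
        assert path.startswith(b'journal')
        return b'undo' + path[len(b'journal'):]

    assert undoname_sketch(b'journal.dirstate') == b'undo.dirstate'
    assert undoname_sketch(b'journal.narrowspec.dirstate') == b'undo.narrowspec.dirstate'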
2555 @unfilteredmethod
2558 @unfilteredmethod
2556 def _writejournal(self, desc):
2559 def _writejournal(self, desc):
2557 self.dirstate.savebackup(None, b'journal.dirstate')
2560 self.dirstate.savebackup(None, b'journal.dirstate')
2558 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2561 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2559 narrowspec.savebackup(self, b'journal.narrowspec')
2562 narrowspec.savebackup(self, b'journal.narrowspec')
2560 self.vfs.write(
2563 self.vfs.write(
2561 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2564 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2562 )
2565 )
2563 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2566 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2564 bookmarksvfs = bookmarks.bookmarksvfs(self)
2567 bookmarksvfs = bookmarks.bookmarksvfs(self)
2565 bookmarksvfs.write(
2568 bookmarksvfs.write(
2566 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2569 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2567 )
2570 )
2568 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2571 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2569
2572
2570 def recover(self):
2573 def recover(self):
2571 with self.lock():
2574 with self.lock():
2572 if self.svfs.exists(b"journal"):
2575 if self.svfs.exists(b"journal"):
2573 self.ui.status(_(b"rolling back interrupted transaction\n"))
2576 self.ui.status(_(b"rolling back interrupted transaction\n"))
2574 vfsmap = {
2577 vfsmap = {
2575 b'': self.svfs,
2578 b'': self.svfs,
2576 b'plain': self.vfs,
2579 b'plain': self.vfs,
2577 }
2580 }
2578 transaction.rollback(
2581 transaction.rollback(
2579 self.svfs,
2582 self.svfs,
2580 vfsmap,
2583 vfsmap,
2581 b"journal",
2584 b"journal",
2582 self.ui.warn,
2585 self.ui.warn,
2583 checkambigfiles=_cachedfiles,
2586 checkambigfiles=_cachedfiles,
2584 )
2587 )
2585 self.invalidate()
2588 self.invalidate()
2586 return True
2589 return True
2587 else:
2590 else:
2588 self.ui.warn(_(b"no interrupted transaction available\n"))
2591 self.ui.warn(_(b"no interrupted transaction available\n"))
2589 return False
2592 return False
2590
2593
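# Note (added for clarity): recover() above backs the `hg recover` command and
# rolls back an interrupted transaction from the journal.* files, while
# rollback() below backs `hg rollback` and uses the undo.* files that
# aftertrans() renames the journal files to once a transaction closes cleanly.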
2591 def rollback(self, dryrun=False, force=False):
2594 def rollback(self, dryrun=False, force=False):
2592 wlock = lock = dsguard = None
2595 wlock = lock = dsguard = None
2593 try:
2596 try:
2594 wlock = self.wlock()
2597 wlock = self.wlock()
2595 lock = self.lock()
2598 lock = self.lock()
2596 if self.svfs.exists(b"undo"):
2599 if self.svfs.exists(b"undo"):
2597 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2600 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2598
2601
2599 return self._rollback(dryrun, force, dsguard)
2602 return self._rollback(dryrun, force, dsguard)
2600 else:
2603 else:
2601 self.ui.warn(_(b"no rollback information available\n"))
2604 self.ui.warn(_(b"no rollback information available\n"))
2602 return 1
2605 return 1
2603 finally:
2606 finally:
2604 release(dsguard, lock, wlock)
2607 release(dsguard, lock, wlock)
2605
2608
2606 @unfilteredmethod # Until we get smarter cache management
2609 @unfilteredmethod # Until we get smarter cache management
2607 def _rollback(self, dryrun, force, dsguard):
2610 def _rollback(self, dryrun, force, dsguard):
2608 ui = self.ui
2611 ui = self.ui
2609 try:
2612 try:
2610 args = self.vfs.read(b'undo.desc').splitlines()
2613 args = self.vfs.read(b'undo.desc').splitlines()
2611 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2614 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2612 if len(args) >= 3:
2615 if len(args) >= 3:
2613 detail = args[2]
2616 detail = args[2]
2614 oldtip = oldlen - 1
2617 oldtip = oldlen - 1
2615
2618
2616 if detail and ui.verbose:
2619 if detail and ui.verbose:
2617 msg = _(
2620 msg = _(
2618 b'repository tip rolled back to revision %d'
2621 b'repository tip rolled back to revision %d'
2619 b' (undo %s: %s)\n'
2622 b' (undo %s: %s)\n'
2620 ) % (oldtip, desc, detail)
2623 ) % (oldtip, desc, detail)
2621 else:
2624 else:
2622 msg = _(
2625 msg = _(
2623 b'repository tip rolled back to revision %d (undo %s)\n'
2626 b'repository tip rolled back to revision %d (undo %s)\n'
2624 ) % (oldtip, desc)
2627 ) % (oldtip, desc)
2625 except IOError:
2628 except IOError:
2626 msg = _(b'rolling back unknown transaction\n')
2629 msg = _(b'rolling back unknown transaction\n')
2627 desc = None
2630 desc = None
2628
2631
2629 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2632 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2630 raise error.Abort(
2633 raise error.Abort(
2631 _(
2634 _(
2632 b'rollback of last commit while not checked out '
2635 b'rollback of last commit while not checked out '
2633 b'may lose data'
2636 b'may lose data'
2634 ),
2637 ),
2635 hint=_(b'use -f to force'),
2638 hint=_(b'use -f to force'),
2636 )
2639 )
2637
2640
2638 ui.status(msg)
2641 ui.status(msg)
2639 if dryrun:
2642 if dryrun:
2640 return 0
2643 return 0
2641
2644
2642 parents = self.dirstate.parents()
2645 parents = self.dirstate.parents()
2643 self.destroying()
2646 self.destroying()
2644 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2647 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2645 transaction.rollback(
2648 transaction.rollback(
2646 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2649 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2647 )
2650 )
2648 bookmarksvfs = bookmarks.bookmarksvfs(self)
2651 bookmarksvfs = bookmarks.bookmarksvfs(self)
2649 if bookmarksvfs.exists(b'undo.bookmarks'):
2652 if bookmarksvfs.exists(b'undo.bookmarks'):
2650 bookmarksvfs.rename(
2653 bookmarksvfs.rename(
2651 b'undo.bookmarks', b'bookmarks', checkambig=True
2654 b'undo.bookmarks', b'bookmarks', checkambig=True
2652 )
2655 )
2653 if self.svfs.exists(b'undo.phaseroots'):
2656 if self.svfs.exists(b'undo.phaseroots'):
2654 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2657 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2655 self.invalidate()
2658 self.invalidate()
2656
2659
2657 has_node = self.changelog.index.has_node
2660 has_node = self.changelog.index.has_node
2658 parentgone = any(not has_node(p) for p in parents)
2661 parentgone = any(not has_node(p) for p in parents)
2659 if parentgone:
2662 if parentgone:
2660 # prevent dirstateguard from overwriting already restored one
2663 # prevent dirstateguard from overwriting already restored one
2661 dsguard.close()
2664 dsguard.close()
2662
2665
2663 narrowspec.restorebackup(self, b'undo.narrowspec')
2666 narrowspec.restorebackup(self, b'undo.narrowspec')
2664 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2667 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2665 self.dirstate.restorebackup(None, b'undo.dirstate')
2668 self.dirstate.restorebackup(None, b'undo.dirstate')
2666 try:
2669 try:
2667 branch = self.vfs.read(b'undo.branch')
2670 branch = self.vfs.read(b'undo.branch')
2668 self.dirstate.setbranch(encoding.tolocal(branch))
2671 self.dirstate.setbranch(encoding.tolocal(branch))
2669 except IOError:
2672 except IOError:
2670 ui.warn(
2673 ui.warn(
2671 _(
2674 _(
2672 b'named branch could not be reset: '
2675 b'named branch could not be reset: '
2673 b'current branch is still \'%s\'\n'
2676 b'current branch is still \'%s\'\n'
2674 )
2677 )
2675 % self.dirstate.branch()
2678 % self.dirstate.branch()
2676 )
2679 )
2677
2680
2678 parents = tuple([p.rev() for p in self[None].parents()])
2681 parents = tuple([p.rev() for p in self[None].parents()])
2679 if len(parents) > 1:
2682 if len(parents) > 1:
2680 ui.status(
2683 ui.status(
2681 _(
2684 _(
2682 b'working directory now based on '
2685 b'working directory now based on '
2683 b'revisions %d and %d\n'
2686 b'revisions %d and %d\n'
2684 )
2687 )
2685 % parents
2688 % parents
2686 )
2689 )
2687 else:
2690 else:
2688 ui.status(
2691 ui.status(
2689 _(b'working directory now based on revision %d\n') % parents
2692 _(b'working directory now based on revision %d\n') % parents
2690 )
2693 )
2691 mergestatemod.mergestate.clean(self)
2694 mergestatemod.mergestate.clean(self)
2692
2695
2693 # TODO: if we know which new heads may result from this rollback, pass
2696 # TODO: if we know which new heads may result from this rollback, pass
2694 # them to destroy(), which will prevent the branchhead cache from being
2697 # them to destroy(), which will prevent the branchhead cache from being
2695 # invalidated.
2698 # invalidated.
2696 self.destroyed()
2699 self.destroyed()
2697 return 0
2700 return 0
2698
2701
2699 def _buildcacheupdater(self, newtransaction):
2702 def _buildcacheupdater(self, newtransaction):
2700 """called during transaction to build the callback updating cache
2703 """called during transaction to build the callback updating cache
2701
2704
2702 Lives on the repository to help extensions that might want to augment
2705 Lives on the repository to help extensions that might want to augment
2703 this logic. For this purpose, the created transaction is passed to the
2706 this logic. For this purpose, the created transaction is passed to the
2704 method.
2707 method.
2705 """
2708 """
2706 # we must avoid cyclic reference between repo and transaction.
2709 # we must avoid cyclic reference between repo and transaction.
2707 reporef = weakref.ref(self)
2710 reporef = weakref.ref(self)
2708
2711
2709 def updater(tr):
2712 def updater(tr):
2710 repo = reporef()
2713 repo = reporef()
2711 assert repo is not None # help pytype
2714 assert repo is not None # help pytype
2712 repo.updatecaches(tr)
2715 repo.updatecaches(tr)
2713
2716
2714 return updater
2717 return updater
2715
2718
2716 @unfilteredmethod
2719 @unfilteredmethod
2717 def updatecaches(self, tr=None, full=False):
2720 def updatecaches(self, tr=None, full=False):
2718 """warm appropriate caches
2721 """warm appropriate caches
2719
2722
2720 If this function is called after a transaction closed, the transaction
2723 If this function is called after a transaction closed, the transaction
2721 will be available in the 'tr' argument. This can be used to selectively
2724 will be available in the 'tr' argument. This can be used to selectively
2722 update caches relevant to the changes in that transaction.
2725 update caches relevant to the changes in that transaction.
2723
2726
2724 If 'full' is set, make sure all caches the function knows about have
2727 If 'full' is set, make sure all caches the function knows about have
2725 up-to-date data, even the ones usually loaded more lazily.
2728 up-to-date data, even the ones usually loaded more lazily.
2726 """
2729 """
2727 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2730 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2728 # During strip, many caches are invalid but
2731 # During strip, many caches are invalid but
2729 # later call to `destroyed` will refresh them.
2732 # later call to `destroyed` will refresh them.
2730 return
2733 return
2731
2734
2732 if tr is None or tr.changes[b'origrepolen'] < len(self):
2735 if tr is None or tr.changes[b'origrepolen'] < len(self):
2733 # accessing the 'served' branchmap should refresh all the others,
2736 # accessing the 'served' branchmap should refresh all the others,
2734 self.ui.debug(b'updating the branch cache\n')
2737 self.ui.debug(b'updating the branch cache\n')
2735 self.filtered(b'served').branchmap()
2738 self.filtered(b'served').branchmap()
2736 self.filtered(b'served.hidden').branchmap()
2739 self.filtered(b'served.hidden').branchmap()
2737
2740
2738 if full:
2741 if full:
2739 unfi = self.unfiltered()
2742 unfi = self.unfiltered()
2740
2743
2741 self.changelog.update_caches(transaction=tr)
2744 self.changelog.update_caches(transaction=tr)
2742 self.manifestlog.update_caches(transaction=tr)
2745 self.manifestlog.update_caches(transaction=tr)
2743
2746
2744 rbc = unfi.revbranchcache()
2747 rbc = unfi.revbranchcache()
2745 for r in unfi.changelog:
2748 for r in unfi.changelog:
2746 rbc.branchinfo(r)
2749 rbc.branchinfo(r)
2747 rbc.write()
2750 rbc.write()
2748
2751
2749 # ensure the working copy parents are in the manifestfulltextcache
2752 # ensure the working copy parents are in the manifestfulltextcache
2750 for ctx in self[b'.'].parents():
2753 for ctx in self[b'.'].parents():
2751 ctx.manifest() # accessing the manifest is enough
2754 ctx.manifest() # accessing the manifest is enough
2752
2755
2753 # accessing fnode cache warms the cache
2756 # accessing fnode cache warms the cache
2754 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2757 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2755 # accessing tags warms the cache
2758 # accessing tags warms the cache
2756 self.tags()
2759 self.tags()
2757 self.filtered(b'served').tags()
2760 self.filtered(b'served').tags()
2758
2761
2759 # The `full` arg is documented as updating even the lazily-loaded
2762 # The `full` arg is documented as updating even the lazily-loaded
2760 # caches immediately, so we're forcing a write to cause these caches
2763 # caches immediately, so we're forcing a write to cause these caches
2761 # to be warmed up even if they haven't explicitly been requested
2764 # to be warmed up even if they haven't explicitly been requested
2762 # yet (if they've never been used by hg, they won't ever have been
2765 # yet (if they've never been used by hg, they won't ever have been
2763 # written, even if they're a subset of another kind of cache that
2766 # written, even if they're a subset of another kind of cache that
2764 # *has* been used).
2767 # *has* been used).
2765 for filt in repoview.filtertable.keys():
2768 for filt in repoview.filtertable.keys():
2766 filtered = self.filtered(filt)
2769 filtered = self.filtered(filt)
2767 filtered.branchmap().write(filtered)
2770 filtered.branchmap().write(filtered)
2768
2771
2769 def invalidatecaches(self):
2772 def invalidatecaches(self):
2770
2773
2771 if '_tagscache' in vars(self):
2774 if '_tagscache' in vars(self):
2772 # can't use delattr on proxy
2775 # can't use delattr on proxy
2773 del self.__dict__['_tagscache']
2776 del self.__dict__['_tagscache']
2774
2777
2775 self._branchcaches.clear()
2778 self._branchcaches.clear()
2776 self.invalidatevolatilesets()
2779 self.invalidatevolatilesets()
2777 self._sparsesignaturecache.clear()
2780 self._sparsesignaturecache.clear()
2778
2781
2779 def invalidatevolatilesets(self):
2782 def invalidatevolatilesets(self):
2780 self.filteredrevcache.clear()
2783 self.filteredrevcache.clear()
2781 obsolete.clearobscaches(self)
2784 obsolete.clearobscaches(self)
2782 self._quick_access_changeid_invalidate()
2785 self._quick_access_changeid_invalidate()
2783
2786
2784 def invalidatedirstate(self):
2787 def invalidatedirstate(self):
2785 """Invalidates the dirstate, causing the next call to dirstate
2788 """Invalidates the dirstate, causing the next call to dirstate
2786 to check if it was modified since the last time it was read,
2789 to check if it was modified since the last time it was read,
2787 rereading it if it has.
2790 rereading it if it has.
2788
2791
2789 This is different from dirstate.invalidate() in that it doesn't always
2792 This is different from dirstate.invalidate() in that it doesn't always
2790 reread the dirstate. Use dirstate.invalidate() if you want to
2793 reread the dirstate. Use dirstate.invalidate() if you want to
2791 explicitly read the dirstate again (i.e. restoring it to a previous
2794 explicitly read the dirstate again (i.e. restoring it to a previous
2792 known good state)."""
2795 known good state)."""
2793 if hasunfilteredcache(self, 'dirstate'):
2796 if hasunfilteredcache(self, 'dirstate'):
2794 for k in self.dirstate._filecache:
2797 for k in self.dirstate._filecache:
2795 try:
2798 try:
2796 delattr(self.dirstate, k)
2799 delattr(self.dirstate, k)
2797 except AttributeError:
2800 except AttributeError:
2798 pass
2801 pass
2799 delattr(self.unfiltered(), 'dirstate')
2802 delattr(self.unfiltered(), 'dirstate')
2800
2803
2801 def invalidate(self, clearfilecache=False):
2804 def invalidate(self, clearfilecache=False):
2802 """Invalidates both store and non-store parts other than dirstate
2805 """Invalidates both store and non-store parts other than dirstate
2803
2806
2804 If a transaction is running, invalidation of store is omitted,
2807 If a transaction is running, invalidation of store is omitted,
2805 because discarding in-memory changes might cause inconsistency
2808 because discarding in-memory changes might cause inconsistency
2806 (e.g. an incomplete fncache causes unintentional failure, but
2809 (e.g. an incomplete fncache causes unintentional failure, but
2807 a redundant one doesn't).
2810 a redundant one doesn't).
2808 """
2811 """
2809 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2812 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2810 for k in list(self._filecache.keys()):
2813 for k in list(self._filecache.keys()):
2811 # dirstate is invalidated separately in invalidatedirstate()
2814 # dirstate is invalidated separately in invalidatedirstate()
2812 if k == b'dirstate':
2815 if k == b'dirstate':
2813 continue
2816 continue
2814 if (
2817 if (
2815 k == b'changelog'
2818 k == b'changelog'
2816 and self.currenttransaction()
2819 and self.currenttransaction()
2817 and self.changelog._delayed
2820 and self.changelog._delayed
2818 ):
2821 ):
2819 # The changelog object may store unwritten revisions. We don't
2822 # The changelog object may store unwritten revisions. We don't
2820 # want to lose them.
2823 # want to lose them.
2821 # TODO: Solve the problem instead of working around it.
2824 # TODO: Solve the problem instead of working around it.
2822 continue
2825 continue
2823
2826
2824 if clearfilecache:
2827 if clearfilecache:
2825 del self._filecache[k]
2828 del self._filecache[k]
2826 try:
2829 try:
2827 delattr(unfiltered, k)
2830 delattr(unfiltered, k)
2828 except AttributeError:
2831 except AttributeError:
2829 pass
2832 pass
2830 self.invalidatecaches()
2833 self.invalidatecaches()
2831 if not self.currenttransaction():
2834 if not self.currenttransaction():
2832 # TODO: Changing contents of store outside transaction
2835 # TODO: Changing contents of store outside transaction
2833 # causes inconsistency. We should make in-memory store
2836 # causes inconsistency. We should make in-memory store
2834 # changes detectable, and abort if changed.
2837 # changes detectable, and abort if changed.
2835 self.store.invalidatecaches()
2838 self.store.invalidatecaches()
2836
2839
2837 def invalidateall(self):
2840 def invalidateall(self):
2838 """Fully invalidates both store and non-store parts, causing the
2841 """Fully invalidates both store and non-store parts, causing the
2839 subsequent operation to reread any outside changes."""
2842 subsequent operation to reread any outside changes."""
2840 # extensions should hook this to invalidate their caches
2843 # extensions should hook this to invalidate their caches
2841 self.invalidate()
2844 self.invalidate()
2842 self.invalidatedirstate()
2845 self.invalidatedirstate()
2843
2846
2844 @unfilteredmethod
2847 @unfilteredmethod
2845 def _refreshfilecachestats(self, tr):
2848 def _refreshfilecachestats(self, tr):
2846 """Reload stats of cached files so that they are flagged as valid"""
2849 """Reload stats of cached files so that they are flagged as valid"""
2847 for k, ce in self._filecache.items():
2850 for k, ce in self._filecache.items():
2848 k = pycompat.sysstr(k)
2851 k = pycompat.sysstr(k)
2849 if k == 'dirstate' or k not in self.__dict__:
2852 if k == 'dirstate' or k not in self.__dict__:
2850 continue
2853 continue
2851 ce.refresh()
2854 ce.refresh()
2852
2855
2853 def _lock(
2856 def _lock(
2854 self,
2857 self,
2855 vfs,
2858 vfs,
2856 lockname,
2859 lockname,
2857 wait,
2860 wait,
2858 releasefn,
2861 releasefn,
2859 acquirefn,
2862 acquirefn,
2860 desc,
2863 desc,
2861 ):
2864 ):
2862 timeout = 0
2865 timeout = 0
2863 warntimeout = 0
2866 warntimeout = 0
2864 if wait:
2867 if wait:
2865 timeout = self.ui.configint(b"ui", b"timeout")
2868 timeout = self.ui.configint(b"ui", b"timeout")
2866 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2869 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2867 # internal config: ui.signal-safe-lock
2870 # internal config: ui.signal-safe-lock
2868 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2871 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2869
2872
2870 l = lockmod.trylock(
2873 l = lockmod.trylock(
2871 self.ui,
2874 self.ui,
2872 vfs,
2875 vfs,
2873 lockname,
2876 lockname,
2874 timeout,
2877 timeout,
2875 warntimeout,
2878 warntimeout,
2876 releasefn=releasefn,
2879 releasefn=releasefn,
2877 acquirefn=acquirefn,
2880 acquirefn=acquirefn,
2878 desc=desc,
2881 desc=desc,
2879 signalsafe=signalsafe,
2882 signalsafe=signalsafe,
2880 )
2883 )
2881 return l
2884 return l
2882
2885
2883 def _afterlock(self, callback):
2886 def _afterlock(self, callback):
2884 """add a callback to be run when the repository is fully unlocked
2887 """add a callback to be run when the repository is fully unlocked
2885
2888
2886 The callback will be executed when the outermost lock is released
2889 The callback will be executed when the outermost lock is released
2887 (with wlock being higher level than 'lock')."""
2890 (with wlock being higher level than 'lock')."""
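# The callback is invoked with a single boolean indicating whether the locked
# operation succeeded; commithook() and runhook() further down in this class
# are in-tree examples of callbacks registered through this method.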
2888 for ref in (self._wlockref, self._lockref):
2891 for ref in (self._wlockref, self._lockref):
2889 l = ref and ref()
2892 l = ref and ref()
2890 if l and l.held:
2893 if l and l.held:
2891 l.postrelease.append(callback)
2894 l.postrelease.append(callback)
2892 break
2895 break
2893 else: # no lock has been found.
2896 else: # no lock has been found.
2894 callback(True)
2897 callback(True)
2895
2898
2896 def lock(self, wait=True):
2899 def lock(self, wait=True):
2897 """Lock the repository store (.hg/store) and return a weak reference
2900 """Lock the repository store (.hg/store) and return a weak reference
2898 to the lock. Use this before modifying the store (e.g. committing or
2901 to the lock. Use this before modifying the store (e.g. committing or
2899 stripping). If you are opening a transaction, get a lock as well.
2902 stripping). If you are opening a transaction, get a lock as well.
2900
2903
2901 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2904 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2902 'wlock' first to avoid a dead-lock hazard."""
2905 'wlock' first to avoid a dead-lock hazard."""
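# Illustrative usage (sketch, assuming an existing repo object): the
# documented ordering is wlock before lock, e.g.
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction(b'example') as tr:
#             ...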
2903 l = self._currentlock(self._lockref)
2906 l = self._currentlock(self._lockref)
2904 if l is not None:
2907 if l is not None:
2905 l.lock()
2908 l.lock()
2906 return l
2909 return l
2907
2910
2908 l = self._lock(
2911 l = self._lock(
2909 vfs=self.svfs,
2912 vfs=self.svfs,
2910 lockname=b"lock",
2913 lockname=b"lock",
2911 wait=wait,
2914 wait=wait,
2912 releasefn=None,
2915 releasefn=None,
2913 acquirefn=self.invalidate,
2916 acquirefn=self.invalidate,
2914 desc=_(b'repository %s') % self.origroot,
2917 desc=_(b'repository %s') % self.origroot,
2915 )
2918 )
2916 self._lockref = weakref.ref(l)
2919 self._lockref = weakref.ref(l)
2917 return l
2920 return l
2918
2921
2919 def wlock(self, wait=True):
2922 def wlock(self, wait=True):
2920 """Lock the non-store parts of the repository (everything under
2923 """Lock the non-store parts of the repository (everything under
2921 .hg except .hg/store) and return a weak reference to the lock.
2924 .hg except .hg/store) and return a weak reference to the lock.
2922
2925
2923 Use this before modifying files in .hg.
2926 Use this before modifying files in .hg.
2924
2927
2925 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2928 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2926 'wlock' first to avoid a dead-lock hazard."""
2929 'wlock' first to avoid a dead-lock hazard."""
2927 l = self._wlockref() if self._wlockref else None
2930 l = self._wlockref() if self._wlockref else None
2928 if l is not None and l.held:
2931 if l is not None and l.held:
2929 l.lock()
2932 l.lock()
2930 return l
2933 return l
2931
2934
2932 # We do not need to check for non-waiting lock acquisition. Such
2935 # We do not need to check for non-waiting lock acquisition. Such
2933 # acquisition would not cause dead-lock as they would just fail.
2936 # acquisition would not cause dead-lock as they would just fail.
2934 if wait and (
2937 if wait and (
2935 self.ui.configbool(b'devel', b'all-warnings')
2938 self.ui.configbool(b'devel', b'all-warnings')
2936 or self.ui.configbool(b'devel', b'check-locks')
2939 or self.ui.configbool(b'devel', b'check-locks')
2937 ):
2940 ):
2938 if self._currentlock(self._lockref) is not None:
2941 if self._currentlock(self._lockref) is not None:
2939 self.ui.develwarn(b'"wlock" acquired after "lock"')
2942 self.ui.develwarn(b'"wlock" acquired after "lock"')
2940
2943
2941 def unlock():
2944 def unlock():
2942 if self.dirstate.pendingparentchange():
2945 if self.dirstate.pendingparentchange():
2943 self.dirstate.invalidate()
2946 self.dirstate.invalidate()
2944 else:
2947 else:
2945 self.dirstate.write(None)
2948 self.dirstate.write(None)
2946
2949
2947 self._filecache[b'dirstate'].refresh()
2950 self._filecache[b'dirstate'].refresh()
2948
2951
2949 l = self._lock(
2952 l = self._lock(
2950 self.vfs,
2953 self.vfs,
2951 b"wlock",
2954 b"wlock",
2952 wait,
2955 wait,
2953 unlock,
2956 unlock,
2954 self.invalidatedirstate,
2957 self.invalidatedirstate,
2955 _(b'working directory of %s') % self.origroot,
2958 _(b'working directory of %s') % self.origroot,
2956 )
2959 )
2957 self._wlockref = weakref.ref(l)
2960 self._wlockref = weakref.ref(l)
2958 return l
2961 return l
2959
2962
2960 def _currentlock(self, lockref):
2963 def _currentlock(self, lockref):
2961 """Returns the lock if it's held, or None if it's not."""
2964 """Returns the lock if it's held, or None if it's not."""
2962 if lockref is None:
2965 if lockref is None:
2963 return None
2966 return None
2964 l = lockref()
2967 l = lockref()
2965 if l is None or not l.held:
2968 if l is None or not l.held:
2966 return None
2969 return None
2967 return l
2970 return l
2968
2971
2969 def currentwlock(self):
2972 def currentwlock(self):
2970 """Returns the wlock if it's held, or None if it's not."""
2973 """Returns the wlock if it's held, or None if it's not."""
2971 return self._currentlock(self._wlockref)
2974 return self._currentlock(self._wlockref)
2972
2975
2973 def checkcommitpatterns(self, wctx, match, status, fail):
2976 def checkcommitpatterns(self, wctx, match, status, fail):
2974 """check for commit arguments that aren't committable"""
2977 """check for commit arguments that aren't committable"""
2975 if match.isexact() or match.prefix():
2978 if match.isexact() or match.prefix():
2976 matched = set(status.modified + status.added + status.removed)
2979 matched = set(status.modified + status.added + status.removed)
2977
2980
2978 for f in match.files():
2981 for f in match.files():
2979 f = self.dirstate.normalize(f)
2982 f = self.dirstate.normalize(f)
2980 if f == b'.' or f in matched or f in wctx.substate:
2983 if f == b'.' or f in matched or f in wctx.substate:
2981 continue
2984 continue
2982 if f in status.deleted:
2985 if f in status.deleted:
2983 fail(f, _(b'file not found!'))
2986 fail(f, _(b'file not found!'))
2984 # Is it a directory that exists or used to exist?
2987 # Is it a directory that exists or used to exist?
2985 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2988 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2986 d = f + b'/'
2989 d = f + b'/'
2987 for mf in matched:
2990 for mf in matched:
2988 if mf.startswith(d):
2991 if mf.startswith(d):
2989 break
2992 break
2990 else:
2993 else:
2991 fail(f, _(b"no match under directory!"))
2994 fail(f, _(b"no match under directory!"))
2992 elif f not in self.dirstate:
2995 elif f not in self.dirstate:
2993 fail(f, _(b"file not tracked!"))
2996 fail(f, _(b"file not tracked!"))
2994
2997
2995 @unfilteredmethod
2998 @unfilteredmethod
2996 def commit(
2999 def commit(
2997 self,
3000 self,
2998 text=b"",
3001 text=b"",
2999 user=None,
3002 user=None,
3000 date=None,
3003 date=None,
3001 match=None,
3004 match=None,
3002 force=False,
3005 force=False,
3003 editor=None,
3006 editor=None,
3004 extra=None,
3007 extra=None,
3005 ):
3008 ):
3006 """Add a new revision to current repository.
3009 """Add a new revision to current repository.
3007
3010
3008 Revision information is gathered from the working directory,
3011 Revision information is gathered from the working directory,
3009 match can be used to filter the committed files. If editor is
3012 match can be used to filter the committed files. If editor is
3010 supplied, it is called to get a commit message.
3013 supplied, it is called to get a commit message.
3011 """
3014 """
3012 if extra is None:
3015 if extra is None:
3013 extra = {}
3016 extra = {}
3014
3017
3015 def fail(f, msg):
3018 def fail(f, msg):
3016 raise error.InputError(b'%s: %s' % (f, msg))
3019 raise error.InputError(b'%s: %s' % (f, msg))
3017
3020
3018 if not match:
3021 if not match:
3019 match = matchmod.always()
3022 match = matchmod.always()
3020
3023
3021 if not force:
3024 if not force:
3022 match.bad = fail
3025 match.bad = fail
3023
3026
3024 # lock() for recent changelog (see issue4368)
3027 # lock() for recent changelog (see issue4368)
3025 with self.wlock(), self.lock():
3028 with self.wlock(), self.lock():
3026 wctx = self[None]
3029 wctx = self[None]
3027 merge = len(wctx.parents()) > 1
3030 merge = len(wctx.parents()) > 1
3028
3031
3029 if not force and merge and not match.always():
3032 if not force and merge and not match.always():
3030 raise error.Abort(
3033 raise error.Abort(
3031 _(
3034 _(
3032 b'cannot partially commit a merge '
3035 b'cannot partially commit a merge '
3033 b'(do not specify files or patterns)'
3036 b'(do not specify files or patterns)'
3034 )
3037 )
3035 )
3038 )
3036
3039
3037 status = self.status(match=match, clean=force)
3040 status = self.status(match=match, clean=force)
3038 if force:
3041 if force:
3039 status.modified.extend(
3042 status.modified.extend(
3040 status.clean
3043 status.clean
3041 ) # mq may commit clean files
3044 ) # mq may commit clean files
3042
3045
3043 # check subrepos
3046 # check subrepos
3044 subs, commitsubs, newstate = subrepoutil.precommit(
3047 subs, commitsubs, newstate = subrepoutil.precommit(
3045 self.ui, wctx, status, match, force=force
3048 self.ui, wctx, status, match, force=force
3046 )
3049 )
3047
3050
3048 # make sure all explicit patterns are matched
3051 # make sure all explicit patterns are matched
3049 if not force:
3052 if not force:
3050 self.checkcommitpatterns(wctx, match, status, fail)
3053 self.checkcommitpatterns(wctx, match, status, fail)
3051
3054
3052 cctx = context.workingcommitctx(
3055 cctx = context.workingcommitctx(
3053 self, status, text, user, date, extra
3056 self, status, text, user, date, extra
3054 )
3057 )
3055
3058
3056 ms = mergestatemod.mergestate.read(self)
3059 ms = mergestatemod.mergestate.read(self)
3057 mergeutil.checkunresolved(ms)
3060 mergeutil.checkunresolved(ms)
3058
3061
3059 # internal config: ui.allowemptycommit
3062 # internal config: ui.allowemptycommit
3060 if cctx.isempty() and not self.ui.configbool(
3063 if cctx.isempty() and not self.ui.configbool(
3061 b'ui', b'allowemptycommit'
3064 b'ui', b'allowemptycommit'
3062 ):
3065 ):
3063 self.ui.debug(b'nothing to commit, clearing merge state\n')
3066 self.ui.debug(b'nothing to commit, clearing merge state\n')
3064 ms.reset()
3067 ms.reset()
3065 return None
3068 return None
3066
3069
3067 if merge and cctx.deleted():
3070 if merge and cctx.deleted():
3068 raise error.Abort(_(b"cannot commit merge with missing files"))
3071 raise error.Abort(_(b"cannot commit merge with missing files"))
3069
3072
3070 if editor:
3073 if editor:
3071 cctx._text = editor(self, cctx, subs)
3074 cctx._text = editor(self, cctx, subs)
3072 edited = text != cctx._text
3075 edited = text != cctx._text
3073
3076
3074 # Save commit message in case this transaction gets rolled back
3077 # Save commit message in case this transaction gets rolled back
3075 # (e.g. by a pretxncommit hook). Leave the content alone on
3078 # (e.g. by a pretxncommit hook). Leave the content alone on
3076 # the assumption that the user will use the same editor again.
3079 # the assumption that the user will use the same editor again.
3077 msgfn = self.savecommitmessage(cctx._text)
3080 msgfn = self.savecommitmessage(cctx._text)
3078
3081
3079 # commit subs and write new state
3082 # commit subs and write new state
3080 if subs:
3083 if subs:
3081 uipathfn = scmutil.getuipathfn(self)
3084 uipathfn = scmutil.getuipathfn(self)
3082 for s in sorted(commitsubs):
3085 for s in sorted(commitsubs):
3083 sub = wctx.sub(s)
3086 sub = wctx.sub(s)
3084 self.ui.status(
3087 self.ui.status(
3085 _(b'committing subrepository %s\n')
3088 _(b'committing subrepository %s\n')
3086 % uipathfn(subrepoutil.subrelpath(sub))
3089 % uipathfn(subrepoutil.subrelpath(sub))
3087 )
3090 )
3088 sr = sub.commit(cctx._text, user, date)
3091 sr = sub.commit(cctx._text, user, date)
3089 newstate[s] = (newstate[s][0], sr)
3092 newstate[s] = (newstate[s][0], sr)
3090 subrepoutil.writestate(self, newstate)
3093 subrepoutil.writestate(self, newstate)
3091
3094
3092 p1, p2 = self.dirstate.parents()
3095 p1, p2 = self.dirstate.parents()
3093 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3096 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3094 try:
3097 try:
3095 self.hook(
3098 self.hook(
3096 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3099 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3097 )
3100 )
3098 with self.transaction(b'commit'):
3101 with self.transaction(b'commit'):
3099 ret = self.commitctx(cctx, True)
3102 ret = self.commitctx(cctx, True)
3100 # update bookmarks, dirstate and mergestate
3103 # update bookmarks, dirstate and mergestate
3101 bookmarks.update(self, [p1, p2], ret)
3104 bookmarks.update(self, [p1, p2], ret)
3102 cctx.markcommitted(ret)
3105 cctx.markcommitted(ret)
3103 ms.reset()
3106 ms.reset()
3104 except: # re-raises
3107 except: # re-raises
3105 if edited:
3108 if edited:
3106 self.ui.write(
3109 self.ui.write(
3107 _(b'note: commit message saved in %s\n') % msgfn
3110 _(b'note: commit message saved in %s\n') % msgfn
3108 )
3111 )
3109 self.ui.write(
3112 self.ui.write(
3110 _(
3113 _(
3111 b"note: use 'hg commit --logfile "
3114 b"note: use 'hg commit --logfile "
3112 b".hg/last-message.txt --edit' to reuse it\n"
3115 b".hg/last-message.txt --edit' to reuse it\n"
3113 )
3116 )
3114 )
3117 )
3115 raise
3118 raise
3116
3119
3117 def commithook(unused_success):
3120 def commithook(unused_success):
3118 # hack for commands that use a temporary commit (e.g. histedit):
3121 # hack for commands that use a temporary commit (e.g. histedit):
3119 # the temporary commit got stripped before the hook release
3122 # the temporary commit got stripped before the hook release
3120 if self.changelog.hasnode(ret):
3123 if self.changelog.hasnode(ret):
3121 self.hook(
3124 self.hook(
3122 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3125 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3123 )
3126 )
3124
3127
3125 self._afterlock(commithook)
3128 self._afterlock(commithook)
3126 return ret
3129 return ret
3127
3130
3128 @unfilteredmethod
3131 @unfilteredmethod
3129 def commitctx(self, ctx, error=False, origctx=None):
3132 def commitctx(self, ctx, error=False, origctx=None):
3130 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3133 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3131
3134
3132 @unfilteredmethod
3135 @unfilteredmethod
3133 def destroying(self):
3136 def destroying(self):
3134 """Inform the repository that nodes are about to be destroyed.
3137 """Inform the repository that nodes are about to be destroyed.
3135 Intended for use by strip and rollback, so there's a common
3138 Intended for use by strip and rollback, so there's a common
3136 place for anything that has to be done before destroying history.
3139 place for anything that has to be done before destroying history.
3137
3140
3138 This is mostly useful for saving state that is in memory and waiting
3141 This is mostly useful for saving state that is in memory and waiting
3139 to be flushed when the current lock is released. Because a call to
3142 to be flushed when the current lock is released. Because a call to
3140 destroyed is imminent, the repo will be invalidated, causing those
3143 destroyed is imminent, the repo will be invalidated, causing those
3141 changes to stay in memory (waiting for the next unlock), or vanish
3144 changes to stay in memory (waiting for the next unlock), or vanish
3142 completely.
3145 completely.
3143 """
3146 """
3144 # When using the same lock to commit and strip, the phasecache is left
3147 # When using the same lock to commit and strip, the phasecache is left
3145 # dirty after committing. Then when we strip, the repo is invalidated,
3148 # dirty after committing. Then when we strip, the repo is invalidated,
3146 # causing those changes to disappear.
3149 # causing those changes to disappear.
3147 if '_phasecache' in vars(self):
3150 if '_phasecache' in vars(self):
3148 self._phasecache.write()
3151 self._phasecache.write()
3149
3152
3150 @unfilteredmethod
3153 @unfilteredmethod
3151 def destroyed(self):
3154 def destroyed(self):
3152 """Inform the repository that nodes have been destroyed.
3155 """Inform the repository that nodes have been destroyed.
3153 Intended for use by strip and rollback, so there's a common
3156 Intended for use by strip and rollback, so there's a common
3154 place for anything that has to be done after destroying history.
3157 place for anything that has to be done after destroying history.
3155 """
3158 """
3156 # When one tries to:
3159 # When one tries to:
3157 # 1) destroy nodes thus calling this method (e.g. strip)
3160 # 1) destroy nodes thus calling this method (e.g. strip)
3158 # 2) use phasecache somewhere (e.g. commit)
3161 # 2) use phasecache somewhere (e.g. commit)
3159 #
3162 #
3160 # then 2) will fail because the phasecache contains nodes that were
3163 # then 2) will fail because the phasecache contains nodes that were
3161 # removed. We can either remove phasecache from the filecache,
3164 # removed. We can either remove phasecache from the filecache,
3162 # causing it to reload next time it is accessed, or simply filter
3165 # causing it to reload next time it is accessed, or simply filter
3163 # the removed nodes now and write the updated cache.
3166 # the removed nodes now and write the updated cache.
3164 self._phasecache.filterunknown(self)
3167 self._phasecache.filterunknown(self)
3165 self._phasecache.write()
3168 self._phasecache.write()
3166
3169
3167 # refresh all repository caches
3170 # refresh all repository caches
3168 self.updatecaches()
3171 self.updatecaches()
3169
3172
3170 # Ensure the persistent tag cache is updated. Doing it now
3173 # Ensure the persistent tag cache is updated. Doing it now
3171 # means that the tag cache only has to worry about destroyed
3174 # means that the tag cache only has to worry about destroyed
3172 # heads immediately after a strip/rollback. That in turn
3175 # heads immediately after a strip/rollback. That in turn
3173 # guarantees that "cachetip == currenttip" (comparing both rev
3176 # guarantees that "cachetip == currenttip" (comparing both rev
3174 # and node) always means no nodes have been added or destroyed.
3177 # and node) always means no nodes have been added or destroyed.
3175
3178
3176 # XXX this is suboptimal when qrefresh'ing: we strip the current
3179 # XXX this is suboptimal when qrefresh'ing: we strip the current
3177 # head, refresh the tag cache, then immediately add a new head.
3180 # head, refresh the tag cache, then immediately add a new head.
3178 # But I think doing it this way is necessary for the "instant
3181 # But I think doing it this way is necessary for the "instant
3179 # tag cache retrieval" case to work.
3182 # tag cache retrieval" case to work.
3180 self.invalidate()
3183 self.invalidate()
3181
3184
3182 def status(
3185 def status(
3183 self,
3186 self,
3184 node1=b'.',
3187 node1=b'.',
3185 node2=None,
3188 node2=None,
3186 match=None,
3189 match=None,
3187 ignored=False,
3190 ignored=False,
3188 clean=False,
3191 clean=False,
3189 unknown=False,
3192 unknown=False,
3190 listsubrepos=False,
3193 listsubrepos=False,
3191 ):
3194 ):
3192 '''a convenience method that calls node1.status(node2)'''
3195 '''a convenience method that calls node1.status(node2)'''
3193 return self[node1].status(
3196 return self[node1].status(
3194 node2, match, ignored, clean, unknown, listsubrepos
3197 node2, match, ignored, clean, unknown, listsubrepos
3195 )
3198 )
3196
3199
3197 def addpostdsstatus(self, ps):
3200 def addpostdsstatus(self, ps):
3198 """Add a callback to run within the wlock, at the point at which status
3201 """Add a callback to run within the wlock, at the point at which status
3199 fixups happen.
3202 fixups happen.
3200
3203
3201 On status completion, callback(wctx, status) will be called with the
3204 On status completion, callback(wctx, status) will be called with the
3202 wlock held, unless the dirstate has changed from underneath or the wlock
3205 wlock held, unless the dirstate has changed from underneath or the wlock
3203 couldn't be grabbed.
3206 couldn't be grabbed.
3204
3207
3205 Callbacks should not capture and use a cached copy of the dirstate --
3208 Callbacks should not capture and use a cached copy of the dirstate --
3206 it might change in the meantime. Instead, they should access the
3209 it might change in the meantime. Instead, they should access the
3207 dirstate via wctx.repo().dirstate.
3210 dirstate via wctx.repo().dirstate.
3208
3211
3209 This list is emptied out after each status run -- extensions should
3212 This list is emptied out after each status run -- extensions should
3210 make sure they add to this list each time dirstate.status is called.
3213 make sure they add to this list each time dirstate.status is called.
3211 Extensions should also make sure they don't call this for statuses
3214 Extensions should also make sure they don't call this for statuses
3212 that don't involve the dirstate.
3215 that don't involve the dirstate.
3213 """
3216 """
3214
3217
3215 # The list is located here for uniqueness reasons -- it is actually
3218 # The list is located here for uniqueness reasons -- it is actually
3216 # managed by the workingctx, but that isn't unique per-repo.
3219 # managed by the workingctx, but that isn't unique per-repo.
3217 self._postdsstatus.append(ps)
3220 self._postdsstatus.append(ps)
3218
3221
3219 def postdsstatus(self):
3222 def postdsstatus(self):
3220 """Used by workingctx to get the list of post-dirstate-status hooks."""
3223 """Used by workingctx to get the list of post-dirstate-status hooks."""
3221 return self._postdsstatus
3224 return self._postdsstatus
3222
3225
3223 def clearpostdsstatus(self):
3226 def clearpostdsstatus(self):
3224 """Used by workingctx to clear post-dirstate-status hooks."""
3227 """Used by workingctx to clear post-dirstate-status hooks."""
3225 del self._postdsstatus[:]
3228 del self._postdsstatus[:]
3226
3229
3227 def heads(self, start=None):
3230 def heads(self, start=None):
3228 if start is None:
3231 if start is None:
3229 cl = self.changelog
3232 cl = self.changelog
3230 headrevs = reversed(cl.headrevs())
3233 headrevs = reversed(cl.headrevs())
3231 return [cl.node(rev) for rev in headrevs]
3234 return [cl.node(rev) for rev in headrevs]
3232
3235
3233 heads = self.changelog.heads(start)
3236 heads = self.changelog.heads(start)
3234 # sort the output in rev descending order
3237 # sort the output in rev descending order
3235 return sorted(heads, key=self.changelog.rev, reverse=True)
3238 return sorted(heads, key=self.changelog.rev, reverse=True)
3236
3239
3237 def branchheads(self, branch=None, start=None, closed=False):
3240 def branchheads(self, branch=None, start=None, closed=False):
3238 """return a (possibly filtered) list of heads for the given branch
3241 """return a (possibly filtered) list of heads for the given branch
3239
3242
3240 Heads are returned in topological order, from newest to oldest.
3243 Heads are returned in topological order, from newest to oldest.
3241 If branch is None, use the dirstate branch.
3244 If branch is None, use the dirstate branch.
3242 If start is not None, return only heads reachable from start.
3245 If start is not None, return only heads reachable from start.
3243 If closed is True, return heads that are marked as closed as well.
3246 If closed is True, return heads that are marked as closed as well.
3244 """
3247 """
3245 if branch is None:
3248 if branch is None:
3246 branch = self[None].branch()
3249 branch = self[None].branch()
3247 branches = self.branchmap()
3250 branches = self.branchmap()
3248 if not branches.hasbranch(branch):
3251 if not branches.hasbranch(branch):
3249 return []
3252 return []
3250 # the cache returns heads ordered lowest to highest
3253 # the cache returns heads ordered lowest to highest
3251 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3254 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3252 if start is not None:
3255 if start is not None:
3253 # filter out the heads that cannot be reached from startrev
3256 # filter out the heads that cannot be reached from startrev
3254 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3257 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3255 bheads = [h for h in bheads if h in fbheads]
3258 bheads = [h for h in bheads if h in fbheads]
3256 return bheads
3259 return bheads
3257
3260
3258 def branches(self, nodes):
3261 def branches(self, nodes):
3259 if not nodes:
3262 if not nodes:
3260 nodes = [self.changelog.tip()]
3263 nodes = [self.changelog.tip()]
3261 b = []
3264 b = []
3262 for n in nodes:
3265 for n in nodes:
3263 t = n
3266 t = n
3264 while True:
3267 while True:
3265 p = self.changelog.parents(n)
3268 p = self.changelog.parents(n)
3266 if p[1] != nullid or p[0] == nullid:
3269 if p[1] != nullid or p[0] == nullid:
3267 b.append((t, n, p[0], p[1]))
3270 b.append((t, n, p[0], p[1]))
3268 break
3271 break
3269 n = p[0]
3272 n = p[0]
3270 return b
3273 return b
3271
3274
3272 def between(self, pairs):
3275 def between(self, pairs):
3273 r = []
3276 r = []
3274
3277
3275 for top, bottom in pairs:
3278 for top, bottom in pairs:
3276 n, l, i = top, [], 0
3279 n, l, i = top, [], 0
3277 f = 1
3280 f = 1
3278
3281
3279 while n != bottom and n != nullid:
3282 while n != bottom and n != nullid:
3280 p = self.changelog.parents(n)[0]
3283 p = self.changelog.parents(n)[0]
3281 if i == f:
3284 if i == f:
3282 l.append(n)
3285 l.append(n)
3283 f = f * 2
3286 f = f * 2
3284 n = p
3287 n = p
3285 i += 1
3288 i += 1
3286
3289
3287 r.append(l)
3290 r.append(l)
3288
3291
3289 return r
3292 return r
3290
3293
3291 def checkpush(self, pushop):
3294 def checkpush(self, pushop):
3292 """Extensions can override this function if additional checks have
3295 """Extensions can override this function if additional checks have
3293 to be performed before pushing, or call it if they override push
3296 to be performed before pushing, or call it if they override push
3294 command.
3297 command.
3295 """
3298 """
3296
3299
3297 @unfilteredpropertycache
3300 @unfilteredpropertycache
3298 def prepushoutgoinghooks(self):
3301 def prepushoutgoinghooks(self):
3299 """Return util.hooks consists of a pushop with repo, remote, outgoing
3302 """Return util.hooks consists of a pushop with repo, remote, outgoing
3300 methods, which are called before pushing changesets.
3303 methods, which are called before pushing changesets.
3301 """
3304 """
3302 return util.hooks()
3305 return util.hooks()
3303
3306
3304 def pushkey(self, namespace, key, old, new):
3307 def pushkey(self, namespace, key, old, new):
3305 try:
3308 try:
3306 tr = self.currenttransaction()
3309 tr = self.currenttransaction()
3307 hookargs = {}
3310 hookargs = {}
3308 if tr is not None:
3311 if tr is not None:
3309 hookargs.update(tr.hookargs)
3312 hookargs.update(tr.hookargs)
3310 hookargs = pycompat.strkwargs(hookargs)
3313 hookargs = pycompat.strkwargs(hookargs)
3311 hookargs['namespace'] = namespace
3314 hookargs['namespace'] = namespace
3312 hookargs['key'] = key
3315 hookargs['key'] = key
3313 hookargs['old'] = old
3316 hookargs['old'] = old
3314 hookargs['new'] = new
3317 hookargs['new'] = new
3315 self.hook(b'prepushkey', throw=True, **hookargs)
3318 self.hook(b'prepushkey', throw=True, **hookargs)
3316 except error.HookAbort as exc:
3319 except error.HookAbort as exc:
3317 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3320 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3318 if exc.hint:
3321 if exc.hint:
3319 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3322 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3320 return False
3323 return False
3321 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3324 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3322 ret = pushkey.push(self, namespace, key, old, new)
3325 ret = pushkey.push(self, namespace, key, old, new)
3323
3326
3324 def runhook(unused_success):
3327 def runhook(unused_success):
3325 self.hook(
3328 self.hook(
3326 b'pushkey',
3329 b'pushkey',
3327 namespace=namespace,
3330 namespace=namespace,
3328 key=key,
3331 key=key,
3329 old=old,
3332 old=old,
3330 new=new,
3333 new=new,
3331 ret=ret,
3334 ret=ret,
3332 )
3335 )
3333
3336
3334 self._afterlock(runhook)
3337 self._afterlock(runhook)
3335 return ret
3338 return ret
3336
3339
3337 def listkeys(self, namespace):
3340 def listkeys(self, namespace):
3338 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3341 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3339 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3342 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3340 values = pushkey.list(self, namespace)
3343 values = pushkey.list(self, namespace)
3341 self.hook(b'listkeys', namespace=namespace, values=values)
3344 self.hook(b'listkeys', namespace=namespace, values=values)
3342 return values
3345 return values
3343
3346
3344 def debugwireargs(self, one, two, three=None, four=None, five=None):
3347 def debugwireargs(self, one, two, three=None, four=None, five=None):
3345 '''used to test argument passing over the wire'''
3348 '''used to test argument passing over the wire'''
3346 return b"%s %s %s %s %s" % (
3349 return b"%s %s %s %s %s" % (
3347 one,
3350 one,
3348 two,
3351 two,
3349 pycompat.bytestr(three),
3352 pycompat.bytestr(three),
3350 pycompat.bytestr(four),
3353 pycompat.bytestr(four),
3351 pycompat.bytestr(five),
3354 pycompat.bytestr(five),
3352 )
3355 )
3353
3356
3354 def savecommitmessage(self, text):
3357 def savecommitmessage(self, text):
3355 fp = self.vfs(b'last-message.txt', b'wb')
3358 fp = self.vfs(b'last-message.txt', b'wb')
3356 try:
3359 try:
3357 fp.write(text)
3360 fp.write(text)
3358 finally:
3361 finally:
3359 fp.close()
3362 fp.close()
3360 return self.pathto(fp.name[len(self.root) + 1 :])
3363 return self.pathto(fp.name[len(self.root) + 1 :])
3361
3364
3362 def register_wanted_sidedata(self, category):
3365 def register_wanted_sidedata(self, category):
3363 self._wanted_sidedata.add(pycompat.bytestr(category))
3366 self._wanted_sidedata.add(pycompat.bytestr(category))
3364
3367
3365 def register_sidedata_computer(self, kind, category, keys, computer):
3368 def register_sidedata_computer(self, kind, category, keys, computer):
3366 if kind not in (b"changelog", b"manifest", b"filelog"):
3369 if kind not in (b"changelog", b"manifest", b"filelog"):
3367 msg = _(b"unexpected revlog kind '%s'.")
3370 msg = _(b"unexpected revlog kind '%s'.")
3368 raise error.ProgrammingError(msg % kind)
3371 raise error.ProgrammingError(msg % kind)
3369 category = pycompat.bytestr(category)
3372 category = pycompat.bytestr(category)
3370 if category in self._sidedata_computers.get(kind, []):
3373 if category in self._sidedata_computers.get(kind, []):
3371 msg = _(
3374 msg = _(
3372 b"cannot register a sidedata computer twice for category '%s'."
3375 b"cannot register a sidedata computer twice for category '%s'."
3373 )
3376 )
3374 raise error.ProgrammingError(msg % category)
3377 raise error.ProgrammingError(msg % category)
3375 self._sidedata_computers.setdefault(kind, {})
3378 self._sidedata_computers.setdefault(kind, {})
3376 self._sidedata_computers[kind][category] = (keys, computer)
3379 self._sidedata_computers[kind][category] = (keys, computer)
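# Illustrative registration (sketch; the category, keys and computer are
# hypothetical):
#
#     repo.register_wanted_sidedata(b'exp-example')
#     repo.register_sidedata_computer(
#         b'changelog', b'exp-example', (SD_EXAMPLE,), compute_example_sidedata
#     )
#
# Registering the same (kind, category) pair twice raises a ProgrammingError.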
3377
3380
3378
3381
3379 # used to avoid circular references so destructors work
3382 # used to avoid circular references so destructors work
3380 def aftertrans(files):
3383 def aftertrans(files):
3381 renamefiles = [tuple(t) for t in files]
3384 renamefiles = [tuple(t) for t in files]
3382
3385
3383 def a():
3386 def a():
3384 for vfs, src, dest in renamefiles:
3387 for vfs, src, dest in renamefiles:
3385 # if src and dest refer to the same file, vfs.rename is a no-op,
3388 # if src and dest refer to the same file, vfs.rename is a no-op,
3386 # leaving both src and dest on disk. delete dest to make sure
3389 # leaving both src and dest on disk. delete dest to make sure
3387 # the rename couldn't be such a no-op.
3390 # the rename couldn't be such a no-op.
3388 vfs.tryunlink(dest)
3391 vfs.tryunlink(dest)
3389 try:
3392 try:
3390 vfs.rename(src, dest)
3393 vfs.rename(src, dest)
3391 except OSError: # journal file does not yet exist
3394 except OSError: # journal file does not yet exist
3392 pass
3395 pass
3393
3396
3394 return a
3397 return a
3395
3398
3396
3399
3397 def undoname(fn):
3400 def undoname(fn):
3398 base, name = os.path.split(fn)
3401 base, name = os.path.split(fn)
3399 assert name.startswith(b'journal')
3402 assert name.startswith(b'journal')
3400 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3403 return os.path.join(base, name.replace(b'journal', b'undo', 1))
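# For example, undoname(b'journal.dirstate') returns b'undo.dirstate'; this is
# how undofiles() derives the undo.* names from _journalfiles() above.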
3401
3404
3402
3405
3403 def instance(ui, path, create, intents=None, createopts=None):
3406 def instance(ui, path, create, intents=None, createopts=None):
3404 localpath = util.urllocalpath(path)
3407 localpath = util.urllocalpath(path)
3405 if create:
3408 if create:
3406 createrepository(ui, localpath, createopts=createopts)
3409 createrepository(ui, localpath, createopts=createopts)
3407
3410
3408 return makelocalrepository(ui, localpath, intents=intents)
3411 return makelocalrepository(ui, localpath, intents=intents)
3409
3412
3410
3413
3411 def islocal(path):
3414 def islocal(path):
3412 return True
3415 return True
3413
3416
3414
3417
3415 def defaultcreateopts(ui, createopts=None):
3418 def defaultcreateopts(ui, createopts=None):
3416 """Populate the default creation options for a repository.
3419 """Populate the default creation options for a repository.
3417
3420
3418 A dictionary of explicitly requested creation options can be passed
3421 A dictionary of explicitly requested creation options can be passed
3419 in. Missing keys will be populated.
3422 in. Missing keys will be populated.
3420 """
3423 """
3421 createopts = dict(createopts or {})
3424 createopts = dict(createopts or {})
3422
3425
3423 if b'backend' not in createopts:
3426 if b'backend' not in createopts:
3424 # experimental config: storage.new-repo-backend
3427 # experimental config: storage.new-repo-backend
3425 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3428 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3426
3429
3427 return createopts
3430 return createopts
3428
3431
3429
3432
3430 def newreporequirements(ui, createopts):
3433 def newreporequirements(ui, createopts):
3431 """Determine the set of requirements for a new local repository.
3434 """Determine the set of requirements for a new local repository.
3432
3435
3433 Extensions can wrap this function to specify custom requirements for
3436 Extensions can wrap this function to specify custom requirements for
3434 new repositories.
3437 new repositories.
3435 """
3438 """
3436 # If the repo is being created from a shared repository, we copy
3439 # If the repo is being created from a shared repository, we copy
3437 # its requirements.
3440 # its requirements.
3438 if b'sharedrepo' in createopts:
3441 if b'sharedrepo' in createopts:
3439 requirements = set(createopts[b'sharedrepo'].requirements)
3442 requirements = set(createopts[b'sharedrepo'].requirements)
3440 if createopts.get(b'sharedrelative'):
3443 if createopts.get(b'sharedrelative'):
3441 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3444 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3442 else:
3445 else:
3443 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3446 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3444
3447
3445 return requirements
3448 return requirements
3446
3449
3447 if b'backend' not in createopts:
3450 if b'backend' not in createopts:
3448 raise error.ProgrammingError(
3451 raise error.ProgrammingError(
3449 b'backend key not present in createopts; '
3452 b'backend key not present in createopts; '
3450 b'was defaultcreateopts() called?'
3453 b'was defaultcreateopts() called?'
3451 )
3454 )
3452
3455
3453 if createopts[b'backend'] != b'revlogv1':
3456 if createopts[b'backend'] != b'revlogv1':
3454 raise error.Abort(
3457 raise error.Abort(
3455 _(
3458 _(
3456 b'unable to determine repository requirements for '
3459 b'unable to determine repository requirements for '
3457 b'storage backend: %s'
3460 b'storage backend: %s'
3458 )
3461 )
3459 % createopts[b'backend']
3462 % createopts[b'backend']
3460 )
3463 )
3461
3464
3462 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3465 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3463 if ui.configbool(b'format', b'usestore'):
3466 if ui.configbool(b'format', b'usestore'):
3464 requirements.add(requirementsmod.STORE_REQUIREMENT)
3467 requirements.add(requirementsmod.STORE_REQUIREMENT)
3465 if ui.configbool(b'format', b'usefncache'):
3468 if ui.configbool(b'format', b'usefncache'):
3466 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3469 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3467 if ui.configbool(b'format', b'dotencode'):
3470 if ui.configbool(b'format', b'dotencode'):
3468 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3471 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3469
3472
3470 compengines = ui.configlist(b'format', b'revlog-compression')
3473 compengines = ui.configlist(b'format', b'revlog-compression')
3471 for compengine in compengines:
3474 for compengine in compengines:
3472 if compengine in util.compengines:
3475 if compengine in util.compengines:
3473 engine = util.compengines[compengine]
3476 engine = util.compengines[compengine]
3474 if engine.available() and engine.revlogheader():
3477 if engine.available() and engine.revlogheader():
3475 break
3478 break
3476 else:
3479 else:
3477 raise error.Abort(
3480 raise error.Abort(
3478 _(
3481 _(
3479 b'compression engines %s defined by '
3482 b'compression engines %s defined by '
3480 b'format.revlog-compression not available'
3483 b'format.revlog-compression not available'
3481 )
3484 )
3482 % b', '.join(b'"%s"' % e for e in compengines),
3485 % b', '.join(b'"%s"' % e for e in compengines),
3483 hint=_(
3486 hint=_(
3484 b'run "hg debuginstall" to list available '
3487 b'run "hg debuginstall" to list available '
3485 b'compression engines'
3488 b'compression engines'
3486 ),
3489 ),
3487 )
3490 )
3488
3491
3489 # zlib is the historical default and doesn't need an explicit requirement.
3492 # zlib is the historical default and doesn't need an explicit requirement.
3490 if compengine == b'zstd':
3493 if compengine == b'zstd':
3491 requirements.add(b'revlog-compression-zstd')
3494 requirements.add(b'revlog-compression-zstd')
3492 elif compengine != b'zlib':
3495 elif compengine != b'zlib':
3493 requirements.add(b'exp-compression-%s' % compengine)
3496 requirements.add(b'exp-compression-%s' % compengine)
3494
3497
3495 if scmutil.gdinitconfig(ui):
3498 if scmutil.gdinitconfig(ui):
3496 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3499 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3497 if ui.configbool(b'format', b'sparse-revlog'):
3500 if ui.configbool(b'format', b'sparse-revlog'):
3498 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3501 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3499
3502
3500 # experimental config: format.exp-use-side-data
3503 # experimental config: format.exp-use-side-data
3501 if ui.configbool(b'format', b'exp-use-side-data'):
3504 if ui.configbool(b'format', b'exp-use-side-data'):
3502 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3505 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3503 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3506 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3504 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3507 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3505 # experimental config: format.exp-use-copies-side-data-changeset
3508 # experimental config: format.exp-use-copies-side-data-changeset
3506 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3509 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3507 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3510 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3508 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3511 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3509 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3512 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3510 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3513 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3511 if ui.configbool(b'experimental', b'treemanifest'):
3514 if ui.configbool(b'experimental', b'treemanifest'):
3512 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3515 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3513
3516
3514 revlogv2 = ui.config(b'experimental', b'revlogv2')
3517 revlogv2 = ui.config(b'experimental', b'revlogv2')
3515 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3518 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3516 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3519 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3517 # generaldelta is implied by revlogv2.
3520 # generaldelta is implied by revlogv2.
3518 requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
3521 requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
3519 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3522 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3520 # experimental config: format.internal-phase
3523 # experimental config: format.internal-phase
3521 if ui.configbool(b'format', b'internal-phase'):
3524 if ui.configbool(b'format', b'internal-phase'):
3522 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3525 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3523
3526
3524 if createopts.get(b'narrowfiles'):
3527 if createopts.get(b'narrowfiles'):
3525 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3528 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3526
3529
3527 if createopts.get(b'lfs'):
3530 if createopts.get(b'lfs'):
3528 requirements.add(b'lfs')
3531 requirements.add(b'lfs')
3529
3532
3530 if ui.configbool(b'format', b'bookmarks-in-store'):
3533 if ui.configbool(b'format', b'bookmarks-in-store'):
3531 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3534 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3532
3535
3533 if ui.configbool(b'format', b'use-persistent-nodemap'):
3536 if ui.configbool(b'format', b'use-persistent-nodemap'):
3534 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3537 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3535
3538
3536 # if share-safe is enabled, let's create the new repository with the new
3539 # if share-safe is enabled, let's create the new repository with the new
3537 # requirement
3540 # requirement
3538 if ui.configbool(b'format', b'use-share-safe'):
3541 if ui.configbool(b'format', b'use-share-safe'):
3539 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3542 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3540
3543
3541 return requirements
3544 return requirements
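newreporequirements() builds its result incrementally: start from the backend's base requirement, then add one named requirement per enabled format option, occasionally swapping the base requirement out again (e.g. for revlogv2). A rough standalone sketch of that shape, with a plain dict standing in for the ui.configbool calls, is:

# Sketch only; the requirement strings mirror the on-disk names, but the
# configuration handling is reduced to a dictionary for illustration.
def requirements_from_config(cfg):
    reqs = {b'revlogv1'}
    if cfg.get('usestore', True):
        reqs.add(b'store')
    if cfg.get('usefncache', True):
        reqs.add(b'fncache')
    if cfg.get('dotencode', True):
        reqs.add(b'dotencode')
    if cfg.get('sparse-revlog', True):
        reqs.add(b'sparserevlog')
    return reqs

print(sorted(requirements_from_config({'dotencode': False})))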
3542
3545
3543
3546
3544 def checkrequirementscompat(ui, requirements):
3547 def checkrequirementscompat(ui, requirements):
3545 """Checks compatibility of repository requirements enabled and disabled.
3548 """Checks compatibility of repository requirements enabled and disabled.
3546
3549
3547 Returns a set of requirements which need to be dropped because the
3550 Returns a set of requirements which need to be dropped because the
3548 requirements they depend on are not enabled. Also warns users about it."""
3551 requirements they depend on are not enabled. Also warns users about it."""
3549
3552
3550 dropped = set()
3553 dropped = set()
3551
3554
3552 if requirementsmod.STORE_REQUIREMENT not in requirements:
3555 if requirementsmod.STORE_REQUIREMENT not in requirements:
3553 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3556 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3554 ui.warn(
3557 ui.warn(
3555 _(
3558 _(
3556 b'ignoring enabled \'format.bookmarks-in-store\' config '
3559 b'ignoring enabled \'format.bookmarks-in-store\' config '
3557 b'because it is incompatible with disabled '
3560 b'because it is incompatible with disabled '
3558 b'\'format.usestore\' config\n'
3561 b'\'format.usestore\' config\n'
3559 )
3562 )
3560 )
3563 )
3561 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3564 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3562
3565
3563 if (
3566 if (
3564 requirementsmod.SHARED_REQUIREMENT in requirements
3567 requirementsmod.SHARED_REQUIREMENT in requirements
3565 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3568 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3566 ):
3569 ):
3567 raise error.Abort(
3570 raise error.Abort(
3568 _(
3571 _(
3569 b"cannot create shared repository as source was created"
3572 b"cannot create shared repository as source was created"
3570 b" with 'format.usestore' config disabled"
3573 b" with 'format.usestore' config disabled"
3571 )
3574 )
3572 )
3575 )
3573
3576
3574 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3577 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3575 ui.warn(
3578 ui.warn(
3576 _(
3579 _(
3577 b"ignoring enabled 'format.use-share-safe' config because "
3580 b"ignoring enabled 'format.use-share-safe' config because "
3578 b"it is incompatible with disabled 'format.usestore'"
3581 b"it is incompatible with disabled 'format.usestore'"
3579 b" config\n"
3582 b" config\n"
3580 )
3583 )
3581 )
3584 )
3582 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3585 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3583
3586
3584 return dropped
3587 return dropped
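checkrequirementscompat() only has to answer one question: which already-selected requirements must be dropped because something they depend on (here, the store) is disabled. Stripped of the warnings, that is plain set logic, roughly:

# Standalone sketch; the requirement names are illustrative placeholders.
STORE = b'store'
DEPENDS_ON_STORE = {b'bookmarksinstore', b'share-safe'}

def dropped_requirements(requirements):
    if STORE in requirements:
        return set()
    return DEPENDS_ON_STORE & requirements

assert dropped_requirements({b'revlogv1', b'store', b'share-safe'}) == set()
assert dropped_requirements({b'revlogv1', b'share-safe'}) == {b'share-safe'}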
3585
3588
3586
3589
3587 def filterknowncreateopts(ui, createopts):
3590 def filterknowncreateopts(ui, createopts):
3588 """Filters a dict of repo creation options against options that are known.
3591 """Filters a dict of repo creation options against options that are known.
3589
3592
3590 Receives a dict of repo creation options and returns a dict of those
3593 Receives a dict of repo creation options and returns a dict of those
3591 options that we don't know how to handle.
3594 options that we don't know how to handle.
3592
3595
3593 This function is called as part of repository creation. If the
3596 This function is called as part of repository creation. If the
3594 returned dict contains any items, repository creation will not
3597 returned dict contains any items, repository creation will not
3595 be allowed, as it means there was a request to create a repository
3598 be allowed, as it means there was a request to create a repository
3596 with options not recognized by loaded code.
3599 with options not recognized by loaded code.
3597
3600
3598 Extensions can wrap this function to filter out creation options
3601 Extensions can wrap this function to filter out creation options
3599 they know how to handle.
3602 they know how to handle.
3600 """
3603 """
3601 known = {
3604 known = {
3602 b'backend',
3605 b'backend',
3603 b'lfs',
3606 b'lfs',
3604 b'narrowfiles',
3607 b'narrowfiles',
3605 b'sharedrepo',
3608 b'sharedrepo',
3606 b'sharedrelative',
3609 b'sharedrelative',
3607 b'shareditems',
3610 b'shareditems',
3608 b'shallowfilestore',
3611 b'shallowfilestore',
3609 }
3612 }
3610
3613
3611 return {k: v for k, v in createopts.items() if k not in known}
3614 return {k: v for k, v in createopts.items() if k not in known}
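The contract of filterknowncreateopts() is simply "return whatever you do not recognize"; extensions wrap it, strip the keys they handle, and pass the rest through. A toy version of such a wrapper, outside Mercurial's real extensions.wrapfunction() machinery, could look like:

# Illustrative only; b'myopt' is a hypothetical extension-defined option.
KNOWN = {b'backend', b'lfs', b'narrowfiles', b'sharedrepo',
         b'sharedrelative', b'shareditems', b'shallowfilestore'}

def filter_known(createopts):
    return {k: v for k, v in createopts.items() if k not in KNOWN}

def filter_known_wrapped(createopts):
    unknown = filter_known(createopts)
    unknown.pop(b'myopt', None)  # this "extension" knows how to handle it
    return unknown

assert filter_known_wrapped({b'lfs': True, b'myopt': 1}) == {}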
3612
3615
3613
3616
3614 def createrepository(ui, path, createopts=None):
3617 def createrepository(ui, path, createopts=None):
3615 """Create a new repository in a vfs.
3618 """Create a new repository in a vfs.
3616
3619
3617 ``path`` path to the new repo's working directory.
3620 ``path`` path to the new repo's working directory.
3618 ``createopts`` options for the new repository.
3621 ``createopts`` options for the new repository.
3619
3622
3620 The following keys for ``createopts`` are recognized:
3623 The following keys for ``createopts`` are recognized:
3621
3624
3622 backend
3625 backend
3623 The storage backend to use.
3626 The storage backend to use.
3624 lfs
3627 lfs
3625 Repository will be created with ``lfs`` requirement. The lfs extension
3628 Repository will be created with ``lfs`` requirement. The lfs extension
3626 will automatically be loaded when the repository is accessed.
3629 will automatically be loaded when the repository is accessed.
3627 narrowfiles
3630 narrowfiles
3628 Set up repository to support narrow file storage.
3631 Set up repository to support narrow file storage.
3629 sharedrepo
3632 sharedrepo
3630 Repository object from which storage should be shared.
3633 Repository object from which storage should be shared.
3631 sharedrelative
3634 sharedrelative
3632 Boolean indicating if the path to the shared repo should be
3635 Boolean indicating if the path to the shared repo should be
3633 stored as relative. By default, the pointer to the "parent" repo
3636 stored as relative. By default, the pointer to the "parent" repo
3634 is stored as an absolute path.
3637 is stored as an absolute path.
3635 shareditems
3638 shareditems
3636 Set of items to share to the new repository (in addition to storage).
3639 Set of items to share to the new repository (in addition to storage).
3637 shallowfilestore
3640 shallowfilestore
3638 Indicates that storage for files should be shallow (not all ancestor
3641 Indicates that storage for files should be shallow (not all ancestor
3639 revisions are known).
3642 revisions are known).
3640 """
3643 """
3641 createopts = defaultcreateopts(ui, createopts=createopts)
3644 createopts = defaultcreateopts(ui, createopts=createopts)
3642
3645
3643 unknownopts = filterknowncreateopts(ui, createopts)
3646 unknownopts = filterknowncreateopts(ui, createopts)
3644
3647
3645 if not isinstance(unknownopts, dict):
3648 if not isinstance(unknownopts, dict):
3646 raise error.ProgrammingError(
3649 raise error.ProgrammingError(
3647 b'filterknowncreateopts() did not return a dict'
3650 b'filterknowncreateopts() did not return a dict'
3648 )
3651 )
3649
3652
3650 if unknownopts:
3653 if unknownopts:
3651 raise error.Abort(
3654 raise error.Abort(
3652 _(
3655 _(
3653 b'unable to create repository because of unknown '
3656 b'unable to create repository because of unknown '
3654 b'creation option: %s'
3657 b'creation option: %s'
3655 )
3658 )
3656 % b', '.join(sorted(unknownopts)),
3659 % b', '.join(sorted(unknownopts)),
3657 hint=_(b'is a required extension not loaded?'),
3660 hint=_(b'is a required extension not loaded?'),
3658 )
3661 )
3659
3662
3660 requirements = newreporequirements(ui, createopts=createopts)
3663 requirements = newreporequirements(ui, createopts=createopts)
3661 requirements -= checkrequirementscompat(ui, requirements)
3664 requirements -= checkrequirementscompat(ui, requirements)
3662
3665
3663 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3666 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3664
3667
3665 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3668 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3666 if hgvfs.exists():
3669 if hgvfs.exists():
3667 raise error.RepoError(_(b'repository %s already exists') % path)
3670 raise error.RepoError(_(b'repository %s already exists') % path)
3668
3671
3669 if b'sharedrepo' in createopts:
3672 if b'sharedrepo' in createopts:
3670 sharedpath = createopts[b'sharedrepo'].sharedpath
3673 sharedpath = createopts[b'sharedrepo'].sharedpath
3671
3674
3672 if createopts.get(b'sharedrelative'):
3675 if createopts.get(b'sharedrelative'):
3673 try:
3676 try:
3674 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3677 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3675 sharedpath = util.pconvert(sharedpath)
3678 sharedpath = util.pconvert(sharedpath)
3676 except (IOError, ValueError) as e:
3679 except (IOError, ValueError) as e:
3677 # ValueError is raised on Windows if the drive letters differ
3680 # ValueError is raised on Windows if the drive letters differ
3678 # on each path.
3681 # on each path.
3679 raise error.Abort(
3682 raise error.Abort(
3680 _(b'cannot calculate relative path'),
3683 _(b'cannot calculate relative path'),
3681 hint=stringutil.forcebytestr(e),
3684 hint=stringutil.forcebytestr(e),
3682 )
3685 )
3683
3686
3684 if not wdirvfs.exists():
3687 if not wdirvfs.exists():
3685 wdirvfs.makedirs()
3688 wdirvfs.makedirs()
3686
3689
3687 hgvfs.makedir(notindexed=True)
3690 hgvfs.makedir(notindexed=True)
3688 if b'sharedrepo' not in createopts:
3691 if b'sharedrepo' not in createopts:
3689 hgvfs.mkdir(b'cache')
3692 hgvfs.mkdir(b'cache')
3690 hgvfs.mkdir(b'wcache')
3693 hgvfs.mkdir(b'wcache')
3691
3694
3692 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3695 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3693 if has_store and b'sharedrepo' not in createopts:
3696 if has_store and b'sharedrepo' not in createopts:
3694 hgvfs.mkdir(b'store')
3697 hgvfs.mkdir(b'store')
3695
3698
3696 # We create an invalid changelog outside the store so very old
3699 # We create an invalid changelog outside the store so very old
3697 # Mercurial versions (which didn't know about the requirements
3700 # Mercurial versions (which didn't know about the requirements
3698 # file) encounter an error on reading the changelog. This
3701 # file) encounter an error on reading the changelog. This
3699 # effectively locks out old clients and prevents them from
3702 # effectively locks out old clients and prevents them from
3700 # mucking with a repo in an unknown format.
3703 # mucking with a repo in an unknown format.
3701 #
3704 #
3702 # The revlog header has version 65535, which won't be recognized by
3705 # The revlog header has version 65535, which won't be recognized by
3703 # such old clients.
3706 # such old clients.
3704 hgvfs.append(
3707 hgvfs.append(
3705 b'00changelog.i',
3708 b'00changelog.i',
3706 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3709 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3707 b'layout',
3710 b'layout',
3708 )
3711 )
3709
3712
3710 # Filter the requirements into working copy and store ones
3713 # Filter the requirements into working copy and store ones
3711 wcreq, storereq = scmutil.filterrequirements(requirements)
3714 wcreq, storereq = scmutil.filterrequirements(requirements)
3712 # write working copy ones
3715 # write working copy ones
3713 scmutil.writerequires(hgvfs, wcreq)
3716 scmutil.writerequires(hgvfs, wcreq)
3714 # If there are store requirements and the current repository
3717 # If there are store requirements and the current repository
3715 # is not a shared one, write stored requirements
3718 # is not a shared one, write stored requirements
3716 # For new shared repository, we don't need to write the store
3719 # For new shared repository, we don't need to write the store
3717 # requirements as they are already present in store requires
3720 # requirements as they are already present in store requires
3718 if storereq and b'sharedrepo' not in createopts:
3721 if storereq and b'sharedrepo' not in createopts:
3719 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3722 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3720 scmutil.writerequires(storevfs, storereq)
3723 scmutil.writerequires(storevfs, storereq)
3721
3724
3722 # Write out file telling readers where to find the shared store.
3725 # Write out file telling readers where to find the shared store.
3723 if b'sharedrepo' in createopts:
3726 if b'sharedrepo' in createopts:
3724 hgvfs.write(b'sharedpath', sharedpath)
3727 hgvfs.write(b'sharedpath', sharedpath)
3725
3728
3726 if createopts.get(b'shareditems'):
3729 if createopts.get(b'shareditems'):
3727 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3730 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3728 hgvfs.write(b'shared', shared)
3731 hgvfs.write(b'shared', shared)
3729
3732
3730
3733
3731 def poisonrepository(repo):
3734 def poisonrepository(repo):
3732 """Poison a repository instance so it can no longer be used."""
3735 """Poison a repository instance so it can no longer be used."""
3733 # Perform any cleanup on the instance.
3736 # Perform any cleanup on the instance.
3734 repo.close()
3737 repo.close()
3735
3738
3736 # Our strategy is to replace the type of the object with one that
3739 # Our strategy is to replace the type of the object with one that
3737 # has all attribute lookups result in error.
3740 # has all attribute lookups result in error.
3738 #
3741 #
3739 # But we have to allow the close() method because some constructors
3742 # But we have to allow the close() method because some constructors
3740 # of repos call close() on repo references.
3743 # of repos call close() on repo references.
3741 class poisonedrepository(object):
3744 class poisonedrepository(object):
3742 def __getattribute__(self, item):
3745 def __getattribute__(self, item):
3743 if item == 'close':
3746 if item == 'close':
3744 return object.__getattribute__(self, item)
3747 return object.__getattribute__(self, item)
3745
3748
3746 raise error.ProgrammingError(
3749 raise error.ProgrammingError(
3747 b'repo instances should not be used after unshare'
3750 b'repo instances should not be used after unshare'
3748 )
3751 )
3749
3752
3750 def close(self):
3753 def close(self):
3751 pass
3754 pass
3752
3755
3753 # We may have a repoview, which intercepts __setattr__. So be sure
3756 # We may have a repoview, which intercepts __setattr__. So be sure
3754 # we operate at the lowest level possible.
3757 # we operate at the lowest level possible.
3755 object.__setattr__(repo, '__class__', poisonedrepository)
3758 object.__setattr__(repo, '__class__', poisonedrepository)
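The __class__ swap that poisonrepository() relies on works for any pair of plain Python classes with compatible layouts, and is easy to demonstrate in isolation:

# Standalone demonstration; not tied to Mercurial objects.
class _Poisoned(object):
    def __getattribute__(self, item):
        if item == 'close':
            return object.__getattribute__(self, item)
        raise RuntimeError('object used after being poisoned')

    def close(self):
        pass  # poisoned close() is a harmless no-op

class Victim(object):
    data = 42

    def close(self):
        pass

v = Victim()
object.__setattr__(v, '__class__', _Poisoned)
v.close()      # still allowed, now routed to _Poisoned.close()
try:
    v.data     # every other attribute access raises
except RuntimeError:
    pass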
@@ -1,295 +1,297 b''
1 $ . "$TESTDIR/narrow-library.sh"
1 $ . "$TESTDIR/narrow-library.sh"
2
2
3 $ hg init master
3 $ hg init master
4 $ cd master
4 $ cd master
5 $ cat >> .hg/hgrc <<EOF
5 $ cat >> .hg/hgrc <<EOF
6 > [narrow]
6 > [narrow]
7 > serveellipses=True
7 > serveellipses=True
8 > EOF
8 > EOF
9 $ mkdir dir
9 $ mkdir dir
10 $ mkdir dir/src
10 $ mkdir dir/src
11 $ cd dir/src
11 $ cd dir/src
12 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
12 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
13 $ cd ..
13 $ cd ..
14 $ mkdir tests
14 $ mkdir tests
15 $ cd tests
15 $ cd tests
16 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done
16 $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done
17 $ cd ../../..
17 $ cd ../../..
18
18
19 Only path: and rootfilesin: pattern prefixes are allowed
19 Only path: and rootfilesin: pattern prefixes are allowed
20
20
21 $ hg clone --narrow ssh://user@dummy/master badnarrow --noupdate --include 'glob:**'
21 $ hg clone --narrow ssh://user@dummy/master badnarrow --noupdate --include 'glob:**'
22 abort: invalid prefix on narrow pattern: glob:**
22 abort: invalid prefix on narrow pattern: glob:**
23 (narrow patterns must begin with one of the following: path:, rootfilesin:)
23 (narrow patterns must begin with one of the following: path:, rootfilesin:)
24 [255]
24 [255]
25
25
26 $ hg clone --narrow ssh://user@dummy/master badnarrow --noupdate --exclude 'set:ignored'
26 $ hg clone --narrow ssh://user@dummy/master badnarrow --noupdate --exclude 'set:ignored'
27 abort: invalid prefix on narrow pattern: set:ignored
27 abort: invalid prefix on narrow pattern: set:ignored
28 (narrow patterns must begin with one of the following: path:, rootfilesin:)
28 (narrow patterns must begin with one of the following: path:, rootfilesin:)
29 [255]
29 [255]
30
30
31 narrow clone a file, f10
31 narrow clone a file, f10
32
32
33 $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10"
33 $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10"
34 requesting all changes
34 requesting all changes
35 adding changesets
35 adding changesets
36 adding manifests
36 adding manifests
37 adding file changes
37 adding file changes
38 added 3 changesets with 1 changes to 1 files
38 added 3 changesets with 1 changes to 1 files
39 new changesets *:* (glob)
39 new changesets *:* (glob)
40 $ cd narrow
40 $ cd narrow
41 $ cat .hg/requires | grep -v generaldelta
41 $ cat .hg/requires | grep -v generaldelta
42 dotencode
42 dotencode
43 fncache
43 fncache
44 narrowhg-experimental
44 narrowhg-experimental
45 persistent-nodemap (rust !)
45 persistent-nodemap (rust !)
46 revlog-compression-zstd (zstd !)
46 revlog-compression-zstd (zstd !)
47 revlogv1
47 revlogv1
48 sparserevlog
48 sparserevlog
49 store
49 store
50 testonly-simplestore (reposimplestore !)
50 testonly-simplestore (reposimplestore !)
51
51
52 $ hg tracked
52 $ hg tracked
53 I path:dir/src/f10
53 I path:dir/src/f10
54 $ hg tracked
54 $ hg tracked
55 I path:dir/src/f10
55 I path:dir/src/f10
56 $ hg update
56 $ hg update
57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 $ find * | sort
58 $ find * | sort
59 dir
59 dir
60 dir/src
60 dir/src
61 dir/src/f10
61 dir/src/f10
62 $ cat dir/src/f10
62 $ cat dir/src/f10
63 10
63 10
64
64
65 $ cd ..
65 $ cd ..
66
66
67 BUG: local-to-local narrow clones should work, but don't.
67 local-to-local narrow clones work
68
68
69 $ hg clone --narrow master narrow-via-localpeer --noupdate --include "dir/src/f10"
69 $ hg clone --narrow master narrow-via-localpeer --noupdate --include "dir/src/f10"
70 requesting all changes
70 requesting all changes
71 abort: server does not support narrow clones
71 adding changesets
72 [255]
72 adding manifests
73 adding file changes
74 added 3 changesets with 1 changes to 1 files
75 new changesets 5d21aaea77f8:26ce255d5b5d
73 $ hg tracked -R narrow-via-localpeer
76 $ hg tracked -R narrow-via-localpeer
74 abort: repository narrow-via-localpeer not found
77 I path:dir/src/f10
75 [255]
76 $ rm -Rf narrow-via-localpeer
78 $ rm -Rf narrow-via-localpeer
77
79
78 narrow clone with a newline should fail
80 narrow clone with a newline should fail
79
81
80 $ hg clone --narrow ssh://user@dummy/master narrow_fail --noupdate --include 'dir/src/f10
82 $ hg clone --narrow ssh://user@dummy/master narrow_fail --noupdate --include 'dir/src/f10
81 > '
83 > '
82 abort: newlines are not allowed in narrowspec paths
84 abort: newlines are not allowed in narrowspec paths
83 [255]
85 [255]
84
86
85 narrow clone a directory, tests/, except tests/t19
87 narrow clone a directory, tests/, except tests/t19
86
88
87 $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19"
89 $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19"
88 requesting all changes
90 requesting all changes
89 adding changesets
91 adding changesets
90 adding manifests
92 adding manifests
91 adding file changes
93 adding file changes
92 added 21 changesets with 19 changes to 19 files
94 added 21 changesets with 19 changes to 19 files
93 new changesets *:* (glob)
95 new changesets *:* (glob)
94 $ cd narrowdir
96 $ cd narrowdir
95 $ hg tracked
97 $ hg tracked
96 I path:dir/tests
98 I path:dir/tests
97 X path:dir/tests/t19
99 X path:dir/tests/t19
98 $ hg tracked
100 $ hg tracked
99 I path:dir/tests
101 I path:dir/tests
100 X path:dir/tests/t19
102 X path:dir/tests/t19
101 $ hg update
103 $ hg update
102 19 files updated, 0 files merged, 0 files removed, 0 files unresolved
104 19 files updated, 0 files merged, 0 files removed, 0 files unresolved
103 $ find * | sort
105 $ find * | sort
104 dir
106 dir
105 dir/tests
107 dir/tests
106 dir/tests/t1
108 dir/tests/t1
107 dir/tests/t10
109 dir/tests/t10
108 dir/tests/t11
110 dir/tests/t11
109 dir/tests/t12
111 dir/tests/t12
110 dir/tests/t13
112 dir/tests/t13
111 dir/tests/t14
113 dir/tests/t14
112 dir/tests/t15
114 dir/tests/t15
113 dir/tests/t16
115 dir/tests/t16
114 dir/tests/t17
116 dir/tests/t17
115 dir/tests/t18
117 dir/tests/t18
116 dir/tests/t2
118 dir/tests/t2
117 dir/tests/t20
119 dir/tests/t20
118 dir/tests/t3
120 dir/tests/t3
119 dir/tests/t4
121 dir/tests/t4
120 dir/tests/t5
122 dir/tests/t5
121 dir/tests/t6
123 dir/tests/t6
122 dir/tests/t7
124 dir/tests/t7
123 dir/tests/t8
125 dir/tests/t8
124 dir/tests/t9
126 dir/tests/t9
125
127
126 $ cd ..
128 $ cd ..
127
129
128 narrow clone everything but a directory (tests/)
130 narrow clone everything but a directory (tests/)
129
131
130 $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests"
132 $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests"
131 requesting all changes
133 requesting all changes
132 adding changesets
134 adding changesets
133 adding manifests
135 adding manifests
134 adding file changes
136 adding file changes
135 added 21 changesets with 20 changes to 20 files
137 added 21 changesets with 20 changes to 20 files
136 new changesets *:* (glob)
138 new changesets *:* (glob)
137 $ cd narrowroot
139 $ cd narrowroot
138 $ hg tracked
140 $ hg tracked
139 I path:.
141 I path:.
140 X path:dir/tests
142 X path:dir/tests
141 $ hg tracked
143 $ hg tracked
142 I path:.
144 I path:.
143 X path:dir/tests
145 X path:dir/tests
144 $ hg update
146 $ hg update
145 20 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 20 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 $ find * | sort
148 $ find * | sort
147 dir
149 dir
148 dir/src
150 dir/src
149 dir/src/f1
151 dir/src/f1
150 dir/src/f10
152 dir/src/f10
151 dir/src/f11
153 dir/src/f11
152 dir/src/f12
154 dir/src/f12
153 dir/src/f13
155 dir/src/f13
154 dir/src/f14
156 dir/src/f14
155 dir/src/f15
157 dir/src/f15
156 dir/src/f16
158 dir/src/f16
157 dir/src/f17
159 dir/src/f17
158 dir/src/f18
160 dir/src/f18
159 dir/src/f19
161 dir/src/f19
160 dir/src/f2
162 dir/src/f2
161 dir/src/f20
163 dir/src/f20
162 dir/src/f3
164 dir/src/f3
163 dir/src/f4
165 dir/src/f4
164 dir/src/f5
166 dir/src/f5
165 dir/src/f6
167 dir/src/f6
166 dir/src/f7
168 dir/src/f7
167 dir/src/f8
169 dir/src/f8
168 dir/src/f9
170 dir/src/f9
169
171
170 $ cd ..
172 $ cd ..
171
173
172 narrow clone no paths at all
174 narrow clone no paths at all
173
175
174 $ hg clone --narrow ssh://user@dummy/master narrowempty --noupdate
176 $ hg clone --narrow ssh://user@dummy/master narrowempty --noupdate
175 requesting all changes
177 requesting all changes
176 adding changesets
178 adding changesets
177 adding manifests
179 adding manifests
178 adding file changes
180 adding file changes
179 added 1 changesets with 0 changes to 0 files
181 added 1 changesets with 0 changes to 0 files
180 new changesets * (glob)
182 new changesets * (glob)
181 $ cd narrowempty
183 $ cd narrowempty
182 $ hg tracked
184 $ hg tracked
183 $ hg update
185 $ hg update
184 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
186 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
185 $ ls -A
187 $ ls -A
186 .hg
188 .hg
187
189
188 $ cd ..
190 $ cd ..
189
191
190 simple clone
192 simple clone
191 $ hg clone ssh://user@dummy/master simpleclone
193 $ hg clone ssh://user@dummy/master simpleclone
192 requesting all changes
194 requesting all changes
193 adding changesets
195 adding changesets
194 adding manifests
196 adding manifests
195 adding file changes
197 adding file changes
196 added 40 changesets with 40 changes to 40 files
198 added 40 changesets with 40 changes to 40 files
197 new changesets * (glob)
199 new changesets * (glob)
198 updating to branch default
200 updating to branch default
199 40 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 40 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 $ cd simpleclone
202 $ cd simpleclone
201 $ find * | sort
203 $ find * | sort
202 dir
204 dir
203 dir/src
205 dir/src
204 dir/src/f1
206 dir/src/f1
205 dir/src/f10
207 dir/src/f10
206 dir/src/f11
208 dir/src/f11
207 dir/src/f12
209 dir/src/f12
208 dir/src/f13
210 dir/src/f13
209 dir/src/f14
211 dir/src/f14
210 dir/src/f15
212 dir/src/f15
211 dir/src/f16
213 dir/src/f16
212 dir/src/f17
214 dir/src/f17
213 dir/src/f18
215 dir/src/f18
214 dir/src/f19
216 dir/src/f19
215 dir/src/f2
217 dir/src/f2
216 dir/src/f20
218 dir/src/f20
217 dir/src/f3
219 dir/src/f3
218 dir/src/f4
220 dir/src/f4
219 dir/src/f5
221 dir/src/f5
220 dir/src/f6
222 dir/src/f6
221 dir/src/f7
223 dir/src/f7
222 dir/src/f8
224 dir/src/f8
223 dir/src/f9
225 dir/src/f9
224 dir/tests
226 dir/tests
225 dir/tests/t1
227 dir/tests/t1
226 dir/tests/t10
228 dir/tests/t10
227 dir/tests/t11
229 dir/tests/t11
228 dir/tests/t12
230 dir/tests/t12
229 dir/tests/t13
231 dir/tests/t13
230 dir/tests/t14
232 dir/tests/t14
231 dir/tests/t15
233 dir/tests/t15
232 dir/tests/t16
234 dir/tests/t16
233 dir/tests/t17
235 dir/tests/t17
234 dir/tests/t18
236 dir/tests/t18
235 dir/tests/t19
237 dir/tests/t19
236 dir/tests/t2
238 dir/tests/t2
237 dir/tests/t20
239 dir/tests/t20
238 dir/tests/t3
240 dir/tests/t3
239 dir/tests/t4
241 dir/tests/t4
240 dir/tests/t5
242 dir/tests/t5
241 dir/tests/t6
243 dir/tests/t6
242 dir/tests/t7
244 dir/tests/t7
243 dir/tests/t8
245 dir/tests/t8
244 dir/tests/t9
246 dir/tests/t9
245
247
246 $ cd ..
248 $ cd ..
247
249
248 Testing the --narrowspec flag to clone
250 Testing the --narrowspec flag to clone
249
251
250 $ cat >> narrowspecs <<EOF
252 $ cat >> narrowspecs <<EOF
251 > %include foo
253 > %include foo
252 > [include]
254 > [include]
253 > path:dir/tests/
255 > path:dir/tests/
254 > path:dir/src/f12
256 > path:dir/src/f12
255 > EOF
257 > EOF
256
258
257 $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
259 $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
258 reading narrowspec from '$TESTTMP/narrowspecs'
260 reading narrowspec from '$TESTTMP/narrowspecs'
259 config error: cannot specify other files using '%include' in narrowspec
261 config error: cannot specify other files using '%include' in narrowspec
260 [30]
262 [30]
261
263
262 $ cat > narrowspecs <<EOF
264 $ cat > narrowspecs <<EOF
263 > [include]
265 > [include]
264 > path:dir/tests/
266 > path:dir/tests/
265 > path:dir/src/f12
267 > path:dir/src/f12
266 > EOF
268 > EOF
267
269
268 $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
270 $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
269 reading narrowspec from '$TESTTMP/narrowspecs'
271 reading narrowspec from '$TESTTMP/narrowspecs'
270 requesting all changes
272 requesting all changes
271 adding changesets
273 adding changesets
272 adding manifests
274 adding manifests
273 adding file changes
275 adding file changes
274 added 23 changesets with 21 changes to 21 files
276 added 23 changesets with 21 changes to 21 files
275 new changesets c13e3773edb4:26ce255d5b5d
277 new changesets c13e3773edb4:26ce255d5b5d
276 updating to branch default
278 updating to branch default
277 21 files updated, 0 files merged, 0 files removed, 0 files unresolved
279 21 files updated, 0 files merged, 0 files removed, 0 files unresolved
278 $ cd specfile
280 $ cd specfile
279 $ hg tracked
281 $ hg tracked
280 I path:dir/src/f12
282 I path:dir/src/f12
281 I path:dir/tests
283 I path:dir/tests
282 $ cd ..
284 $ cd ..
283
285
284 Narrow spec with invalid patterns is rejected
286 Narrow spec with invalid patterns is rejected
285
287
286 $ cat > narrowspecs <<EOF
288 $ cat > narrowspecs <<EOF
287 > [include]
289 > [include]
288 > glob:**
290 > glob:**
289 > EOF
291 > EOF
290
292
291 $ hg clone ssh://user@dummy/master badspecfile --narrowspec narrowspecs
293 $ hg clone ssh://user@dummy/master badspecfile --narrowspec narrowspecs
292 reading narrowspec from '$TESTTMP/narrowspecs'
294 reading narrowspec from '$TESTTMP/narrowspecs'
293 abort: invalid prefix on narrow pattern: glob:**
295 abort: invalid prefix on narrow pattern: glob:**
294 (narrow patterns must begin with one of the following: path:, rootfilesin:)
296 (narrow patterns must begin with one of the following: path:, rootfilesin:)
295 [255]
297 [255]
@@ -1,524 +1,524 b''
1 #testcases flat tree
1 #testcases flat tree
2 #testcases lfs-on lfs-off
2 #testcases lfs-on lfs-off
3
3
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > evolution=createmarkers
6 > evolution=createmarkers
7 > EOF
7 > EOF
8
8
9 #if lfs-on
9 #if lfs-on
10 $ cat >> $HGRCPATH <<EOF
10 $ cat >> $HGRCPATH <<EOF
11 > [extensions]
11 > [extensions]
12 > lfs =
12 > lfs =
13 > EOF
13 > EOF
14 #endif
14 #endif
15
15
16 $ . "$TESTDIR/narrow-library.sh"
16 $ . "$TESTDIR/narrow-library.sh"
17
17
18 #if tree
18 #if tree
19 $ cat << EOF >> $HGRCPATH
19 $ cat << EOF >> $HGRCPATH
20 > [experimental]
20 > [experimental]
21 > treemanifest = 1
21 > treemanifest = 1
22 > EOF
22 > EOF
23 #endif
23 #endif
24
24
25 $ hg init master
25 $ hg init master
26 $ cd master
26 $ cd master
27 $ cat >> .hg/hgrc <<EOF
27 $ cat >> .hg/hgrc <<EOF
28 > [narrow]
28 > [narrow]
29 > serveellipses=True
29 > serveellipses=True
30 > EOF
30 > EOF
31 $ for x in `$TESTDIR/seq.py 0 10`
31 $ for x in `$TESTDIR/seq.py 0 10`
32 > do
32 > do
33 > mkdir d$x
33 > mkdir d$x
34 > echo $x > d$x/f
34 > echo $x > d$x/f
35 > hg add d$x/f
35 > hg add d$x/f
36 > hg commit -m "add d$x/f"
36 > hg commit -m "add d$x/f"
37 > done
37 > done
38 $ hg log -T "{rev}: {desc}\n"
38 $ hg log -T "{rev}: {desc}\n"
39 10: add d10/f
39 10: add d10/f
40 9: add d9/f
40 9: add d9/f
41 8: add d8/f
41 8: add d8/f
42 7: add d7/f
42 7: add d7/f
43 6: add d6/f
43 6: add d6/f
44 5: add d5/f
44 5: add d5/f
45 4: add d4/f
45 4: add d4/f
46 3: add d3/f
46 3: add d3/f
47 2: add d2/f
47 2: add d2/f
48 1: add d1/f
48 1: add d1/f
49 0: add d0/f
49 0: add d0/f
50 $ cd ..
50 $ cd ..
51
51
52 Error if '.' or '..' are in the directory to track.
52 Error if '.' or '..' are in the directory to track.
53 $ hg clone --narrow ssh://user@dummy/master foo --include ./asdf
53 $ hg clone --narrow ssh://user@dummy/master foo --include ./asdf
54 abort: "." and ".." are not allowed in narrowspec paths
54 abort: "." and ".." are not allowed in narrowspec paths
55 [255]
55 [255]
56 $ hg clone --narrow ssh://user@dummy/master foo --include asdf/..
56 $ hg clone --narrow ssh://user@dummy/master foo --include asdf/..
57 abort: "." and ".." are not allowed in narrowspec paths
57 abort: "." and ".." are not allowed in narrowspec paths
58 [255]
58 [255]
59 $ hg clone --narrow ssh://user@dummy/master foo --include a/./c
59 $ hg clone --narrow ssh://user@dummy/master foo --include a/./c
60 abort: "." and ".." are not allowed in narrowspec paths
60 abort: "." and ".." are not allowed in narrowspec paths
61 [255]
61 [255]
62
62
63 Names with '.' in them are OK.
63 Names with '.' in them are OK.
64 $ hg clone --narrow ssh://user@dummy/master should-work --include a/.b/c
64 $ hg clone --narrow ./master should-work --include a/.b/c
65 requesting all changes
65 requesting all changes
66 adding changesets
66 adding changesets
67 adding manifests
67 adding manifests
68 adding file changes
68 adding file changes
69 added 1 changesets with 0 changes to 0 files
69 added 1 changesets with 0 changes to 0 files
70 new changesets * (glob)
70 new changesets * (glob)
71 updating to branch default
71 updating to branch default
72 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
73
73
74 Test repo with local changes
74 Test repo with local changes
75 $ hg clone --narrow ssh://user@dummy/master narrow-local-changes --include d0 --include d3 --include d6
75 $ hg clone --narrow ssh://user@dummy/master narrow-local-changes --include d0 --include d3 --include d6
76 requesting all changes
76 requesting all changes
77 adding changesets
77 adding changesets
78 adding manifests
78 adding manifests
79 adding file changes
79 adding file changes
80 added 6 changesets with 3 changes to 3 files
80 added 6 changesets with 3 changes to 3 files
81 new changesets *:* (glob)
81 new changesets *:* (glob)
82 updating to branch default
82 updating to branch default
83 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
83 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 $ cd narrow-local-changes
84 $ cd narrow-local-changes
85 $ echo local change >> d0/f
85 $ echo local change >> d0/f
86 $ hg ci -m 'local change to d0'
86 $ hg ci -m 'local change to d0'
87 $ hg co '.^'
87 $ hg co '.^'
88 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 $ echo local change >> d3/f
89 $ echo local change >> d3/f
90 $ hg ci -m 'local hidden change to d3'
90 $ hg ci -m 'local hidden change to d3'
91 created new head
91 created new head
92 $ hg ci --amend -m 'local change to d3'
92 $ hg ci --amend -m 'local change to d3'
93 $ hg tracked --removeinclude d0
93 $ hg tracked --removeinclude d0
94 comparing with ssh://user@dummy/master
94 comparing with ssh://user@dummy/master
95 searching for changes
95 searching for changes
96 looking for local changes to affected paths
96 looking for local changes to affected paths
97 The following changeset(s) or their ancestors have local changes not on the remote:
97 The following changeset(s) or their ancestors have local changes not on the remote:
98 * (glob)
98 * (glob)
99 abort: local changes found
99 abort: local changes found
100 (use --force-delete-local-changes to ignore)
100 (use --force-delete-local-changes to ignore)
101 [20]
101 [20]
102 Check that nothing was removed by the failed attempts
102 Check that nothing was removed by the failed attempts
103 $ hg tracked
103 $ hg tracked
104 I path:d0
104 I path:d0
105 I path:d3
105 I path:d3
106 I path:d6
106 I path:d6
107 $ hg files
107 $ hg files
108 d0/f
108 d0/f
109 d3/f
109 d3/f
110 d6/f
110 d6/f
111 $ find *
111 $ find *
112 d0
112 d0
113 d0/f
113 d0/f
114 d3
114 d3
115 d3/f
115 d3/f
116 d6
116 d6
117 d6/f
117 d6/f
118 $ hg verify -q
118 $ hg verify -q
119 Force deletion of local changes
119 Force deletion of local changes
120 $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
120 $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
121 8: local change to d3
121 8: local change to d3
122 6: local change to d0
122 6: local change to d0
123 5: add d10/f outsidenarrow
123 5: add d10/f outsidenarrow
124 4: add d6/f
124 4: add d6/f
125 3: add d5/f outsidenarrow
125 3: add d5/f outsidenarrow
126 2: add d3/f
126 2: add d3/f
127 1: add d2/f outsidenarrow
127 1: add d2/f outsidenarrow
128 0: add d0/f
128 0: add d0/f
129 $ hg tracked --removeinclude d0 --force-delete-local-changes
129 $ hg tracked --removeinclude d0 --force-delete-local-changes
130 comparing with ssh://user@dummy/master
130 comparing with ssh://user@dummy/master
131 searching for changes
131 searching for changes
132 looking for local changes to affected paths
132 looking for local changes to affected paths
133 The following changeset(s) or their ancestors have local changes not on the remote:
133 The following changeset(s) or their ancestors have local changes not on the remote:
134 * (glob)
134 * (glob)
135 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
135 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
136 deleting data/d0/f.i (reporevlogstore !)
136 deleting data/d0/f.i (reporevlogstore !)
137 deleting meta/d0/00manifest.i (tree !)
137 deleting meta/d0/00manifest.i (tree !)
138 deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
138 deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
139 deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
139 deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
140 deleting data/d0/f/index (reposimplestore !)
140 deleting data/d0/f/index (reposimplestore !)
141
141
142 $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
142 $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
143 7: local change to d3
143 7: local change to d3
144 5: add d10/f outsidenarrow
144 5: add d10/f outsidenarrow
145 4: add d6/f
145 4: add d6/f
146 3: add d5/f outsidenarrow
146 3: add d5/f outsidenarrow
147 2: add d3/f
147 2: add d3/f
148 1: add d2/f outsidenarrow
148 1: add d2/f outsidenarrow
149 0: add d0/f outsidenarrow
149 0: add d0/f outsidenarrow
150 Can restore stripped local changes after widening
150 Can restore stripped local changes after widening
151 $ hg tracked --addinclude d0 -q
151 $ hg tracked --addinclude d0 -q
152 $ hg unbundle .hg/strip-backup/*-narrow.hg -q
152 $ hg unbundle .hg/strip-backup/*-narrow.hg -q
153 $ hg --hidden co -r 'desc("local change to d0")' -q
153 $ hg --hidden co -r 'desc("local change to d0")' -q
154 $ cat d0/f
154 $ cat d0/f
155 0
155 0
156 local change
156 local change
157 Pruned commits affecting removed paths should not prevent narrowing
157 Pruned commits affecting removed paths should not prevent narrowing
158 $ hg co '.^'
158 $ hg co '.^'
159 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
160 $ hg debugobsolete `hg log -T '{node}' -r 'desc("local change to d0")'`
160 $ hg debugobsolete `hg log -T '{node}' -r 'desc("local change to d0")'`
161 1 new obsolescence markers
161 1 new obsolescence markers
162 obsoleted 1 changesets
162 obsoleted 1 changesets
163 $ hg tracked --removeinclude d0
163 $ hg tracked --removeinclude d0
164 comparing with ssh://user@dummy/master
164 comparing with ssh://user@dummy/master
165 searching for changes
165 searching for changes
166 looking for local changes to affected paths
166 looking for local changes to affected paths
167 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
167 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
168 deleting data/d0/f.i (reporevlogstore !)
168 deleting data/d0/f.i (reporevlogstore !)
169 deleting meta/d0/00manifest.i (tree !)
169 deleting meta/d0/00manifest.i (tree !)
170 deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
170 deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
171 deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
171 deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
172 deleting data/d0/f/index (reposimplestore !)
172 deleting data/d0/f/index (reposimplestore !)
173
173
174 Updates off of stripped commit if necessary
174 Updates off of stripped commit if necessary
175 $ hg co -r 'desc("local change to d3")' -q
175 $ hg co -r 'desc("local change to d3")' -q
176 $ echo local change >> d6/f
176 $ echo local change >> d6/f
177 $ hg ci -m 'local change to d6'
177 $ hg ci -m 'local change to d6'
178 $ hg tracked --removeinclude d3 --force-delete-local-changes
178 $ hg tracked --removeinclude d3 --force-delete-local-changes
179 comparing with ssh://user@dummy/master
179 comparing with ssh://user@dummy/master
180 searching for changes
180 searching for changes
181 looking for local changes to affected paths
181 looking for local changes to affected paths
182 The following changeset(s) or their ancestors have local changes not on the remote:
182 The following changeset(s) or their ancestors have local changes not on the remote:
183 * (glob)
183 * (glob)
184 * (glob)
184 * (glob)
185 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
185 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
186 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
186 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
187 deleting data/d3/f.i (reporevlogstore !)
187 deleting data/d3/f.i (reporevlogstore !)
188 deleting meta/d3/00manifest.i (tree !)
188 deleting meta/d3/00manifest.i (tree !)
189 deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
189 deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
190 deleting data/d3/f/99fa7136105a15e2045ce3d9152e4837c5349e4d (reposimplestore !)
190 deleting data/d3/f/99fa7136105a15e2045ce3d9152e4837c5349e4d (reposimplestore !)
191 deleting data/d3/f/index (reposimplestore !)
191 deleting data/d3/f/index (reposimplestore !)
192 $ hg log -T '{desc}\n' -r .
192 $ hg log -T '{desc}\n' -r .
193 add d10/f
193 add d10/f
194 Updates to nullid if necessary
194 Updates to nullid if necessary
195 $ hg tracked --addinclude d3 -q
195 $ hg tracked --addinclude d3 -q
196 $ hg co null -q
196 $ hg co null -q
197 $ mkdir d3
197 $ mkdir d3
198 $ echo local change > d3/f
198 $ echo local change > d3/f
199 $ hg add d3/f
199 $ hg add d3/f
200 $ hg ci -m 'local change to d3'
200 $ hg ci -m 'local change to d3'
201 created new head
201 created new head
202 $ hg tracked --removeinclude d3 --force-delete-local-changes
202 $ hg tracked --removeinclude d3 --force-delete-local-changes
203 comparing with ssh://user@dummy/master
203 comparing with ssh://user@dummy/master
204 searching for changes
204 searching for changes
205 looking for local changes to affected paths
205 looking for local changes to affected paths
206 The following changeset(s) or their ancestors have local changes not on the remote:
206 The following changeset(s) or their ancestors have local changes not on the remote:
207 * (glob)
207 * (glob)
208 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
208 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
209 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
209 saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
210 deleting data/d3/f.i (reporevlogstore !)
210 deleting data/d3/f.i (reporevlogstore !)
211 deleting meta/d3/00manifest.i (tree !)
211 deleting meta/d3/00manifest.i (tree !)
212 deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
212 deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
213 deleting data/d3/f/5ce0767945cbdbca3b924bb9fbf5143f72ab40ac (reposimplestore !)
213 deleting data/d3/f/5ce0767945cbdbca3b924bb9fbf5143f72ab40ac (reposimplestore !)
214 deleting data/d3/f/index (reposimplestore !)
214 deleting data/d3/f/index (reposimplestore !)
215 $ hg id
215 $ hg id
216 000000000000
216 000000000000
217 $ cd ..
217 $ cd ..
218
218
219 Narrowing doesn't resurrect old commits (unlike what regular `hg strip` does)
219 Narrowing doesn't resurrect old commits (unlike what regular `hg strip` does)
220 $ hg clone --narrow ssh://user@dummy/master narrow-obsmarkers --include d0 --include d3 -q
220 $ hg clone --narrow ssh://user@dummy/master narrow-obsmarkers --include d0 --include d3 -q
221 $ cd narrow-obsmarkers
221 $ cd narrow-obsmarkers
222 $ echo a >> d0/f2
222 $ echo a >> d0/f2
223 $ hg add d0/f2
223 $ hg add d0/f2
224 $ hg ci -m 'modify d0/'
224 $ hg ci -m 'modify d0/'
225 $ echo a >> d3/f2
225 $ echo a >> d3/f2
226 $ hg add d3/f2
226 $ hg add d3/f2
227 $ hg commit --amend -m 'modify d0/ and d3/'
227 $ hg commit --amend -m 'modify d0/ and d3/'
228 $ hg log -T "{rev}: {desc}\n"
228 $ hg log -T "{rev}: {desc}\n"
229 5: modify d0/ and d3/
229 5: modify d0/ and d3/
230 3: add d10/f
230 3: add d10/f
231 2: add d3/f
231 2: add d3/f
232 1: add d2/f
232 1: add d2/f
233 0: add d0/f
233 0: add d0/f
234 $ hg tracked --removeinclude d3 --force-delete-local-changes -q
234 $ hg tracked --removeinclude d3 --force-delete-local-changes -q
235 $ hg log -T "{rev}: {desc}\n"
235 $ hg log -T "{rev}: {desc}\n"
236 3: add d10/f
236 3: add d10/f
237 2: add d3/f
237 2: add d3/f
238 1: add d2/f
238 1: add d2/f
239 0: add d0/f
239 0: add d0/f
240 $ cd ..
240 $ cd ..
241
241
242 Widening doesn't lose bookmarks
242 Widening doesn't lose bookmarks
243 $ hg clone --narrow ssh://user@dummy/master widen-bookmarks --include d0 -q
243 $ hg clone --narrow ssh://user@dummy/master widen-bookmarks --include d0 -q
244 $ cd widen-bookmarks
244 $ cd widen-bookmarks
245 $ hg bookmark my-bookmark
245 $ hg bookmark my-bookmark
246 $ hg log -T "{rev}: {desc} {bookmarks}\n"
246 $ hg log -T "{rev}: {desc} {bookmarks}\n"
247 1: add d10/f my-bookmark
247 1: add d10/f my-bookmark
248 0: add d0/f
248 0: add d0/f
249 $ hg tracked --addinclude d3 -q
249 $ hg tracked --addinclude d3 -q
250 $ hg log -T "{rev}: {desc} {bookmarks}\n"
250 $ hg log -T "{rev}: {desc} {bookmarks}\n"
251 3: add d10/f my-bookmark
251 3: add d10/f my-bookmark
252 2: add d3/f
252 2: add d3/f
253 1: add d2/f
253 1: add d2/f
254 0: add d0/f
254 0: add d0/f
255 $ cd ..
255 $ cd ..
256
256
257 Can remove last include, making repo empty
257 Can remove last include, making repo empty
258 $ hg clone --narrow ssh://user@dummy/master narrow-empty --include d0 -r 5
258 $ hg clone --narrow ssh://user@dummy/master narrow-empty --include d0 -r 5
259 adding changesets
259 adding changesets
260 adding manifests
260 adding manifests
261 adding file changes
261 adding file changes
262 added 2 changesets with 1 changes to 1 files
262 added 2 changesets with 1 changes to 1 files
263 new changesets *:* (glob)
263 new changesets *:* (glob)
264 updating to branch default
264 updating to branch default
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 $ cd narrow-empty
266 $ cd narrow-empty
267 $ hg tracked --removeinclude d0
267 $ hg tracked --removeinclude d0
268 comparing with ssh://user@dummy/master
268 comparing with ssh://user@dummy/master
269 searching for changes
269 searching for changes
270 looking for local changes to affected paths
270 looking for local changes to affected paths
271 deleting data/d0/f.i (reporevlogstore !)
271 deleting data/d0/f.i (reporevlogstore !)
272 deleting meta/d0/00manifest.i (tree !)
272 deleting meta/d0/00manifest.i (tree !)
273 deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
273 deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
274 deleting data/d0/f/index (reposimplestore !)
274 deleting data/d0/f/index (reposimplestore !)
275 $ hg tracked
275 $ hg tracked
276 $ hg files
276 $ hg files
277 [1]
277 [1]
278 $ test -d d0
278 $ test -d d0
279 [1]
279 [1]
280 Do some work in the empty clone
280 Do some work in the empty clone
281 $ hg diff --change .
281 $ hg diff --change .
282 $ hg branch foo
282 $ hg branch foo
283 marked working directory as branch foo
283 marked working directory as branch foo
284 (branches are permanent and global, did you want a bookmark?)
284 (branches are permanent and global, did you want a bookmark?)
285 $ hg ci -m empty
285 $ hg ci -m empty
286 $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
286 $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
287 2: empty
287 2: empty
288 1: add d5/f outsidenarrow
288 1: add d5/f outsidenarrow
289 0: add d0/f outsidenarrow
289 0: add d0/f outsidenarrow
290 $ hg pull -q
290 $ hg pull -q
291 Can widen the empty clone
291 Can widen the empty clone
292 $ hg tracked --addinclude d0
292 $ hg tracked --addinclude d0
293 comparing with ssh://user@dummy/master
293 comparing with ssh://user@dummy/master
294 searching for changes
294 searching for changes
295 saved backup bundle to $TESTTMP/narrow-empty/.hg/strip-backup/*-widen.hg (glob)
295 saved backup bundle to $TESTTMP/narrow-empty/.hg/strip-backup/*-widen.hg (glob)
296 adding changesets
296 adding changesets
297 adding manifests
297 adding manifests
298 adding file changes
298 adding file changes
299 added 3 changesets with 1 changes to 1 files
299 added 3 changesets with 1 changes to 1 files
300 $ hg tracked
300 $ hg tracked
301 I path:d0
301 I path:d0
302 $ hg files
302 $ hg files
303 d0/f
303 d0/f
304 $ find *
304 $ find *
305 d0
305 d0
306 d0/f
306 d0/f
307 $ cd ..
307 $ cd ..
308
308
TODO(martinvonz): test including e.g. d3/g and then removing it once
https://bitbucket.org/Google/narrowhg/issues/6 is fixed

  $ hg clone --narrow ssh://user@dummy/master narrow --include d0 --include d3 --include d6 --include d9
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 8 changesets with 4 changes to 4 files
  new changesets *:* (glob)
  updating to branch default
  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd narrow
  $ hg tracked
  I path:d0
  I path:d3
  I path:d6
  I path:d9
  $ hg tracked --removeinclude d6
  comparing with ssh://user@dummy/master
  searching for changes
  looking for local changes to affected paths
  deleting data/d6/f.i (reporevlogstore !)
  deleting meta/d6/00manifest.i (tree !)
  deleting data/d6/f/7339d30678f451ac8c3f38753beeb4cf2e1655c7 (reposimplestore !)
  deleting data/d6/f/index (reposimplestore !)
  $ hg tracked
  I path:d0
  I path:d3
  I path:d9
#if repofncache
  $ hg debugrebuildfncache
  fncache already up to date
#endif
  $ find *
  d0
  d0/f
  d3
  d3/f
  d9
  d9/f
  $ hg verify -q
  $ hg tracked --addexclude d3/f
  comparing with ssh://user@dummy/master
  searching for changes
  looking for local changes to affected paths
  deleting data/d3/f.i (reporevlogstore !)
  deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
  deleting data/d3/f/index (reposimplestore !)
  $ hg tracked
  I path:d0
  I path:d3
  I path:d9
  X path:d3/f
#if repofncache
  $ hg debugrebuildfncache
  fncache already up to date
#endif
  $ find *
  d0
  d0/f
  d9
  d9/f
  $ hg verify -q
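Exclusions take precedence over includes, which is why d3/f disappeared above even though d3 is still included. The same mix can be requested at clone time; a hedged sketch (narrow-mixed is an illustrative destination name, output omitted):
  $ hg clone --narrow ssh://user@dummy/master narrow-mixed \
  > --include d3 --exclude d3/f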
  $ hg tracked --addexclude d0
  comparing with ssh://user@dummy/master
  searching for changes
  looking for local changes to affected paths
  deleting data/d0/f.i (reporevlogstore !)
  deleting meta/d0/00manifest.i (tree !)
  deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
  deleting data/d0/f/index (reposimplestore !)
  $ hg tracked
  I path:d3
  I path:d9
  X path:d0
  X path:d3/f
#if repofncache
  $ hg debugrebuildfncache
  fncache already up to date
#endif
  $ find *
  d9
  d9/f

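Since --addexclude d0 above also dropped d0 from the include list, bringing d0 back would take both removing the exclusion and re-adding the include. A hedged sketch (output omitted):
  $ hg tracked --removeexclude d0 --addinclude d0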
Make 15 changes to d9 to test the path without --verbose
(Note: using regexes instead of "* (glob)" because if the test fails, it
produces more sensible diffs)
  $ hg tracked
  I path:d3
  I path:d9
  X path:d0
  X path:d3/f
  $ for x in `$TESTDIR/seq.py 1 15`
  > do
  > echo local change >> d9/f
  > hg commit -m "change $x to d9/f"
  > done
  $ hg tracked --removeinclude d9
  comparing with ssh://user@dummy/master
  searching for changes
  looking for local changes to affected paths
  The following changeset(s) or their ancestors have local changes not on the remote:
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ...and 5 more, use --verbose to list all
  abort: local changes found
  (use --force-delete-local-changes to ignore)
  [20]
Now test it *with* verbose.
  $ hg tracked --removeinclude d9 --verbose
  comparing with ssh://user@dummy/master
  searching for changes
  looking for local changes to affected paths
  The following changeset(s) or their ancestors have local changes not on the remote:
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  ^[0-9a-f]{12}$ (re)
  abort: local changes found
  (use --force-delete-local-changes to ignore)
  [20]
  $ cd ..

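As the hint in the abort message says, the local-changes protection can be overridden. A hedged sketch; this discards the local changesets that touch d9, presumably leaving a strip backup under .hg/strip-backup unless --no-backup is also passed (an assumption based on the backup behaviour shown below; output omitted):
  $ hg tracked --removeinclude d9 --force-delete-local-changes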
Test --auto-remove-includes
  $ hg clone --narrow ssh://user@dummy/master narrow-auto-remove -q \
  > --include d0 --include d1 --include d2
  $ cd narrow-auto-remove
  $ echo a >> d0/f
  $ hg ci -m 'local change to d0'
  $ hg co '.^'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo a >> d1/f
  $ hg ci -m 'local change to d1'
  created new head
  $ hg debugobsolete $(hg log -T '{node}' -r 'desc("local change to d0")')
  1 new obsolescence markers
  obsoleted 1 changesets
  $ echo n | hg tracked --auto-remove-includes --config ui.interactive=yes
  comparing with ssh://user@dummy/master
  searching for changes
  looking for unused includes to remove
  path:d0
  path:d2
  remove these unused includes (yn)? n
  $ hg tracked --auto-remove-includes
  comparing with ssh://user@dummy/master
  searching for changes
  looking for unused includes to remove
  path:d0
  path:d2
  remove these unused includes (yn)? y
  looking for local changes to affected paths
  saved backup bundle to $TESTTMP/narrow-auto-remove/.hg/strip-backup/*-narrow.hg (glob)
  deleting data/d0/f.i
  deleting data/d2/f.i
  deleting meta/d0/00manifest.i (tree !)
  deleting meta/d2/00manifest.i (tree !)
  $ hg tracked
  I path:d1
  $ hg files
  d1/f
  $ hg tracked --auto-remove-includes
  comparing with ssh://user@dummy/master
  searching for changes
  looking for unused includes to remove
  found no unused includes
Test --no-backup
  $ hg tracked --addinclude d0 --addinclude d2 -q
  $ hg unbundle .hg/strip-backup/*-narrow.hg -q
  $ rm .hg/strip-backup/*
  $ hg tracked --auto-remove-includes --no-backup
  comparing with ssh://user@dummy/master
  searching for changes
  looking for unused includes to remove
  path:d0
  path:d2
  remove these unused includes (yn)? y
  looking for local changes to affected paths
  deleting data/d0/f.i
  deleting data/d2/f.i
  deleting meta/d0/00manifest.i (tree !)
  deleting meta/d2/00manifest.i (tree !)
  $ ls .hg/strip-backup/


Test removing include while concurrently modifying file in that path
  $ hg clone --narrow ssh://user@dummy/master narrow-concurrent-modify -q \
  > --include d0 --include d1
  $ cd narrow-concurrent-modify
  $ hg --config 'hooks.pretxnopen = echo modified >> d0/f' tracked --removeinclude d0
  comparing with ssh://user@dummy/master
  searching for changes
  looking for local changes to affected paths
  deleting data/d0/f.i
  deleting meta/d0/00manifest.i (tree !)
  not deleting possibly dirty file d0/f
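Because the hook dirtied d0/f while the narrowspec was shrinking, the store data was deleted but the working-copy file was left in place. If the local edit is not wanted, the leftover file can be removed by hand; a minimal cleanup sketch (assuming the modification does not need to be kept):
  $ test -f d0/f && rm d0/f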