transaction: remove the `branch` backup for transaction...
marmoute
r51162:f92afdf3 default
@@ -1,1806 +1,1802 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import collections
import contextlib
import os
import stat
import uuid

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    node,
    pathutil,
    policy,
    pycompat,
    scmutil,
    txnutil,
    util,
)

from .dirstateutils import (
    timestamp,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# used to detect the lack of a parameter
SENTINEL = object()

HAS_FAST_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def check_invalidated(func):
    """check that the func is called with a non-invalidated dirstate

    The dirstate is in an "invalidated state" after an error occurred during
    its modification and remains so until we exit the top level scope that
    framed such change.
    """

    def wrap(self, *args, **kwargs):
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


def requires_changing_parents(func):
    def wrap(self, *args, **kwargs):
        if not self.is_changing_parents:
            msg = 'calling `%s` outside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)


def requires_changing_files(func):
    def wrap(self, *args, **kwargs):
        if not self.is_changing_files:
            msg = 'calling `%s` outside of a `changing_files`'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)


def requires_changing_any(func):
    def wrap(self, *args, **kwargs):
        if not self.is_changing_any:
            msg = 'calling `%s` outside of a changing context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)


def requires_changing_files_or_status(func):
    def wrap(self, *args, **kwargs):
        if not (self.is_changing_files or self._running_status > 0):
            msg = (
                'calling `%s` outside of a changing_files '
                'or running_status context'
            )
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)

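# Editor's note: the block below is an illustrative sketch, not part of the
# original module. It shows how the decorators above are meant to guard
# dirstate methods; `example_mutation` is a hypothetical method name (real
# guarded methods such as `setparents` appear further down).
#
#     @requires_changing_parents
#     def example_mutation(self):
#         # Reachable only inside a `changing_parents` context; otherwise
#         # the wrapper raises error.ProgrammingError.
#         self._dirty = True
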

CHANGE_TYPE_PARENTS = "parents"
CHANGE_TYPE_FILES = "files"


@interfaceutil.implementer(intdirstate.idirstate)
class dirstate:

    # used by largefile to avoid overwriting transaction callback
    _tr_key_suffix = b''

    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True if any internal state may be different
        self._dirty = False
        # True if the set of tracked files may be different
        self._dirty_tracked_set = False
        self._ui = ui
        self._filecache = {}
        # nesting level of `changing_parents` context
        self._changing_level = 0
        # the change currently underway
        self._change_type = None
        # number of open _running_status contexts
        self._running_status = 0
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        self._attached_to_a_transaction = False
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def refresh(self):
        if '_branch' in vars(self):
            del self._branch
        if '_map' in vars(self) and self._map.may_need_refresh():
            self.invalidate()

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    @check_invalidated
    def running_status(self, repo):
        """Wrap a status operation

        This context is not mutually exclusive with the `changing_*` contexts.
        It also does not require the `wlock` to be taken.

        If the wlock is taken, this context will behave in a simple way, and
        ensure the data are scheduled for write when leaving the top level
        context.

        If the lock is not taken, it will only ensure that the data are either
        committed (written) or rolled back (invalidated) when exiting the top
        level context. The write/invalidate action must be performed by the
        wrapped code.


        The expected logic is:

        A: read the dirstate
        B: run status
           This might make the dirstate dirty by updating cache,
           especially in Rust.
        C: do more "post status" fixups if relevant
        D: try to take the w-lock (this will invalidate the changes if they were raced)
        E0: if dirstate changed on disk → discard change (done by dirstate internal)
        E1: elif lock was acquired → write the changes
        E2: else → discard the changes
        """
        has_lock = repo.currentwlock() is not None
        is_changing = self.is_changing_any
        tr = repo.currenttransaction()
        has_tr = tr is not None
        nested = bool(self._running_status)

        first_and_alone = not (is_changing or has_tr or nested)

        # enforce no change happened outside of a proper context.
        if first_and_alone and self._dirty:
            has_tr = repo.currenttransaction() is not None
            if not has_tr and self._changing_level == 0 and self._dirty:
                msg = "entering a status context, but dirstate is already dirty"
                raise error.ProgrammingError(msg)

        should_write = has_lock and not (nested or is_changing)

        self._running_status += 1
        try:
            yield
        except Exception:
            self.invalidate()
            raise
        finally:
            self._running_status -= 1
            if self._invalidated_context:
                should_write = False
                self.invalidate()

        if should_write:
            assert repo.currenttransaction() is tr
            self.write(tr)
        elif not has_lock:
            if self._dirty:
                msg = b'dirstate dirty while exiting an isolated status context'
                repo.ui.develwarn(msg)
                self.invalidate()

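    # Editor's note: an illustrative sketch, not part of the original module.
    # With the wlock held, `running_status` schedules a write on successful
    # exit, matching the A-E logic documented above; the file name below is
    # hypothetical.
    #
    #     with repo.wlock():
    #         with repo.dirstate.running_status(repo):
    #             # B/C: run status and apply post-status fixups
    #             repo.dirstate.set_possibly_dirty(b'some/file')
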
    @contextlib.contextmanager
    @check_invalidated
    def _changing(self, repo, change_type):
        if repo.currentwlock() is None:
            msg = b"trying to change the dirstate without holding the wlock"
            raise error.ProgrammingError(msg)

        has_tr = repo.currenttransaction() is not None
        if not has_tr and self._changing_level == 0 and self._dirty:
            msg = b"entering a changing context, but dirstate is already dirty"
            repo.ui.develwarn(msg)

        assert self._changing_level >= 0
        # different types of change are mutually exclusive
        if self._change_type is None:
            assert self._changing_level == 0
            self._change_type = change_type
        elif self._change_type != change_type:
            msg = (
                'trying to open "%s" dirstate-changing context while a "%s" is'
                ' already open'
            )
            msg %= (change_type, self._change_type)
            raise error.ProgrammingError(msg)
        should_write = False
        self._changing_level += 1
        try:
            yield
        except:  # re-raises
            self.invalidate()  # this will set `_invalidated_context`
            raise
        finally:
            assert self._changing_level > 0
            self._changing_level -= 1
            # If the dirstate is being invalidated, call invalidate again.
            # This will throw away anything added by an upper context and
            # reset the `_invalidated_context` flag when relevant
            if self._changing_level <= 0:
                self._change_type = None
                assert self._changing_level == 0
            if self._invalidated_context:
                # make sure we invalidate anything an upper context might
                # have changed.
                self.invalidate()
            else:
                should_write = self._changing_level <= 0
        tr = repo.currenttransaction()
        if has_tr != (tr is not None):
            if has_tr:
                m = "transaction vanished while changing dirstate"
            else:
                m = "transaction appeared while changing dirstate"
            raise error.ProgrammingError(m)
        if should_write:
            self.write(tr)

    @contextlib.contextmanager
    def changing_parents(self, repo):
        with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
            yield c

    @contextlib.contextmanager
    def changing_files(self, repo):
        with self._changing(repo, CHANGE_TYPE_FILES) as c:
            yield c

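    # Editor's note: an illustrative sketch, not part of the original module.
    # Callers must hold the wlock before entering either context; the file
    # name b'foo' and the node `new_node` are hypothetical.
    #
    #     with repo.wlock():
    #         with repo.dirstate.changing_files(repo):
    #             repo.dirstate.set_tracked(b'foo')      # e.g. `hg add foo`
    #         with repo.dirstate.changing_parents(repo):
    #             repo.dirstate.setparents(new_node)     # e.g. after a commit
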
    # here to help migration to the new code
    def parentchange(self):
        msg = (
            "Mercurial 6.4 and later requires call to "
            "`dirstate.changing_parents(repo)`"
        )
        raise error.ProgrammingError(msg)

    @property
    def is_changing_any(self):
        """Returns true if the dirstate is in the middle of a set of changes.

        This returns True for any kind of change.
        """
        return self._changing_level > 0

    def pendingparentchange(self):
        return self.is_changing_parent()

    def is_changing_parent(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
        return self.is_changing_parents

    @property
    def is_changing_parents(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        if self._changing_level <= 0:
            return False
        return self._change_type == CHANGE_TYPE_PARENTS

    @property
    def is_changing_files(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the files tracked or their sources.
        """
        if self._changing_level <= 0:
            return False
        return self._change_type == CHANGE_TYPE_FILES

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        return self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        When sparse is disabled, return None.
        """
        if self._sparsematchfn is None:
            return None
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        f = None
        data = b''
        try:
            f, mode = txnutil.trypending(self._root, self._opener, b'branch')
            data = f.read().strip()
        except FileNotFoundError:
            pass
        finally:
            if f is not None:
                f.close()
        if not data:
            return b"default"
        return data

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags

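    # Editor's note: an illustrative sketch, not part of the original module.
    # `buildfallback` is expected to return a callable mapping a filename to
    # its flags; the changectx-based fallback shown here is an assumption.
    #
    #     get_flags = repo.dirstate.flagfunc(lambda: ctx.flags)
    #     get_flags(b'script.sh')  # b'x' if executable, b'l' if a symlink,
    #                              # b'' otherwise
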
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def get_entry(self, path):
        """return a DirstateItem for the associated path"""
        entry = self._map.get(path)
        if entry is None:
            return DirstateItem()
        return entry

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return self._map.items()

    iteritems = items

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        return encoding.tolocal(self._branch)

    @requires_changing_parents
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are adjusted to
        normal, and previous copy records are discarded and returned by the
        call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._changing_level == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)

    def setbranch(self, branch, transaction=SENTINEL):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        if transaction is SENTINEL:
            msg = b"setbranch needs a `transaction` argument"
            self._ui.deprecwarn(msg, b'6.5')
            transaction = None
        if transaction is not None:
            self._setup_tr_abort(transaction)
            transaction.addfilegenerator(
                b'dirstate-3-branch%s' % self._tr_key_suffix,
                (b'branch',),
                self._write_branch,
                location=b'plain',
                post_finalize=True,
            )
            return

        vfs = self._opener
        with vfs(b'branch', b'w', atomictemp=True, checkambig=True) as f:
            self._write_branch(f)
            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            #
            # XXX do we actually need this,
            # refreshing the attribute is quite cheap
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()

    def _write_branch(self, file_obj):
        file_obj.write(self._branch + b'\n')

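    # Editor's note: an illustrative sketch, not part of the original module.
    # Since the deprecation above, callers should pass the transaction
    # explicitly; with `transaction=None` the branch file is written
    # immediately instead of through a transaction file generator.
    #
    #     with repo.wlock(), repo.lock(), repo.transaction(b'branch') as tr:
    #         repo.dirstate.setbranch(b'stable', tr)
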
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._dirty_tracked_set = False
        self._invalidated_context = bool(
            self._changing_level > 0
            or self._attached_to_a_transaction
            or self._running_status
        )
        self._origpl = None

    @requires_changing_any
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._check_sparse(source)
            self._map.copymap[dest] = source
        else:
            self._map.copymap.pop(dest, None)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    @requires_changing_files
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            self._dirty_tracked_set = True
        return pre_tracked

    @requires_changing_files
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            self._dirty = True
            self._dirty_tracked_set = True
        return ret

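    # Editor's note: an illustrative sketch, not part of the original module,
    # mirroring what `hg add` and `hg remove` do; file names are hypothetical.
    #
    #     with repo.wlock(), repo.dirstate.changing_files(repo):
    #         repo.dirstate.set_tracked(b'new-file.txt')    # like `hg add`
    #         repo.dirstate.set_untracked(b'old-file.txt')  # like `hg remove`
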
    @requires_changing_files_or_status
    def set_clean(self, filename, parentfiledata):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        (mode, size, mtime) = parentfiledata
        self._map.set_clean(filename, mode, size, mtime)

    @requires_changing_files_or_status
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)

    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history-rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )

    @requires_changing_parents
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parents change to keep track
        of the file's situation with regard to the working copy and its
        parent.

        This function must be called within a `dirstate.changing_parents`
        context.

        note: the API is at an early stage and we might need to adjust it
        depending on what information ends up being relevant and useful to
        other processing.
        """
        self._update_file(
            filename=filename,
            wc_tracked=wc_tracked,
            p1_tracked=p1_tracked,
            p2_info=p2_info,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )

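    # Editor's note: an illustrative sketch, not part of the original module.
    # It records a file tracked in both the working copy and p1 whose on-disk
    # state is unknown, so that the next status will re-check it; the file
    # name is hypothetical.
    #
    #     with repo.wlock(), repo.dirstate.changing_parents(repo):
    #         repo.dirstate.update_file(
    #             b'some/file',
    #             wc_tracked=True,
    #             p1_tracked=True,
    #             possibly_dirty=True,
    #         )
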
    def hacky_extension_update_file(self, *args, **kwargs):
        """NEVER USE THIS, YOU DO NOT NEED IT

        This function is a variant of "update_file" to be called by a small
        set of extensions; it also adjusts the internal state of a file, but
        can be called outside of a `changing_parents` context.

        A very small number of extensions meddle with the working copy content
        in a way that requires adjusting the dirstate accordingly. At the time
        this command is written they are:
        - keyword,
        - largefile,
        PLEASE DO NOT GROW THIS LIST ANY FURTHER.

        This function could probably be replaced by a more semantic one (like
        "adjust expected size" or "always revalidate file content", etc.);
        however, at the time this is written, that is too much of a detour
        to be considered.
        """
        if not (self._changing_level > 0 or self._running_status > 0):
            msg = "requires a changes context"
            raise error.ProgrammingError(msg)
        self._update_file(
            *args,
            **kwargs,
        )

    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):

        # note: I do not think we need to double-check name clashes here since
        # we are in an update/merge case that should already have taken care
        # of this. The test agrees.

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )

    def _check_new_tracked_filename(self, filename):
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)

    def _check_sparse(self, filename):
        """Check that a filename is inside the sparse profile"""
        sparsematch = self._sparsematcher
        if sparsematch is not None and not sparsematch.always():
            if not sparsematch(filename):
                msg = _(b"cannot add '%s' - it is outside the sparse checkout")
                hint = _(
                    b'include file with `hg debugsparse --include <pattern>` or use '
                    b'`hg add -s <file>` to include file directory while adding'
                )
                raise error.Abort(msg % filename, hint=hint)

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

960 def normalize(self, path, isknown=False, ignoremissing=False):
960 def normalize(self, path, isknown=False, ignoremissing=False):
961 """
961 """
962 normalize the case of a pathname when on a casefolding filesystem
962 normalize the case of a pathname when on a casefolding filesystem
963
963
964 isknown specifies whether the filename came from walking the
964 isknown specifies whether the filename came from walking the
965 disk, to avoid extra filesystem access.
965 disk, to avoid extra filesystem access.
966
966
967 If ignoremissing is True, missing paths are returned
967 If ignoremissing is True, missing paths are returned
968 unchanged. Otherwise, we try harder to normalize possibly
968 unchanged. Otherwise, we try harder to normalize possibly
969 existing path components.
969 existing path components.
970
970
971 The normalized case is determined based on the following precedence:
971 The normalized case is determined based on the following precedence:
972
972
973 - version of name already stored in the dirstate
973 - version of name already stored in the dirstate
974 - version of name stored on disk
974 - version of name stored on disk
975 - version provided via command arguments
975 - version provided via command arguments
976 """
976 """
977
977
978 if self._checkcase:
978 if self._checkcase:
979 return self._normalize(path, isknown, ignoremissing)
979 return self._normalize(path, isknown, ignoremissing)
980 return path
980 return path
981
981
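
The precedence described above can be illustrated standalone. The following is a minimal sketch, not Mercurial's actual data structures: `foldmap` stands in for `filefoldmap`/`dirfoldmap`, and `lower()` approximates `util.normcase`:

    import os

    def normalize_sketch(path, foldmap, root):
        # Hypothetical helper: dirstate spelling first, then the on-disk
        # spelling, then the spelling the caller provided.
        normed = path.lower()
        folded = foldmap.get(normed)
        if folded is None:
            on_disk = [e for e in os.listdir(root) if e.lower() == normed]
            folded = on_disk[0] if on_disk else path
            foldmap[normed] = folded  # cache so the next lookup is O(1)
        return folded
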
982 # XXX this method is barely used, as a result:
982 # XXX this method is barely used, as a result:
983 # - its semantics are unclear
983 # - its semantics are unclear
984 # - do we really need it?
984 # - do we really need it?
985 @requires_changing_parents
985 @requires_changing_parents
986 def clear(self):
986 def clear(self):
987 self._map.clear()
987 self._map.clear()
988 self._dirty = True
988 self._dirty = True
989
989
990 @requires_changing_parents
990 @requires_changing_parents
991 def rebuild(self, parent, allfiles, changedfiles=None):
991 def rebuild(self, parent, allfiles, changedfiles=None):
992 matcher = self._sparsematcher
992 matcher = self._sparsematcher
993 if matcher is not None and not matcher.always():
993 if matcher is not None and not matcher.always():
994 # should not add non-matching files
994 # should not add non-matching files
995 allfiles = [f for f in allfiles if matcher(f)]
995 allfiles = [f for f in allfiles if matcher(f)]
996 if changedfiles:
996 if changedfiles:
997 changedfiles = [f for f in changedfiles if matcher(f)]
997 changedfiles = [f for f in changedfiles if matcher(f)]
998
998
999 if changedfiles is not None:
999 if changedfiles is not None:
1000 # these files will be deleted from the dirstate when they are
1000 # these files will be deleted from the dirstate when they are
1001 # not found to be in allfiles
1001 # not found to be in allfiles
1002 dirstatefilestoremove = {f for f in self if not matcher(f)}
1002 dirstatefilestoremove = {f for f in self if not matcher(f)}
1003 changedfiles = dirstatefilestoremove.union(changedfiles)
1003 changedfiles = dirstatefilestoremove.union(changedfiles)
1004
1004
1005 if changedfiles is None:
1005 if changedfiles is None:
1006 # Rebuild entire dirstate
1006 # Rebuild entire dirstate
1007 to_lookup = allfiles
1007 to_lookup = allfiles
1008 to_drop = []
1008 to_drop = []
1009 self.clear()
1009 self.clear()
1010 elif len(changedfiles) < 10:
1010 elif len(changedfiles) < 10:
1011 # Avoid turning allfiles into a set, which can be expensive if it's
1011 # Avoid turning allfiles into a set, which can be expensive if it's
1012 # large.
1012 # large.
1013 to_lookup = []
1013 to_lookup = []
1014 to_drop = []
1014 to_drop = []
1015 for f in changedfiles:
1015 for f in changedfiles:
1016 if f in allfiles:
1016 if f in allfiles:
1017 to_lookup.append(f)
1017 to_lookup.append(f)
1018 else:
1018 else:
1019 to_drop.append(f)
1019 to_drop.append(f)
1020 else:
1020 else:
1021 changedfilesset = set(changedfiles)
1021 changedfilesset = set(changedfiles)
1022 to_lookup = changedfilesset & set(allfiles)
1022 to_lookup = changedfilesset & set(allfiles)
1023 to_drop = changedfilesset - to_lookup
1023 to_drop = changedfilesset - to_lookup
1024
1024
1025 if self._origpl is None:
1025 if self._origpl is None:
1026 self._origpl = self._pl
1026 self._origpl = self._pl
1027 self._map.setparents(parent, self._nodeconstants.nullid)
1027 self._map.setparents(parent, self._nodeconstants.nullid)
1028
1028
1029 for f in to_lookup:
1029 for f in to_lookup:
1030 if self.in_merge:
1030 if self.in_merge:
1031 self.set_tracked(f)
1031 self.set_tracked(f)
1032 else:
1032 else:
1033 self._map.reset_state(
1033 self._map.reset_state(
1034 f,
1034 f,
1035 wc_tracked=True,
1035 wc_tracked=True,
1036 p1_tracked=True,
1036 p1_tracked=True,
1037 )
1037 )
1038 for f in to_drop:
1038 for f in to_drop:
1039 self._map.reset_state(f)
1039 self._map.reset_state(f)
1040
1040
1041 self._dirty = True
1041 self._dirty = True
1042
1042
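
The small/large branching above can be expressed in isolation. A standalone sketch of the same partition (names are illustrative): for a handful of changed files, probing `allfiles` directly avoids materializing `set(allfiles)` when the manifest is large.

    def partition(allfiles, changedfiles, threshold=10):
        if changedfiles is None:
            return list(allfiles), []          # full rebuild: look up everything
        if len(changedfiles) < threshold:
            to_lookup, to_drop = [], []
            for f in changedfiles:             # a few linear probes beat set(allfiles)
                (to_lookup if f in allfiles else to_drop).append(f)
            return to_lookup, to_drop
        changed = set(changedfiles)
        to_lookup = changed & set(allfiles)
        return to_lookup, changed - to_lookup

    assert partition([b'a', b'b'], [b'a', b'c']) == ([b'a'], [b'c'])
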
1043 def _setup_tr_abort(self, tr):
1043 def _setup_tr_abort(self, tr):
1044 """make sure we invalidate the current change on abort"""
1044 """make sure we invalidate the current change on abort"""
1045 if tr is None:
1045 if tr is None:
1046 return
1046 return
1047
1047
1048 def on_abort(tr):
1048 def on_abort(tr):
1049 self._attached_to_a_transaction = False
1049 self._attached_to_a_transaction = False
1050 self.invalidate()
1050 self.invalidate()
1051
1051
1052 tr.addabort(
1052 tr.addabort(
1053 b'dirstate-invalidate%s' % self._tr_key_suffix,
1053 b'dirstate-invalidate%s' % self._tr_key_suffix,
1054 on_abort,
1054 on_abort,
1055 )
1055 )
1056
1056
1057 def write(self, tr):
1057 def write(self, tr):
1058 if not self._dirty:
1058 if not self._dirty:
1059 return
1059 return
1060 # make sure we don't request a write of invalidated content
1060 # make sure we don't request a write of invalidated content
1061 # XXX move before the dirty check once `unlock` stops calling `write`
1061 # XXX move before the dirty check once `unlock` stops calling `write`
1062 assert not self._invalidated_context
1062 assert not self._invalidated_context
1063
1063
1064 write_key = self._use_tracked_hint and self._dirty_tracked_set
1064 write_key = self._use_tracked_hint and self._dirty_tracked_set
1065 if tr:
1065 if tr:
1066
1066
1067 self._setup_tr_abort(tr)
1067 self._setup_tr_abort(tr)
1068 self._attached_to_a_transaction = True
1068 self._attached_to_a_transaction = True
1069
1069
1070 def on_success(f):
1070 def on_success(f):
1071 self._attached_to_a_transaction = False
1071 self._attached_to_a_transaction = False
1072 self._writedirstate(tr, f)
1072 self._writedirstate(tr, f)
1073
1073
1074 # delay writing in-memory changes out
1074 # delay writing in-memory changes out
1075 tr.addfilegenerator(
1075 tr.addfilegenerator(
1076 b'dirstate-1-main%s' % self._tr_key_suffix,
1076 b'dirstate-1-main%s' % self._tr_key_suffix,
1077 (self._filename,),
1077 (self._filename,),
1078 on_success,
1078 on_success,
1079 location=b'plain',
1079 location=b'plain',
1080 post_finalize=True,
1080 post_finalize=True,
1081 )
1081 )
1082 if write_key:
1082 if write_key:
1083 tr.addfilegenerator(
1083 tr.addfilegenerator(
1084 b'dirstate-2-key-post%s' % self._tr_key_suffix,
1084 b'dirstate-2-key-post%s' % self._tr_key_suffix,
1085 (self._filename_th,),
1085 (self._filename_th,),
1086 lambda f: self._write_tracked_hint(tr, f),
1086 lambda f: self._write_tracked_hint(tr, f),
1087 location=b'plain',
1087 location=b'plain',
1088 post_finalize=True,
1088 post_finalize=True,
1089 )
1089 )
1090 return
1090 return
1091
1091
1092 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
1092 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
1093 with file(self._filename) as f:
1093 with file(self._filename) as f:
1094 self._writedirstate(tr, f)
1094 self._writedirstate(tr, f)
1095 if write_key:
1095 if write_key:
1096 # we update the key-file after writing to make sure readers have a
1096 # we update the key-file after writing to make sure readers have a
1097 # key that matches the newly written content
1097 # key that matches the newly written content
1098 with file(self._filename_th) as f:
1098 with file(self._filename_th) as f:
1099 self._write_tracked_hint(tr, f)
1099 self._write_tracked_hint(tr, f)
1100
1100
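
The transaction branch above never writes directly: it registers a callback that runs only if the transaction succeeds. A toy model of that contract (hypothetical class, much simpler than Mercurial's real `transaction` API):

    class ToyTransaction:
        def __init__(self):
            self._generators = {}

        def addfilegenerator(self, key, filenames, write_cb):
            self._generators[key] = (filenames, write_cb)  # last writer wins per key

        def close(self):
            # on success, run every registered generator exactly once
            for filenames, write_cb in self._generators.values():
                for name in filenames:
                    with open(name, 'wb') as f:
                        write_cb(f)

    tr = ToyTransaction()
    tr.addfilegenerator(b'dirstate-1-main', ('dirstate.demo',),
                        lambda f: f.write(b'...'))
    tr.close()  # only now is dirstate.demo written
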
1101 def delete_tracked_hint(self):
1101 def delete_tracked_hint(self):
1102 """remove the tracked_hint file
1102 """remove the tracked_hint file
1103
1103
1104 To be used by format downgrade operations"""
1104 To be used by format downgrade operations"""
1105 self._opener.unlink(self._filename_th)
1105 self._opener.unlink(self._filename_th)
1106 self._use_tracked_hint = False
1106 self._use_tracked_hint = False
1107
1107
1108 def addparentchangecallback(self, category, callback):
1108 def addparentchangecallback(self, category, callback):
1109 """add a callback to be called when the wd parents are changed
1109 """add a callback to be called when the wd parents are changed
1110
1110
1111 Callback will be called with the following arguments:
1111 Callback will be called with the following arguments:
1112 dirstate, (oldp1, oldp2), (newp1, newp2)
1112 dirstate, (oldp1, oldp2), (newp1, newp2)
1113
1113
1114 Category is a unique identifier to allow overwriting an old callback
1114 Category is a unique identifier to allow overwriting an old callback
1115 with a newer callback.
1115 with a newer callback.
1116 """
1116 """
1117 self._plchangecallbacks[category] = callback
1117 self._plchangecallbacks[category] = callback
1118
1118
1119 def _writedirstate(self, tr, st):
1119 def _writedirstate(self, tr, st):
1120 # make sure we don't write invalidated content
1120 # make sure we don't write invalidated content
1121 assert not self._invalidated_context
1121 assert not self._invalidated_context
1122 # notify callbacks about parents change
1122 # notify callbacks about parents change
1123 if self._origpl is not None and self._origpl != self._pl:
1123 if self._origpl is not None and self._origpl != self._pl:
1124 for c, callback in sorted(self._plchangecallbacks.items()):
1124 for c, callback in sorted(self._plchangecallbacks.items()):
1125 callback(self, self._origpl, self._pl)
1125 callback(self, self._origpl, self._pl)
1126 self._origpl = None
1126 self._origpl = None
1127 self._map.write(tr, st)
1127 self._map.write(tr, st)
1128 self._dirty = False
1128 self._dirty = False
1129 self._dirty_tracked_set = False
1129 self._dirty_tracked_set = False
1130
1130
1131 def _write_tracked_hint(self, tr, f):
1131 def _write_tracked_hint(self, tr, f):
1132 key = node.hex(uuid.uuid4().bytes)
1132 key = node.hex(uuid.uuid4().bytes)
1133 f.write(b"1\n%s\n" % key) # 1 is the format version
1133 f.write(b"1\n%s\n" % key) # 1 is the format version
1134
1134
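
The tracked-hint file is just a format version plus an opaque key that changes whenever the tracked set changes; a reader compares keys across two points in time. A hedged sketch (hypothetical helpers; `uuid4().hex` approximates `node.hex(uuid.uuid4().bytes)`):

    import uuid

    def write_hint(path):
        key = uuid.uuid4().hex.encode('ascii')
        with open(path, 'wb') as f:
            f.write(b"1\n%s\n" % key)  # line 1: format version, line 2: key
        return key

    def tracked_set_may_have_changed(path, old_key):
        with open(path, 'rb') as f:
            _version, key = f.read().splitlines()[:2]
        return key != old_key          # unchanged key => same tracked set
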
1135 def _dirignore(self, f):
1135 def _dirignore(self, f):
1136 if self._ignore(f):
1136 if self._ignore(f):
1137 return True
1137 return True
1138 for p in pathutil.finddirs(f):
1138 for p in pathutil.finddirs(f):
1139 if self._ignore(p):
1139 if self._ignore(p):
1140 return True
1140 return True
1141 return False
1141 return False
1142
1142
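
`pathutil.finddirs` yields each ancestor directory of a path, which is why an ignored parent directory ignores everything below it. A rough standalone equivalent:

    def finddirs_sketch(path):
        # yield b'a/b', b'a' and finally the root b'' for path b'a/b/c'
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)
        yield b''

    assert list(finddirs_sketch(b'a/b/c')) == [b'a/b', b'a', b'']
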
1143 def _ignorefiles(self):
1143 def _ignorefiles(self):
1144 files = []
1144 files = []
1145 if os.path.exists(self._join(b'.hgignore')):
1145 if os.path.exists(self._join(b'.hgignore')):
1146 files.append(self._join(b'.hgignore'))
1146 files.append(self._join(b'.hgignore'))
1147 for name, path in self._ui.configitems(b"ui"):
1147 for name, path in self._ui.configitems(b"ui"):
1148 if name == b'ignore' or name.startswith(b'ignore.'):
1148 if name == b'ignore' or name.startswith(b'ignore.'):
1149 # we need to use os.path.join here rather than self._join
1149 # we need to use os.path.join here rather than self._join
1150 # because path is arbitrary and user-specified
1150 # because path is arbitrary and user-specified
1151 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1151 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1152 return files
1152 return files
1153
1153
1154 def _ignorefileandline(self, f):
1154 def _ignorefileandline(self, f):
1155 files = collections.deque(self._ignorefiles())
1155 files = collections.deque(self._ignorefiles())
1156 visited = set()
1156 visited = set()
1157 while files:
1157 while files:
1158 i = files.popleft()
1158 i = files.popleft()
1159 patterns = matchmod.readpatternfile(
1159 patterns = matchmod.readpatternfile(
1160 i, self._ui.warn, sourceinfo=True
1160 i, self._ui.warn, sourceinfo=True
1161 )
1161 )
1162 for pattern, lineno, line in patterns:
1162 for pattern, lineno, line in patterns:
1163 kind, p = matchmod._patsplit(pattern, b'glob')
1163 kind, p = matchmod._patsplit(pattern, b'glob')
1164 if kind == b"subinclude":
1164 if kind == b"subinclude":
1165 if p not in visited:
1165 if p not in visited:
1166 files.append(p)
1166 files.append(p)
1167 continue
1167 continue
1168 m = matchmod.match(
1168 m = matchmod.match(
1169 self._root, b'', [], [pattern], warn=self._ui.warn
1169 self._root, b'', [], [pattern], warn=self._ui.warn
1170 )
1170 )
1171 if m(f):
1171 if m(f):
1172 return (i, lineno, line)
1172 return (i, lineno, line)
1173 visited.add(i)
1173 visited.add(i)
1174 return (None, -1, b"")
1174 return (None, -1, b"")
1175
1175
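
The loop above is a breadth-first search over ignore files that follows `subinclude:` edges while guarding against cycles with `visited`. The skeleton in isolation, with `read_patterns(path)` and `matches(pattern)` as stand-ins for the real helpers:

    import collections

    def first_ignore_match(start_files, read_patterns, matches):
        queue = collections.deque(start_files)
        visited = set()
        while queue:
            ignore_file = queue.popleft()
            for kind, pattern, lineno, line in read_patterns(ignore_file):
                if kind == b'subinclude':
                    if pattern not in visited:  # do not re-enqueue a processed file
                        queue.append(pattern)
                elif matches(pattern):
                    return ignore_file, lineno, line
            visited.add(ignore_file)
        return None, -1, b''
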
1176 def _walkexplicit(self, match, subrepos):
1176 def _walkexplicit(self, match, subrepos):
1177 """Get stat data about the files explicitly specified by match.
1177 """Get stat data about the files explicitly specified by match.
1178
1178
1179 Return a triple (results, dirsfound, dirsnotfound).
1179 Return a triple (results, dirsfound, dirsnotfound).
1180 - results is a mapping from filename to stat result. It also contains
1180 - results is a mapping from filename to stat result. It also contains
1181 listings mapping subrepos and .hg to None.
1181 listings mapping subrepos and .hg to None.
1182 - dirsfound is a list of files found to be directories.
1182 - dirsfound is a list of files found to be directories.
1183 - dirsnotfound is a list of files that the dirstate thinks are
1183 - dirsnotfound is a list of files that the dirstate thinks are
1184 directories and that were not found."""
1184 directories and that were not found."""
1185
1185
1186 def badtype(mode):
1186 def badtype(mode):
1187 kind = _(b'unknown')
1187 kind = _(b'unknown')
1188 if stat.S_ISCHR(mode):
1188 if stat.S_ISCHR(mode):
1189 kind = _(b'character device')
1189 kind = _(b'character device')
1190 elif stat.S_ISBLK(mode):
1190 elif stat.S_ISBLK(mode):
1191 kind = _(b'block device')
1191 kind = _(b'block device')
1192 elif stat.S_ISFIFO(mode):
1192 elif stat.S_ISFIFO(mode):
1193 kind = _(b'fifo')
1193 kind = _(b'fifo')
1194 elif stat.S_ISSOCK(mode):
1194 elif stat.S_ISSOCK(mode):
1195 kind = _(b'socket')
1195 kind = _(b'socket')
1196 elif stat.S_ISDIR(mode):
1196 elif stat.S_ISDIR(mode):
1197 kind = _(b'directory')
1197 kind = _(b'directory')
1198 return _(b'unsupported file type (type is %s)') % kind
1198 return _(b'unsupported file type (type is %s)') % kind
1199
1199
1200 badfn = match.bad
1200 badfn = match.bad
1201 dmap = self._map
1201 dmap = self._map
1202 lstat = os.lstat
1202 lstat = os.lstat
1203 getkind = stat.S_IFMT
1203 getkind = stat.S_IFMT
1204 dirkind = stat.S_IFDIR
1204 dirkind = stat.S_IFDIR
1205 regkind = stat.S_IFREG
1205 regkind = stat.S_IFREG
1206 lnkkind = stat.S_IFLNK
1206 lnkkind = stat.S_IFLNK
1207 join = self._join
1207 join = self._join
1208 dirsfound = []
1208 dirsfound = []
1209 foundadd = dirsfound.append
1209 foundadd = dirsfound.append
1210 dirsnotfound = []
1210 dirsnotfound = []
1211 notfoundadd = dirsnotfound.append
1211 notfoundadd = dirsnotfound.append
1212
1212
1213 if not match.isexact() and self._checkcase:
1213 if not match.isexact() and self._checkcase:
1214 normalize = self._normalize
1214 normalize = self._normalize
1215 else:
1215 else:
1216 normalize = None
1216 normalize = None
1217
1217
1218 files = sorted(match.files())
1218 files = sorted(match.files())
1219 subrepos.sort()
1219 subrepos.sort()
1220 i, j = 0, 0
1220 i, j = 0, 0
1221 while i < len(files) and j < len(subrepos):
1221 while i < len(files) and j < len(subrepos):
1222 subpath = subrepos[j] + b"/"
1222 subpath = subrepos[j] + b"/"
1223 if files[i] < subpath:
1223 if files[i] < subpath:
1224 i += 1
1224 i += 1
1225 continue
1225 continue
1226 while i < len(files) and files[i].startswith(subpath):
1226 while i < len(files) and files[i].startswith(subpath):
1227 del files[i]
1227 del files[i]
1228 j += 1
1228 j += 1
1229
1229
1230 if not files or b'' in files:
1230 if not files or b'' in files:
1231 files = [b'']
1231 files = [b'']
1232 # constructing the foldmap is expensive, so don't do it for the
1232 # constructing the foldmap is expensive, so don't do it for the
1233 # common case where files is ['']
1233 # common case where files is ['']
1234 normalize = None
1234 normalize = None
1235 results = dict.fromkeys(subrepos)
1235 results = dict.fromkeys(subrepos)
1236 results[b'.hg'] = None
1236 results[b'.hg'] = None
1237
1237
1238 for ff in files:
1238 for ff in files:
1239 if normalize:
1239 if normalize:
1240 nf = normalize(ff, False, True)
1240 nf = normalize(ff, False, True)
1241 else:
1241 else:
1242 nf = ff
1242 nf = ff
1243 if nf in results:
1243 if nf in results:
1244 continue
1244 continue
1245
1245
1246 try:
1246 try:
1247 st = lstat(join(nf))
1247 st = lstat(join(nf))
1248 kind = getkind(st.st_mode)
1248 kind = getkind(st.st_mode)
1249 if kind == dirkind:
1249 if kind == dirkind:
1250 if nf in dmap:
1250 if nf in dmap:
1251 # file replaced by dir on disk but still in dirstate
1251 # file replaced by dir on disk but still in dirstate
1252 results[nf] = None
1252 results[nf] = None
1253 foundadd((nf, ff))
1253 foundadd((nf, ff))
1254 elif kind == regkind or kind == lnkkind:
1254 elif kind == regkind or kind == lnkkind:
1255 results[nf] = st
1255 results[nf] = st
1256 else:
1256 else:
1257 badfn(ff, badtype(kind))
1257 badfn(ff, badtype(kind))
1258 if nf in dmap:
1258 if nf in dmap:
1259 results[nf] = None
1259 results[nf] = None
1260 except OSError as inst:
1260 except OSError as inst:
1261 # nf not found on disk - it is dirstate only
1261 # nf not found on disk - it is dirstate only
1262 if nf in dmap: # does it exactly match a missing file?
1262 if nf in dmap: # does it exactly match a missing file?
1263 results[nf] = None
1263 results[nf] = None
1264 else: # does it match a missing directory?
1264 else: # does it match a missing directory?
1265 if self._map.hasdir(nf):
1265 if self._map.hasdir(nf):
1266 notfoundadd(nf)
1266 notfoundadd(nf)
1267 else:
1267 else:
1268 badfn(ff, encoding.strtolocal(inst.strerror))
1268 badfn(ff, encoding.strtolocal(inst.strerror))
1269
1269
1270 # match.files() may contain explicitly-specified paths that shouldn't
1270 # match.files() may contain explicitly-specified paths that shouldn't
1271 # be taken; drop them from the list of files found. dirsfound/notfound
1271 # be taken; drop them from the list of files found. dirsfound/notfound
1272 # aren't filtered here because they will be tested later.
1272 # aren't filtered here because they will be tested later.
1273 if match.anypats():
1273 if match.anypats():
1274 for f in list(results):
1274 for f in list(results):
1275 if f == b'.hg' or f in subrepos:
1275 if f == b'.hg' or f in subrepos:
1276 # keep sentinel to disable further out-of-repo walks
1276 # keep sentinel to disable further out-of-repo walks
1277 continue
1277 continue
1278 if not match(f):
1278 if not match(f):
1279 del results[f]
1279 del results[f]
1280
1280
1281 # Case insensitive filesystems cannot rely on lstat() failing to detect
1281 # Case insensitive filesystems cannot rely on lstat() failing to detect
1282 # a case-only rename. Prune the stat object for any file that does not
1282 # a case-only rename. Prune the stat object for any file that does not
1283 # match the case in the filesystem, if there are multiple files that
1283 # match the case in the filesystem, if there are multiple files that
1284 # normalize to the same path.
1284 # normalize to the same path.
1285 if match.isexact() and self._checkcase:
1285 if match.isexact() and self._checkcase:
1286 normed = {}
1286 normed = {}
1287
1287
1288 for f, st in results.items():
1288 for f, st in results.items():
1289 if st is None:
1289 if st is None:
1290 continue
1290 continue
1291
1291
1292 nc = util.normcase(f)
1292 nc = util.normcase(f)
1293 paths = normed.get(nc)
1293 paths = normed.get(nc)
1294
1294
1295 if paths is None:
1295 if paths is None:
1296 paths = set()
1296 paths = set()
1297 normed[nc] = paths
1297 normed[nc] = paths
1298
1298
1299 paths.add(f)
1299 paths.add(f)
1300
1300
1301 for norm, paths in normed.items():
1301 for norm, paths in normed.items():
1302 if len(paths) > 1:
1302 if len(paths) > 1:
1303 for path in paths:
1303 for path in paths:
1304 folded = self._discoverpath(
1304 folded = self._discoverpath(
1305 path, norm, True, None, self._map.dirfoldmap
1305 path, norm, True, None, self._map.dirfoldmap
1306 )
1306 )
1307 if path != folded:
1307 if path != folded:
1308 results[path] = None
1308 results[path] = None
1309
1309
1310 return results, dirsfound, dirsnotfound
1310 return results, dirsfound, dirsnotfound
1311
1311
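
The two-pointer loop above prunes, in a single pass over two sorted lists, every requested file that lives inside a subrepo. The same idea standalone:

    def prune_subrepo_files(files, subrepos):
        # both inputs are sorted, mirroring sorted(match.files())/subrepos.sort()
        files = sorted(files)
        subrepos = sorted(subrepos)
        i = j = 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b'/'
            if files[i] < subpath:
                i += 1                      # file sorts before this subrepo: keep it
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]                # file is inside the subrepo: drop it
            j += 1                          # done with this subrepo
        return files

    assert prune_subrepo_files([b'a', b'sub/x', b'z'], [b'sub']) == [b'a', b'z']
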
1312 def walk(self, match, subrepos, unknown, ignored, full=True):
1312 def walk(self, match, subrepos, unknown, ignored, full=True):
1313 """
1313 """
1314 Walk recursively through the directory tree, finding all files
1314 Walk recursively through the directory tree, finding all files
1315 matched by match.
1315 matched by match.
1316
1316
1317 If full is False, maybe skip some known-clean files.
1317 If full is False, maybe skip some known-clean files.
1318
1318
1319 Return a dict mapping filename to stat-like object (either
1319 Return a dict mapping filename to stat-like object (either
1320 mercurial.osutil.stat instance or return value of os.stat()).
1320 mercurial.osutil.stat instance or return value of os.stat()).
1321
1321
1322 """
1322 """
1323 # full is a flag that extensions that hook into walk can use -- this
1323 # full is a flag that extensions that hook into walk can use -- this
1324 # implementation doesn't use it at all. This satisfies the contract
1324 # implementation doesn't use it at all. This satisfies the contract
1325 # because we only guarantee a "maybe".
1325 # because we only guarantee a "maybe".
1326
1326
1327 if ignored:
1327 if ignored:
1328 ignore = util.never
1328 ignore = util.never
1329 dirignore = util.never
1329 dirignore = util.never
1330 elif unknown:
1330 elif unknown:
1331 ignore = self._ignore
1331 ignore = self._ignore
1332 dirignore = self._dirignore
1332 dirignore = self._dirignore
1333 else:
1333 else:
1334 # if not unknown and not ignored, drop dir recursion and step 2
1334 # if not unknown and not ignored, drop dir recursion and step 2
1335 ignore = util.always
1335 ignore = util.always
1336 dirignore = util.always
1336 dirignore = util.always
1337
1337
1338 if self._sparsematchfn is not None:
1338 if self._sparsematchfn is not None:
1339 em = matchmod.exact(match.files())
1339 em = matchmod.exact(match.files())
1340 sm = matchmod.unionmatcher([self._sparsematcher, em])
1340 sm = matchmod.unionmatcher([self._sparsematcher, em])
1341 match = matchmod.intersectmatchers(match, sm)
1341 match = matchmod.intersectmatchers(match, sm)
1342
1342
1343 matchfn = match.matchfn
1343 matchfn = match.matchfn
1344 matchalways = match.always()
1344 matchalways = match.always()
1345 matchtdir = match.traversedir
1345 matchtdir = match.traversedir
1346 dmap = self._map
1346 dmap = self._map
1347 listdir = util.listdir
1347 listdir = util.listdir
1348 lstat = os.lstat
1348 lstat = os.lstat
1349 dirkind = stat.S_IFDIR
1349 dirkind = stat.S_IFDIR
1350 regkind = stat.S_IFREG
1350 regkind = stat.S_IFREG
1351 lnkkind = stat.S_IFLNK
1351 lnkkind = stat.S_IFLNK
1352 join = self._join
1352 join = self._join
1353
1353
1354 exact = skipstep3 = False
1354 exact = skipstep3 = False
1355 if match.isexact(): # match.exact
1355 if match.isexact(): # match.exact
1356 exact = True
1356 exact = True
1357 dirignore = util.always # skip step 2
1357 dirignore = util.always # skip step 2
1358 elif match.prefix(): # match.match, no patterns
1358 elif match.prefix(): # match.match, no patterns
1359 skipstep3 = True
1359 skipstep3 = True
1360
1360
1361 if not exact and self._checkcase:
1361 if not exact and self._checkcase:
1362 normalize = self._normalize
1362 normalize = self._normalize
1363 normalizefile = self._normalizefile
1363 normalizefile = self._normalizefile
1364 skipstep3 = False
1364 skipstep3 = False
1365 else:
1365 else:
1366 normalize = self._normalize
1366 normalize = self._normalize
1367 normalizefile = None
1367 normalizefile = None
1368
1368
1369 # step 1: find all explicit files
1369 # step 1: find all explicit files
1370 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1370 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1371 if matchtdir:
1371 if matchtdir:
1372 for d in work:
1372 for d in work:
1373 matchtdir(d[0])
1373 matchtdir(d[0])
1374 for d in dirsnotfound:
1374 for d in dirsnotfound:
1375 matchtdir(d)
1375 matchtdir(d)
1376
1376
1377 skipstep3 = skipstep3 and not (work or dirsnotfound)
1377 skipstep3 = skipstep3 and not (work or dirsnotfound)
1378 work = [d for d in work if not dirignore(d[0])]
1378 work = [d for d in work if not dirignore(d[0])]
1379
1379
1380 # step 2: visit subdirectories
1380 # step 2: visit subdirectories
1381 def traverse(work, alreadynormed):
1381 def traverse(work, alreadynormed):
1382 wadd = work.append
1382 wadd = work.append
1383 while work:
1383 while work:
1384 tracing.counter('dirstate.walk work', len(work))
1384 tracing.counter('dirstate.walk work', len(work))
1385 nd = work.pop()
1385 nd = work.pop()
1386 visitentries = match.visitchildrenset(nd)
1386 visitentries = match.visitchildrenset(nd)
1387 if not visitentries:
1387 if not visitentries:
1388 continue
1388 continue
1389 if visitentries == b'this' or visitentries == b'all':
1389 if visitentries == b'this' or visitentries == b'all':
1390 visitentries = None
1390 visitentries = None
1391 skip = None
1391 skip = None
1392 if nd != b'':
1392 if nd != b'':
1393 skip = b'.hg'
1393 skip = b'.hg'
1394 try:
1394 try:
1395 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1395 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1396 entries = listdir(join(nd), stat=True, skip=skip)
1396 entries = listdir(join(nd), stat=True, skip=skip)
1397 except (PermissionError, FileNotFoundError) as inst:
1397 except (PermissionError, FileNotFoundError) as inst:
1398 match.bad(
1398 match.bad(
1399 self.pathto(nd), encoding.strtolocal(inst.strerror)
1399 self.pathto(nd), encoding.strtolocal(inst.strerror)
1400 )
1400 )
1401 continue
1401 continue
1402 for f, kind, st in entries:
1402 for f, kind, st in entries:
1403 # Some matchers may return files in the visitentries set,
1403 # Some matchers may return files in the visitentries set,
1404 # instead of 'this', if the matcher explicitly mentions them
1404 # instead of 'this', if the matcher explicitly mentions them
1405 # and is not an exactmatcher. This is acceptable; we do not
1405 # and is not an exactmatcher. This is acceptable; we do not
1406 # make any hard assumptions about file-or-directory below
1406 # make any hard assumptions about file-or-directory below
1407 # based on the presence of `f` in visitentries. If
1407 # based on the presence of `f` in visitentries. If
1408 # visitchildrenset returned a set, we can always skip the
1408 # visitchildrenset returned a set, we can always skip the
1409 # entries *not* in the set it provided regardless of whether
1409 # entries *not* in the set it provided regardless of whether
1410 # they're actually a file or a directory.
1410 # they're actually a file or a directory.
1411 if visitentries and f not in visitentries:
1411 if visitentries and f not in visitentries:
1412 continue
1412 continue
1413 if normalizefile:
1413 if normalizefile:
1414 # even though f might be a directory, we're only
1414 # even though f might be a directory, we're only
1415 # interested in comparing it to files currently in the
1415 # interested in comparing it to files currently in the
1416 # dmap -- therefore normalizefile is enough
1416 # dmap -- therefore normalizefile is enough
1417 nf = normalizefile(
1417 nf = normalizefile(
1418 nd and (nd + b"/" + f) or f, True, True
1418 nd and (nd + b"/" + f) or f, True, True
1419 )
1419 )
1420 else:
1420 else:
1421 nf = nd and (nd + b"/" + f) or f
1421 nf = nd and (nd + b"/" + f) or f
1422 if nf not in results:
1422 if nf not in results:
1423 if kind == dirkind:
1423 if kind == dirkind:
1424 if not ignore(nf):
1424 if not ignore(nf):
1425 if matchtdir:
1425 if matchtdir:
1426 matchtdir(nf)
1426 matchtdir(nf)
1427 wadd(nf)
1427 wadd(nf)
1428 if nf in dmap and (matchalways or matchfn(nf)):
1428 if nf in dmap and (matchalways or matchfn(nf)):
1429 results[nf] = None
1429 results[nf] = None
1430 elif kind == regkind or kind == lnkkind:
1430 elif kind == regkind or kind == lnkkind:
1431 if nf in dmap:
1431 if nf in dmap:
1432 if matchalways or matchfn(nf):
1432 if matchalways or matchfn(nf):
1433 results[nf] = st
1433 results[nf] = st
1434 elif (matchalways or matchfn(nf)) and not ignore(
1434 elif (matchalways or matchfn(nf)) and not ignore(
1435 nf
1435 nf
1436 ):
1436 ):
1437 # unknown file -- normalize if necessary
1437 # unknown file -- normalize if necessary
1438 if not alreadynormed:
1438 if not alreadynormed:
1439 nf = normalize(nf, False, True)
1439 nf = normalize(nf, False, True)
1440 results[nf] = st
1440 results[nf] = st
1441 elif nf in dmap and (matchalways or matchfn(nf)):
1441 elif nf in dmap and (matchalways or matchfn(nf)):
1442 results[nf] = None
1442 results[nf] = None
1443
1443
1444 for nd, d in work:
1444 for nd, d in work:
1445 # alreadynormed means that processwork doesn't have to do any
1445 # alreadynormed means that processwork doesn't have to do any
1446 # expensive directory normalization
1446 # expensive directory normalization
1447 alreadynormed = not normalize or nd == d
1447 alreadynormed = not normalize or nd == d
1448 traverse([d], alreadynormed)
1448 traverse([d], alreadynormed)
1449
1449
1450 for s in subrepos:
1450 for s in subrepos:
1451 del results[s]
1451 del results[s]
1452 del results[b'.hg']
1452 del results[b'.hg']
1453
1453
1454 # step 3: visit remaining files from dmap
1454 # step 3: visit remaining files from dmap
1455 if not skipstep3 and not exact:
1455 if not skipstep3 and not exact:
1456 # If a dmap file is not in results yet, it was either
1456 # If a dmap file is not in results yet, it was either
1457 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1457 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1458 # symlink directory.
1458 # symlink directory.
1459 if not results and matchalways:
1459 if not results and matchalways:
1460 visit = [f for f in dmap]
1460 visit = [f for f in dmap]
1461 else:
1461 else:
1462 visit = [f for f in dmap if f not in results and matchfn(f)]
1462 visit = [f for f in dmap if f not in results and matchfn(f)]
1463 visit.sort()
1463 visit.sort()
1464
1464
1465 if unknown:
1465 if unknown:
1466 # unknown == True means we walked all dirs under the roots
1466 # unknown == True means we walked all dirs under the roots
1467 # that weren't ignored, and everything that matched was stat'ed
1467 # that weren't ignored, and everything that matched was stat'ed
1468 # and is already in results.
1468 # and is already in results.
1469 # The rest must thus be ignored or under a symlink.
1469 # The rest must thus be ignored or under a symlink.
1470 audit_path = pathutil.pathauditor(self._root, cached=True)
1470 audit_path = pathutil.pathauditor(self._root, cached=True)
1471
1471
1472 for nf in iter(visit):
1472 for nf in iter(visit):
1473 # If a stat for the same file was already added with a
1473 # If a stat for the same file was already added with a
1474 # different case, don't add one for this, since that would
1474 # different case, don't add one for this, since that would
1475 # make it appear as if the file exists under both names
1475 # make it appear as if the file exists under both names
1476 # on disk.
1476 # on disk.
1477 if (
1477 if (
1478 normalizefile
1478 normalizefile
1479 and normalizefile(nf, True, True) in results
1479 and normalizefile(nf, True, True) in results
1480 ):
1480 ):
1481 results[nf] = None
1481 results[nf] = None
1482 # Report ignored items in the dmap as long as they are not
1482 # Report ignored items in the dmap as long as they are not
1483 # under a symlink directory.
1483 # under a symlink directory.
1484 elif audit_path.check(nf):
1484 elif audit_path.check(nf):
1485 try:
1485 try:
1486 results[nf] = lstat(join(nf))
1486 results[nf] = lstat(join(nf))
1487 # file was just ignored, no links, and exists
1487 # file was just ignored, no links, and exists
1488 except OSError:
1488 except OSError:
1489 # file doesn't exist
1489 # file doesn't exist
1490 results[nf] = None
1490 results[nf] = None
1491 else:
1491 else:
1492 # It's either missing or under a symlink directory
1492 # It's either missing or under a symlink directory
1493 # which we in this case report as missing
1493 # which we in this case report as missing
1494 results[nf] = None
1494 results[nf] = None
1495 else:
1495 else:
1496 # We may not have walked the full directory tree above,
1496 # We may not have walked the full directory tree above,
1497 # so stat and check everything we missed.
1497 # so stat and check everything we missed.
1498 iv = iter(visit)
1498 iv = iter(visit)
1499 for st in util.statfiles([join(i) for i in visit]):
1499 for st in util.statfiles([join(i) for i in visit]):
1500 results[next(iv)] = st
1500 results[next(iv)] = st
1501 return results
1501 return results
1502
1502
1503 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1503 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1504 if self._sparsematchfn is not None:
1504 if self._sparsematchfn is not None:
1505 em = matchmod.exact(matcher.files())
1505 em = matchmod.exact(matcher.files())
1506 sm = matchmod.unionmatcher([self._sparsematcher, em])
1506 sm = matchmod.unionmatcher([self._sparsematcher, em])
1507 matcher = matchmod.intersectmatchers(matcher, sm)
1507 matcher = matchmod.intersectmatchers(matcher, sm)
1508 # Force Rayon (Rust parallelism library) to respect the number of
1508 # Force Rayon (Rust parallelism library) to respect the number of
1509 # workers. This is a temporary workaround until Rust code knows
1509 # workers. This is a temporary workaround until Rust code knows
1510 # how to read the config file.
1510 # how to read the config file.
1511 numcpus = self._ui.configint(b"worker", b"numcpus")
1511 numcpus = self._ui.configint(b"worker", b"numcpus")
1512 if numcpus is not None:
1512 if numcpus is not None:
1513 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1513 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1514
1514
1515 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1515 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1516 if not workers_enabled:
1516 if not workers_enabled:
1517 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1517 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1518
1518
1519 (
1519 (
1520 lookup,
1520 lookup,
1521 modified,
1521 modified,
1522 added,
1522 added,
1523 removed,
1523 removed,
1524 deleted,
1524 deleted,
1525 clean,
1525 clean,
1526 ignored,
1526 ignored,
1527 unknown,
1527 unknown,
1528 warnings,
1528 warnings,
1529 bad,
1529 bad,
1530 traversed,
1530 traversed,
1531 dirty,
1531 dirty,
1532 ) = rustmod.status(
1532 ) = rustmod.status(
1533 self._map._map,
1533 self._map._map,
1534 matcher,
1534 matcher,
1535 self._rootdir,
1535 self._rootdir,
1536 self._ignorefiles(),
1536 self._ignorefiles(),
1537 self._checkexec,
1537 self._checkexec,
1538 bool(list_clean),
1538 bool(list_clean),
1539 bool(list_ignored),
1539 bool(list_ignored),
1540 bool(list_unknown),
1540 bool(list_unknown),
1541 bool(matcher.traversedir),
1541 bool(matcher.traversedir),
1542 )
1542 )
1543
1543
1544 self._dirty |= dirty
1544 self._dirty |= dirty
1545
1545
1546 if matcher.traversedir:
1546 if matcher.traversedir:
1547 for dir in traversed:
1547 for dir in traversed:
1548 matcher.traversedir(dir)
1548 matcher.traversedir(dir)
1549
1549
1550 if self._ui.warn:
1550 if self._ui.warn:
1551 for item in warnings:
1551 for item in warnings:
1552 if isinstance(item, tuple):
1552 if isinstance(item, tuple):
1553 file_path, syntax = item
1553 file_path, syntax = item
1554 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1554 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1555 file_path,
1555 file_path,
1556 syntax,
1556 syntax,
1557 )
1557 )
1558 self._ui.warn(msg)
1558 self._ui.warn(msg)
1559 else:
1559 else:
1560 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1560 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1561 self._ui.warn(
1561 self._ui.warn(
1562 msg
1562 msg
1563 % (
1563 % (
1564 pathutil.canonpath(
1564 pathutil.canonpath(
1565 self._rootdir, self._rootdir, item
1565 self._rootdir, self._rootdir, item
1566 ),
1566 ),
1567 b"No such file or directory",
1567 b"No such file or directory",
1568 )
1568 )
1569 )
1569 )
1570
1570
1571 for fn, message in bad:
1571 for fn, message in bad:
1572 matcher.bad(fn, encoding.strtolocal(message))
1572 matcher.bad(fn, encoding.strtolocal(message))
1573
1573
1574 status = scmutil.status(
1574 status = scmutil.status(
1575 modified=modified,
1575 modified=modified,
1576 added=added,
1576 added=added,
1577 removed=removed,
1577 removed=removed,
1578 deleted=deleted,
1578 deleted=deleted,
1579 unknown=unknown,
1579 unknown=unknown,
1580 ignored=ignored,
1580 ignored=ignored,
1581 clean=clean,
1581 clean=clean,
1582 )
1582 )
1583 return (lookup, status)
1583 return (lookup, status)
1584
1584
1585 def status(self, match, subrepos, ignored, clean, unknown):
1585 def status(self, match, subrepos, ignored, clean, unknown):
1586 """Determine the status of the working copy relative to the
1586 """Determine the status of the working copy relative to the
1587 dirstate and return a pair of (unsure, status), where status is of type
1587 dirstate and return a pair of (unsure, status), where status is of type
1588 scmutil.status and:
1588 scmutil.status and:
1589
1589
1590 unsure:
1590 unsure:
1591 files that might have been modified since the dirstate was
1591 files that might have been modified since the dirstate was
1592 written, but need to be read to be sure (size is the same
1592 written, but need to be read to be sure (size is the same
1593 but mtime differs)
1593 but mtime differs)
1594 status.modified:
1594 status.modified:
1595 files that have definitely been modified since the dirstate
1595 files that have definitely been modified since the dirstate
1596 was written (different size or mode)
1596 was written (different size or mode)
1597 status.clean:
1597 status.clean:
1598 files that have definitely not been modified since the
1598 files that have definitely not been modified since the
1599 dirstate was written
1599 dirstate was written
1600 """
1600 """
1601 if not self._running_status:
1601 if not self._running_status:
1602 msg = "Calling `status` outside a `running_status` context"
1602 msg = "Calling `status` outside a `running_status` context"
1603 raise error.ProgrammingError(msg)
1603 raise error.ProgrammingError(msg)
1604 listignored, listclean, listunknown = ignored, clean, unknown
1604 listignored, listclean, listunknown = ignored, clean, unknown
1605 lookup, modified, added, unknown, ignored = [], [], [], [], []
1605 lookup, modified, added, unknown, ignored = [], [], [], [], []
1606 removed, deleted, clean = [], [], []
1606 removed, deleted, clean = [], [], []
1607
1607
1608 dmap = self._map
1608 dmap = self._map
1609 dmap.preload()
1609 dmap.preload()
1610
1610
1611 use_rust = True
1611 use_rust = True
1612
1612
1613 allowed_matchers = (
1613 allowed_matchers = (
1614 matchmod.alwaysmatcher,
1614 matchmod.alwaysmatcher,
1615 matchmod.differencematcher,
1615 matchmod.differencematcher,
1616 matchmod.exactmatcher,
1616 matchmod.exactmatcher,
1617 matchmod.includematcher,
1617 matchmod.includematcher,
1618 matchmod.intersectionmatcher,
1618 matchmod.intersectionmatcher,
1619 matchmod.nevermatcher,
1619 matchmod.nevermatcher,
1620 matchmod.unionmatcher,
1620 matchmod.unionmatcher,
1621 )
1621 )
1622
1622
1623 if rustmod is None:
1623 if rustmod is None:
1624 use_rust = False
1624 use_rust = False
1625 elif self._checkcase:
1625 elif self._checkcase:
1626 # Case-insensitive filesystems are not handled yet
1626 # Case-insensitive filesystems are not handled yet
1627 use_rust = False
1627 use_rust = False
1628 elif subrepos:
1628 elif subrepos:
1629 use_rust = False
1629 use_rust = False
1630 elif not isinstance(match, allowed_matchers):
1630 elif not isinstance(match, allowed_matchers):
1631 # Some matchers have yet to be implemented
1631 # Some matchers have yet to be implemented
1632 use_rust = False
1632 use_rust = False
1633
1633
1634 # Get the time from the filesystem so we can disambiguate files that
1634 # Get the time from the filesystem so we can disambiguate files that
1635 # appear modified in the present or future.
1635 # appear modified in the present or future.
1636 try:
1636 try:
1637 mtime_boundary = timestamp.get_fs_now(self._opener)
1637 mtime_boundary = timestamp.get_fs_now(self._opener)
1638 except OSError:
1638 except OSError:
1639 # In largefiles or readonly context
1639 # In largefiles or readonly context
1640 mtime_boundary = None
1640 mtime_boundary = None
1641
1641
1642 if use_rust:
1642 if use_rust:
1643 try:
1643 try:
1644 res = self._rust_status(
1644 res = self._rust_status(
1645 match, listclean, listignored, listunknown
1645 match, listclean, listignored, listunknown
1646 )
1646 )
1647 return res + (mtime_boundary,)
1647 return res + (mtime_boundary,)
1648 except rustmod.FallbackError:
1648 except rustmod.FallbackError:
1649 pass
1649 pass
1650
1650
1651 def noop(f):
1651 def noop(f):
1652 pass
1652 pass
1653
1653
1654 dcontains = dmap.__contains__
1654 dcontains = dmap.__contains__
1655 dget = dmap.__getitem__
1655 dget = dmap.__getitem__
1656 ladd = lookup.append # aka "unsure"
1656 ladd = lookup.append # aka "unsure"
1657 madd = modified.append
1657 madd = modified.append
1658 aadd = added.append
1658 aadd = added.append
1659 uadd = unknown.append if listunknown else noop
1659 uadd = unknown.append if listunknown else noop
1660 iadd = ignored.append if listignored else noop
1660 iadd = ignored.append if listignored else noop
1661 radd = removed.append
1661 radd = removed.append
1662 dadd = deleted.append
1662 dadd = deleted.append
1663 cadd = clean.append if listclean else noop
1663 cadd = clean.append if listclean else noop
1664 mexact = match.exact
1664 mexact = match.exact
1665 dirignore = self._dirignore
1665 dirignore = self._dirignore
1666 checkexec = self._checkexec
1666 checkexec = self._checkexec
1667 checklink = self._checklink
1667 checklink = self._checklink
1668 copymap = self._map.copymap
1668 copymap = self._map.copymap
1669
1669
1670 # We need to do full walks when either
1670 # We need to do full walks when either
1671 # - we're listing all clean files, or
1671 # - we're listing all clean files, or
1672 # - match.traversedir does something, because match.traversedir should
1672 # - match.traversedir does something, because match.traversedir should
1673 # be called for every dir in the working dir
1673 # be called for every dir in the working dir
1674 full = listclean or match.traversedir is not None
1674 full = listclean or match.traversedir is not None
1675 for fn, st in self.walk(
1675 for fn, st in self.walk(
1676 match, subrepos, listunknown, listignored, full=full
1676 match, subrepos, listunknown, listignored, full=full
1677 ).items():
1677 ).items():
1678 if not dcontains(fn):
1678 if not dcontains(fn):
1679 if (listignored or mexact(fn)) and dirignore(fn):
1679 if (listignored or mexact(fn)) and dirignore(fn):
1680 if listignored:
1680 if listignored:
1681 iadd(fn)
1681 iadd(fn)
1682 else:
1682 else:
1683 uadd(fn)
1683 uadd(fn)
1684 continue
1684 continue
1685
1685
1686 t = dget(fn)
1686 t = dget(fn)
1687 mode = t.mode
1687 mode = t.mode
1688 size = t.size
1688 size = t.size
1689
1689
1690 if not st and t.tracked:
1690 if not st and t.tracked:
1691 dadd(fn)
1691 dadd(fn)
1692 elif t.p2_info:
1692 elif t.p2_info:
1693 madd(fn)
1693 madd(fn)
1694 elif t.added:
1694 elif t.added:
1695 aadd(fn)
1695 aadd(fn)
1696 elif t.removed:
1696 elif t.removed:
1697 radd(fn)
1697 radd(fn)
1698 elif t.tracked:
1698 elif t.tracked:
1699 if not checklink and t.has_fallback_symlink:
1699 if not checklink and t.has_fallback_symlink:
1700 # If the file system does not support symlink, the mode
1700 # If the file system does not support symlink, the mode
1701 # might not be correctly stored in the dirstate, so do not
1701 # might not be correctly stored in the dirstate, so do not
1702 # trust it.
1702 # trust it.
1703 ladd(fn)
1703 ladd(fn)
1704 elif not checkexec and t.has_fallback_exec:
1704 elif not checkexec and t.has_fallback_exec:
1705 # If the file system does not support exec bits, the mode
1705 # If the file system does not support exec bits, the mode
1706 # might not be correctly stored in the dirstate, so do not
1706 # might not be correctly stored in the dirstate, so do not
1707 # trust it.
1707 # trust it.
1708 ladd(fn)
1708 ladd(fn)
1709 elif (
1709 elif (
1710 size >= 0
1710 size >= 0
1711 and (
1711 and (
1712 (size != st.st_size and size != st.st_size & _rangemask)
1712 (size != st.st_size and size != st.st_size & _rangemask)
1713 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1713 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1714 )
1714 )
1715 or fn in copymap
1715 or fn in copymap
1716 ):
1716 ):
1717 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1717 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1718 # issue6456: Size returned may be longer due to
1718 # issue6456: Size returned may be longer due to
1719 # encryption on EXT-4 fscrypt, undecided.
1719 # encryption on EXT-4 fscrypt, undecided.
1720 ladd(fn)
1720 ladd(fn)
1721 else:
1721 else:
1722 madd(fn)
1722 madd(fn)
1723 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1723 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1724 # There might be a change in the future if for example the
1724 # There might be a change in the future if for example the
1725 # internal clock is off, but this is a case where the issues
1725 # internal clock is off, but this is a case where the issues
1726 # the user would face would be a lot worse and there is
1726 # the user would face would be a lot worse and there is
1727 # nothing we can really do.
1727 # nothing we can really do.
1728 ladd(fn)
1728 ladd(fn)
1729 elif listclean:
1729 elif listclean:
1730 cadd(fn)
1730 cadd(fn)
1731 status = scmutil.status(
1731 status = scmutil.status(
1732 modified, added, removed, deleted, unknown, ignored, clean
1732 modified, added, removed, deleted, unknown, ignored, clean
1733 )
1733 )
1734 return (lookup, status, mtime_boundary)
1734 return (lookup, status, mtime_boundary)
1735
1735
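
The unsure/modified/clean split documented above reduces to comparing the stat result with what the dirstate recorded. A condensed, hypothetical decision function (ignoring exec bits, copies, fallback flags and the `_rangemask` wraparound):

    def classify(entry_size, entry_mtime, st_size, st_mtime):
        if entry_size != st_size:
            return 'modified'   # size differs: definitely changed
        if entry_mtime != st_mtime:
            return 'unsure'     # same size, new mtime: content must be read
        return 'clean'          # size and mtime both match the dirstate

    assert classify(10, 100, 11, 100) == 'modified'
    assert classify(10, 100, 10, 200) == 'unsure'
    assert classify(10, 100, 10, 100) == 'clean'
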
1736 def matches(self, match):
1736 def matches(self, match):
1737 """
1737 """
1738 return files in the dirstate (in whatever state) filtered by match
1738 return files in the dirstate (in whatever state) filtered by match
1739 """
1739 """
1740 dmap = self._map
1740 dmap = self._map
1741 if rustmod is not None:
1741 if rustmod is not None:
1742 dmap = self._map._map
1742 dmap = self._map._map
1743
1743
1744 if match.always():
1744 if match.always():
1745 return dmap.keys()
1745 return dmap.keys()
1746 files = match.files()
1746 files = match.files()
1747 if match.isexact():
1747 if match.isexact():
1748 # fast path -- filter the other way around, since typically files is
1748 # fast path -- filter the other way around, since typically files is
1749 # much smaller than dmap
1749 # much smaller than dmap
1750 return [f for f in files if f in dmap]
1750 return [f for f in files if f in dmap]
1751 if match.prefix() and all(fn in dmap for fn in files):
1751 if match.prefix() and all(fn in dmap for fn in files):
1752 # fast path -- all the values are known to be files, so just return
1752 # fast path -- all the values are known to be files, so just return
1753 # that
1753 # that
1754 return list(files)
1754 return list(files)
1755 return [f for f in dmap if match(f)]
1755 return [f for f in dmap if match(f)]
1756
1756
1757 def _actualfilename(self, tr):
1757 def _actualfilename(self, tr):
1758 if tr:
1758 if tr:
1759 return self._pendingfilename
1759 return self._pendingfilename
1760 else:
1760 else:
1761 return self._filename
1761 return self._filename
1762
1762
1763 def all_file_names(self):
1763 def all_file_names(self):
1764 """list all filename currently used by this dirstate
1764 """list all filename currently used by this dirstate
1765
1765
1766 This is only used to do `hg rollback` related backups in the transaction
1766 This is only used to do `hg rollback` related backups in the transaction
1767 """
1767 """
1768 if not self._opener.exists(self._filename):
1768 files = [b'branch']
1769 # no data ever written to disk yet
1769 if self._opener.exists(self._filename):
1770 return ()
1770 files.append(self._filename)
1771 elif self._use_dirstate_v2:
1771 if self._use_dirstate_v2:
1772 return (
1772 files.append(self._map.docket.data_filename())
1773 self._filename,
1773 return tuple(files)
1774 self._map.docket.data_filename(),
1775 )
1776 else:
1777 return (self._filename,)
1778
1774
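
After this change the returned tuple always starts with b'branch', whether or not a dirstate file exists yet, which is what lets the transaction layer stop backing up the branch file separately. The three possible shapes, as a hypothetical illustration (b'dirstate-data' stands in for docket.data_filename()):

    def all_file_names(dirstate_exists, use_v2):
        files = [b'branch']
        if dirstate_exists:
            files.append(b'dirstate')
        if use_v2:
            files.append(b'dirstate-data')
        return tuple(files)

    assert all_file_names(False, False) == (b'branch',)
    assert all_file_names(True, False) == (b'branch', b'dirstate')
    assert all_file_names(True, True) == (b'branch', b'dirstate', b'dirstate-data')
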
1779 def verify(self, m1, m2, p1, narrow_matcher=None):
1775 def verify(self, m1, m2, p1, narrow_matcher=None):
1780 """
1776 """
1781 check the dirstate contents against the parent manifest and yield errors
1777 check the dirstate contents against the parent manifest and yield errors
1782 """
1778 """
1783 missing_from_p1 = _(
1779 missing_from_p1 = _(
1784 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1780 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1785 )
1781 )
1786 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1782 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1787 missing_from_ps = _(
1783 missing_from_ps = _(
1788 b"%s marked as modified, but not in either manifest\n"
1784 b"%s marked as modified, but not in either manifest\n"
1789 )
1785 )
1790 missing_from_ds = _(
1786 missing_from_ds = _(
1791 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1787 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1792 )
1788 )
1793 for f, entry in self.items():
1789 for f, entry in self.items():
1794 if entry.p1_tracked:
1790 if entry.p1_tracked:
1795 if entry.modified and f not in m1 and f not in m2:
1791 if entry.modified and f not in m1 and f not in m2:
1796 yield missing_from_ps % f
1792 yield missing_from_ps % f
1797 elif f not in m1:
1793 elif f not in m1:
1798 yield missing_from_p1 % (f, node.short(p1))
1794 yield missing_from_p1 % (f, node.short(p1))
1799 if entry.added and f in m1:
1795 if entry.added and f in m1:
1800 yield unexpected_in_p1 % f
1796 yield unexpected_in_p1 % f
1801 for f in m1:
1797 for f in m1:
1802 if narrow_matcher is not None and not narrow_matcher(f):
1798 if narrow_matcher is not None and not narrow_matcher(f):
1803 continue
1799 continue
1804 entry = self.get_entry(f)
1800 entry = self.get_entry(f)
1805 if not entry.p1_tracked:
1801 if not entry.p1_tracked:
1806 yield missing_from_ds % (f, node.short(p1))
1802 yield missing_from_ds % (f, node.short(p1))
@@ -1,3999 +1,3995 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import re
13 import re
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from concurrent import futures
18 from concurrent import futures
19 from typing import (
19 from typing import (
20 Optional,
20 Optional,
21 )
21 )
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullrev,
27 nullrev,
28 sha1nodeconstants,
28 sha1nodeconstants,
29 short,
29 short,
30 )
30 )
31 from .pycompat import (
31 from .pycompat import (
32 delattr,
32 delattr,
33 getattr,
33 getattr,
34 )
34 )
35 from . import (
35 from . import (
36 bookmarks,
36 bookmarks,
37 branchmap,
37 branchmap,
38 bundle2,
38 bundle2,
39 bundlecaches,
39 bundlecaches,
40 changegroup,
40 changegroup,
41 color,
41 color,
42 commit,
42 commit,
43 context,
43 context,
44 dirstate,
44 dirstate,
45 discovery,
45 discovery,
46 encoding,
46 encoding,
47 error,
47 error,
48 exchange,
48 exchange,
49 extensions,
49 extensions,
50 filelog,
50 filelog,
51 hook,
51 hook,
52 lock as lockmod,
52 lock as lockmod,
53 match as matchmod,
53 match as matchmod,
54 mergestate as mergestatemod,
54 mergestate as mergestatemod,
55 mergeutil,
55 mergeutil,
56 namespaces,
56 namespaces,
57 narrowspec,
57 narrowspec,
58 obsolete,
58 obsolete,
59 pathutil,
59 pathutil,
60 phases,
60 phases,
61 pushkey,
61 pushkey,
62 pycompat,
62 pycompat,
63 rcutil,
63 rcutil,
64 repoview,
64 repoview,
65 requirements as requirementsmod,
65 requirements as requirementsmod,
66 revlog,
66 revlog,
67 revset,
67 revset,
68 revsetlang,
68 revsetlang,
69 scmutil,
69 scmutil,
70 sparse,
70 sparse,
71 store as storemod,
71 store as storemod,
72 subrepoutil,
72 subrepoutil,
73 tags as tagsmod,
73 tags as tagsmod,
74 transaction,
74 transaction,
75 txnutil,
75 txnutil,
76 util,
76 util,
77 vfs as vfsmod,
77 vfs as vfsmod,
78 wireprototypes,
78 wireprototypes,
79 )
79 )
80
80
81 from .interfaces import (
81 from .interfaces import (
82 repository,
82 repository,
83 util as interfaceutil,
83 util as interfaceutil,
84 )
84 )
85
85
86 from .utils import (
86 from .utils import (
87 hashutil,
87 hashutil,
88 procutil,
88 procutil,
89 stringutil,
89 stringutil,
90 urlutil,
90 urlutil,
91 )
91 )
92
92
93 from .revlogutils import (
93 from .revlogutils import (
94 concurrency_checker as revlogchecker,
94 concurrency_checker as revlogchecker,
95 constants as revlogconst,
95 constants as revlogconst,
96 sidedata as sidedatamod,
96 sidedata as sidedatamod,
97 )
97 )
98
98
99 release = lockmod.release
99 release = lockmod.release
100 urlerr = util.urlerr
100 urlerr = util.urlerr
101 urlreq = util.urlreq
101 urlreq = util.urlreq
102
102
103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
104 b"^((dirstate|narrowspec.dirstate).*|branch$)"
104 b"^((dirstate|narrowspec.dirstate).*|branch$)"
105 )
105 )
106
106
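
This pattern now also covers the branch file, whose transaction backup is handled through the dirstate's `all_file_names` (see the dirstate change above). A quick check of what it skips:

    import re

    RE = re.compile(b"^((dirstate|narrowspec.dirstate).*|branch$)")

    assert RE.match(b'dirstate')
    assert RE.match(b'dirstate.pending')
    assert RE.match(b'narrowspec.dirstate')
    assert RE.match(b'branch')
    assert not RE.match(b'branch.cache')  # only the exact name 'branch' is skipped
    assert not RE.match(b'bookmarks')
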
107 # set of (path, vfs-location) tuples. vfs-location is:
107 # set of (path, vfs-location) tuples. vfs-location is:
108 # - 'plain' for vfs relative paths
108 # - 'plain' for vfs relative paths
109 # - '' for svfs relative paths
109 # - '' for svfs relative paths
110 _cachedfiles = set()
110 _cachedfiles = set()
111
111
112
112
113 class _basefilecache(scmutil.filecache):
113 class _basefilecache(scmutil.filecache):
114 """All filecache usage on repo are done for logic that should be unfiltered"""
114 """All filecache usage on repo are done for logic that should be unfiltered"""
115
115
116 def __get__(self, repo, type=None):
116 def __get__(self, repo, type=None):
117 if repo is None:
117 if repo is None:
118 return self
118 return self
119 # proxy to unfiltered __dict__ since filtered repo has no entry
119 # proxy to unfiltered __dict__ since filtered repo has no entry
120 unfi = repo.unfiltered()
120 unfi = repo.unfiltered()
121 try:
121 try:
122 return unfi.__dict__[self.sname]
122 return unfi.__dict__[self.sname]
123 except KeyError:
123 except KeyError:
124 pass
124 pass
125 return super(_basefilecache, self).__get__(unfi, type)
125 return super(_basefilecache, self).__get__(unfi, type)
126
126
127 def set(self, repo, value):
127 def set(self, repo, value):
128 return super(_basefilecache, self).set(repo.unfiltered(), value)
128 return super(_basefilecache, self).set(repo.unfiltered(), value)
129
129
130
130
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


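# Illustrative usage sketch (hypothetical property): on the repository
# class, a cached property backed by a tracked file is declared with one
# of these filecache decorators, e.g.:
#
#     @repofilecache(b'bookmarks')
#     def _bookmarks(self):
#         ...
#
# storecache below works the same way for files that live in .hg/store.

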
class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


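# Minimal usage sketch (``name`` stands for a cached property's key):
#
#     obj, cached = isfilecached(repo, name)
#     if cached:
#         ...  # obj is the cached value; no new load is triggered

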
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


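# Illustrative usage on the repository class (the method name is just an
# example):
#
#     @unfilteredmethod
#     def destroying(self):
#         ...  # always runs against the unfiltered repo

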
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


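# Sketch of how an executor is typically driven (names are illustrative);
# with this local executor, futures are resolved eagerly in callcommand():
#
#     with peer.commandexecutor() as executor:
#         f = executor.callcommand(b'lookup', {b'key': b'tip'})
#     node = f.result()

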
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None):
        super(localpeer, self).__init__(repo.ui, path=path)

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


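# Note: callers normally obtain a peer for a local repository through
# repo.peer() rather than instantiating localpeer directly, e.g.
# (illustrative):
#
#     peer = repo.peer()
#     assert peer.local() is not None

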
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


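# Illustrative sketch of an extension registering such a function (the
# requirement name is hypothetical):
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myextension-storage')
#
#     localrepo.featuresetupfuncs.add(featuresetup)

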
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())

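# For reference, .hg/requires holds one requirement per line; a typical
# modern repository might contain (exact contents vary by format options):
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store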

def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present, refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
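
    # The mismatch handling above is driven by user configuration; for
    # instance (illustrative hgrc snippet):
    #
    #     [share]
    #     safe-mismatch.source-not-safe = downgrade-abort
    #     safe-mismatch.source-safe = upgrade-abort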

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})
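
    # For a repository at /srv/repo with common requirements, the derived
    # type name would look something like (illustrative):
    #
    #     derivedrepo:/srv/repo<fncache,generaldelta,revlogv1,store>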

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


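# Illustrative sketch of an extension wrapping loadhgrc to read an extra,
# hypothetical config file:
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)

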
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns nothing on success; unrecognized requirements are reported by
    raising an exception.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


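# In summary, the store layout is chosen from the requirements as follows:
#
#     store + fncache (+ optional dotencode) -> fncachestore
#     store without fncache                  -> encodedstore
#     no store requirement (very old repos)  -> basicstore

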
1019 def resolvestorevfsoptions(ui, requirements, features):
1019 def resolvestorevfsoptions(ui, requirements, features):
1020 """Resolve the options to pass to the store vfs opener.
1020 """Resolve the options to pass to the store vfs opener.
1021
1021
1022 The returned dict is used to influence behavior of the storage layer.
1022 The returned dict is used to influence behavior of the storage layer.
1023 """
1023 """
1024 options = {}
1024 options = {}
1025
1025
1026 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1026 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1027 options[b'treemanifest'] = True
1027 options[b'treemanifest'] = True
1028
1028
1029 # experimental config: format.manifestcachesize
1029 # experimental config: format.manifestcachesize
1030 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1030 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1031 if manifestcachesize is not None:
1031 if manifestcachesize is not None:
1032 options[b'manifestcachesize'] = manifestcachesize
1032 options[b'manifestcachesize'] = manifestcachesize
1033
1033
1034 # In the absence of another requirement superseding a revlog-related
1034 # In the absence of another requirement superseding a revlog-related
1035 # requirement, we have to assume the repo is using revlog version 0.
1035 # requirement, we have to assume the repo is using revlog version 0.
1036 # This revlog format is super old and we don't bother trying to parse
1036 # This revlog format is super old and we don't bother trying to parse
1037 # opener options for it because those options wouldn't do anything
1037 # opener options for it because those options wouldn't do anything
1038 # meaningful on such old repos.
1038 # meaningful on such old repos.
1039 if (
1039 if (
1040 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1040 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1041 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1041 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1042 ):
1042 ):
1043 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1043 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1044 else: # explicitly mark repo as using revlogv0
1044 else: # explicitly mark repo as using revlogv0
1045 options[b'revlogv0'] = True
1045 options[b'revlogv0'] = True
1046
1046
1047 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1047 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1048 options[b'copies-storage'] = b'changeset-sidedata'
1048 options[b'copies-storage'] = b'changeset-sidedata'
1049 else:
1049 else:
1050 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1050 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1051 copiesextramode = (b'changeset-only', b'compatibility')
1051 copiesextramode = (b'changeset-only', b'compatibility')
1052 if writecopiesto in copiesextramode:
1052 if writecopiesto in copiesextramode:
1053 options[b'copies-storage'] = b'extra'
1053 options[b'copies-storage'] = b'extra'
1054
1054
1055 return options
1055 return options
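# For a typical modern repository the returned dict might resemble the
# following (an illustrative sketch; the exact keys depend on the
# requirements and configuration in effect):
#
#   {b'revlogv1': True, b'generaldelta': True, b'sparse-revlog': True,
#    b'copies-storage': b'changeset-sidedata', ...}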


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
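# The engine name is the third dash-separated field of the requirement, so a
# requirement such as `revlog-compression-zstd` selects the `zstd` engine:
#
#   >>> b'revlog-compression-zstd'.split(b'-', 2)[2]
#   b'zstd'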

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])
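# Example configuration (illustrative): zlib accepts levels 0-9 and zstd
# accepts levels 0-22; anything outside those ranges aborts repository
# opening.
#
#   [storage]
#   revlog.zlib.level = 6
#   revlog.zstd.level = 9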

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
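# Both `persistent-nodemap` and `dirstate-v2` share the same slow-path
# scheme. An illustrative configuration:
#
#   [storage]
#   revlog.persistent-nodemap.slow-path = warn
#   dirstate-v2.slow-path = abort
#
# `allow` silently falls back to the slower implementation, `warn` emits a
# warning first, and `abort` refuses to open the repository when the fast
# (e.g. Rust) implementation is unavailable.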


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
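# Selection sketch: narrow clones get a storage class whose filelogs are
# filtered by the narrowspec; everything else gets plain revlog filelogs.
#
#   makefilestorage({requirementsmod.NARROW_REQUIREMENT, ...}, feats)
#       -> revlognarrowfilestorage
#   makefilestorage({b'revlogv1'}, feats)  -> revlogfilestorage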


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
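# Because each entry stores ``lambda: makemain`` instead of ``makemain``
# itself, the factory is looked up when the lambda runs, so an extension
# that wraps the module-level function is picked up. A hypothetical sketch:
#
#   def wrappedmakemain(orig, **kwargs):
#       cls = orig(**kwargs)
#       return type('myrepocls', (cls,), {})
#
#   extensions.wrapfunction(localrepo, 'makemain', wrappedmakemain)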


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
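    # A hypothetical extension maintaining its own lock-free state file could
    # register it like this (the filename is made up for illustration):
    #
    #   localrepo.localrepository._wlockfreeprefix.add(b'myext-state')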

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
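    # With `devel.check-locks` enabled, an unguarded write surfaces as a
    # devel warning along these lines (illustrative output):
    #
    #   devel-warn: write with no wlock: "bookmarks" at: <file:line>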

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle: self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False
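    # Illustrative walk (a sketch): for path `<root>/sub/inner/f`, the loop
    # tries the prefixes `sub/inner/f`, `sub/inner`, and `sub` in turn; the
    # first prefix found in `ctx.substate` either matches exactly (a legal
    # nested repo) or delegates the remainder to that subrepo's
    # checknested().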

    def peer(self, path=None):
        return localpeer(self, path=path)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
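    # Usage sketch: per the docstring above, filtering is not recursive, so
    # both calls below yield a "served" view of the same repository:
    #
    #   repo.filtered(b'served')
    #   repo.filtered(b'visible').filtered(b'served')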

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race (see issue6303)
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid
1800
1800
1801 @storecache(narrowspec.FILENAME)
1801 @storecache(narrowspec.FILENAME)
1802 def narrowpats(self):
1802 def narrowpats(self):
1803 """matcher patterns for this repository's narrowspec
1803 """matcher patterns for this repository's narrowspec
1804
1804
1805 A tuple of (includes, excludes).
1805 A tuple of (includes, excludes).
1806 """
1806 """
1807 # the narrow management should probably move into its own object
1807 # the narrow management should probably move into its own object
1808 val = self._pending_narrow_pats
1808 val = self._pending_narrow_pats
1809 if val is None:
1809 if val is None:
1810 val = narrowspec.load(self)
1810 val = narrowspec.load(self)
1811 return val
1811 return val
1812
1812
1813 @storecache(narrowspec.FILENAME)
1813 @storecache(narrowspec.FILENAME)
1814 def _storenarrowmatch(self):
1814 def _storenarrowmatch(self):
1815 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1815 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1816 return matchmod.always()
1816 return matchmod.always()
1817 include, exclude = self.narrowpats
1817 include, exclude = self.narrowpats
1818 return narrowspec.match(self.root, include=include, exclude=exclude)
1818 return narrowspec.match(self.root, include=include, exclude=exclude)
1819
1819
1820 @storecache(narrowspec.FILENAME)
1820 @storecache(narrowspec.FILENAME)
1821 def _narrowmatch(self):
1821 def _narrowmatch(self):
1822 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1822 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1823 return matchmod.always()
1823 return matchmod.always()
1824 narrowspec.checkworkingcopynarrowspec(self)
1824 narrowspec.checkworkingcopynarrowspec(self)
1825 include, exclude = self.narrowpats
1825 include, exclude = self.narrowpats
1826 return narrowspec.match(self.root, include=include, exclude=exclude)
1826 return narrowspec.match(self.root, include=include, exclude=exclude)
1827
1827
    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

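    # Illustrative sketch (added for exposition): the indexing protocol above
    # accepts several changeid flavours. Assuming an existing `repo` instance
    # with at least three revisions:
    #
    #   ctx = repo[0]        # integer revision number
    #   ctx = repo[b'tip']   # symbolic name, resolved via the fast path
    #   ctx = repo[b'.']     # first parent of the working copy
    #   wctx = repo[None]    # workingctx for the working directory
    #   ctxs = repo[0:3]     # slice -> list of changectx, filtered revs skipped
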
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

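    # Illustrative sketch (added for exposition): `revs()` with %-formatting.
    # Assuming an existing `repo` instance:
    #
    #   merges = repo.revs(b'merge()')            # smartset of merge revs
    #   heads = repo.revs(b'heads(%ld)', [0, 1])  # %ld escapes a list of revs
    #   for rev in merges:
    #       ...                                   # integer revision numbers
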
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

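    # Illustrative sketch (added for exposition): `set()` yields changectx
    # objects instead of bare revision numbers. Assuming an existing `repo`:
    #
    #   for ctx in repo.set(b'ancestors(%d)', 2):
    #       print(ctx.rev(), ctx.description())
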
    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

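    # Illustrative sketch (added for exposition): `anyrevs()` with a local
    # alias overriding whatever the user configured; the `recent` alias name
    # is hypothetical. Assuming an existing `repo`:
    #
    #   revs = repo.anyrevs(
    #       [b'recent()'],
    #       user=True,
    #       localalias={b'recent': b'last(all(), 5)'},
    #   )
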
    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

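    # Illustrative sketch (added for exposition): firing a custom hook. The
    # hook name `myextension-done` is hypothetical; for shell hooks, keyword
    # arguments are exposed as HG_* environment variables.
    #
    #   repo.hook(b'myextension-done', throw=False, result=b'ok')
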
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

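    # Illustrative sketch (added for exposition): querying the tag APIs.
    # Assuming an existing `repo` carrying a (hypothetical) tag b'v1.0':
    #
    #   node = repo.tags()[b'v1.0']   # tag name -> binary node
    #   kind = repo.tagtype(b'v1.0')  # b'global', b'local', or None
    #   names = repo.nodetags(node)   # node -> sorted list of tag names
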
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

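    # Illustrative sketch (added for exposition): `ignoremissing` turns the
    # lookup error into an implicit None return. Assuming an existing `repo`:
    #
    #   tip = repo.branchtip(b'default')                      # raises if unknown
    #   tip = repo.branchtip(b'no-such', ignoremissing=True)  # returns None
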
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

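    # Illustrative sketch (added for exposition): the `encode`/`decode`
    # filter patterns loaded above come from hgrc sections such as the
    # hypothetical configuration below. A `tempfile:` specifier runs the
    # command against INFILE/OUTFILE temp files; without a specifier the
    # data is piped through the command.
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #   [decode]
    #   **.txt = tempfile: dos2unix -n INFILE OUTFILE
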
    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

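    # Illustrative sketch (added for exposition): the `flags` argument drives
    # how the file materialises on disk. Assuming an existing `repo`:
    #
    #   repo.wwrite(b'script.sh', b'#!/bin/sh\n', b'x')  # executable file
    #   repo.wwrite(b'link', b'target', b'l')            # symlink to 'target'
    #   repo.wwrite(b'plain.txt', b'data', b'')          # regular file
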
    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

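    # Illustrative sketch (added for exposition): typical transaction usage.
    # `transaction()` below nests if one is already running, so callers
    # usually write something like:
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; `tr` aborts on exception
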
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
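        #
        # Illustrative sketch (added for exposition): a hypothetical reader
        # for the line-based ``tags.changes`` format described above.
        #
        #   def parse_tag_changes(data):
        #       """Yield (action, hex_node, tag_name) tuples."""
        #       for line in data.splitlines():
        #           action, hex_node, tag_name = line.split(b' ', 2)
        #           yield action, hex_node, tag_name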
2454 tracktags = lambda x: None
2454 tracktags = lambda x: None
2455 # experimental config: experimental.hook-track-tags
2455 # experimental config: experimental.hook-track-tags
2456 shouldtracktags = self.ui.configbool(
2456 shouldtracktags = self.ui.configbool(
2457 b'experimental', b'hook-track-tags'
2457 b'experimental', b'hook-track-tags'
2458 )
2458 )
2459 if desc != b'strip' and shouldtracktags:
2459 if desc != b'strip' and shouldtracktags:
2460 oldheads = self.changelog.headrevs()
2460 oldheads = self.changelog.headrevs()
2461
2461
2462 def tracktags(tr2):
2462 def tracktags(tr2):
2463 repo = reporef()
2463 repo = reporef()
2464 assert repo is not None # help pytype
2464 assert repo is not None # help pytype
2465 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2465 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2466 newheads = repo.changelog.headrevs()
2466 newheads = repo.changelog.headrevs()
2467 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2467 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2468 # notes: we compare lists here.
2468 # notes: we compare lists here.
2469 # As we do it only once buiding set would not be cheaper
2469 # As we do it only once buiding set would not be cheaper
2470 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2470 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2471 if changes:
2471 if changes:
2472 tr2.hookargs[b'tag_moved'] = b'1'
2472 tr2.hookargs[b'tag_moved'] = b'1'
2473 with repo.vfs(
2473 with repo.vfs(
2474 b'changes/tags.changes', b'w', atomictemp=True
2474 b'changes/tags.changes', b'w', atomictemp=True
2475 ) as changesfile:
2475 ) as changesfile:
2476 # note: we do not register the file to the transaction
2476 # note: we do not register the file to the transaction
2477 # because we needs it to still exist on the transaction
2477 # because we needs it to still exist on the transaction
2478 # is close (for txnclose hooks)
2478 # is close (for txnclose hooks)
2479 tagsmod.writediff(changesfile, changes)
2479 tagsmod.writediff(changesfile, changes)
2480
2480
2481 def validate(tr2):
2481 def validate(tr2):
2482 """will run pre-closing hooks"""
2482 """will run pre-closing hooks"""
2483 # XXX the transaction API is a bit lacking here so we take a hacky
2483 # XXX the transaction API is a bit lacking here so we take a hacky
2484 # path for now
2484 # path for now
2485 #
2485 #
2486 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2486 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2487 # dict is copied before these run. In addition we needs the data
2487 # dict is copied before these run. In addition we needs the data
2488 # available to in memory hooks too.
2488 # available to in memory hooks too.
2489 #
2489 #
2490 # Moreover, we also need to make sure this runs before txnclose
2490 # Moreover, we also need to make sure this runs before txnclose
2491 # hooks and there is no "pending" mechanism that would execute
2491 # hooks and there is no "pending" mechanism that would execute
2492 # logic only if hooks are about to run.
2492 # logic only if hooks are about to run.
2493 #
2493 #
2494 # Fixing this limitation of the transaction is also needed to track
2494 # Fixing this limitation of the transaction is also needed to track
2495 # other families of changes (bookmarks, phases, obsolescence).
2495 # other families of changes (bookmarks, phases, obsolescence).
2496 #
2496 #
2497 # This will have to be fixed before we remove the experimental
2497 # This will have to be fixed before we remove the experimental
2498 # gating.
2498 # gating.
2499 tracktags(tr2)
2499 tracktags(tr2)
2500 repo = reporef()
2500 repo = reporef()
2501 assert repo is not None # help pytype
2501 assert repo is not None # help pytype
2502
2502
2503 singleheadopt = (b'experimental', b'single-head-per-branch')
2503 singleheadopt = (b'experimental', b'single-head-per-branch')
2504 singlehead = repo.ui.configbool(*singleheadopt)
2504 singlehead = repo.ui.configbool(*singleheadopt)
2505 if singlehead:
2505 if singlehead:
2506 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2506 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2507 accountclosed = singleheadsub.get(
2507 accountclosed = singleheadsub.get(
2508 b"account-closed-heads", False
2508 b"account-closed-heads", False
2509 )
2509 )
2510 if singleheadsub.get(b"public-changes-only", False):
2510 if singleheadsub.get(b"public-changes-only", False):
2511 filtername = b"immutable"
2511 filtername = b"immutable"
2512 else:
2512 else:
2513 filtername = b"visible"
2513 filtername = b"visible"
2514 scmutil.enforcesinglehead(
2514 scmutil.enforcesinglehead(
2515 repo, tr2, desc, accountclosed, filtername
2515 repo, tr2, desc, accountclosed, filtername
2516 )
2516 )
2517 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2517 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2518 for name, (old, new) in sorted(
2518 for name, (old, new) in sorted(
2519 tr.changes[b'bookmarks'].items()
2519 tr.changes[b'bookmarks'].items()
2520 ):
2520 ):
2521 args = tr.hookargs.copy()
2521 args = tr.hookargs.copy()
2522 args.update(bookmarks.preparehookargs(name, old, new))
2522 args.update(bookmarks.preparehookargs(name, old, new))
2523 repo.hook(
2523 repo.hook(
2524 b'pretxnclose-bookmark',
2524 b'pretxnclose-bookmark',
2525 throw=True,
2525 throw=True,
2526 **pycompat.strkwargs(args)
2526 **pycompat.strkwargs(args)
2527 )
2527 )
2528 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2528 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2529 cl = repo.unfiltered().changelog
2529 cl = repo.unfiltered().changelog
2530 for revs, (old, new) in tr.changes[b'phases']:
2530 for revs, (old, new) in tr.changes[b'phases']:
2531 for rev in revs:
2531 for rev in revs:
2532 args = tr.hookargs.copy()
2532 args = tr.hookargs.copy()
2533 node = hex(cl.node(rev))
2533 node = hex(cl.node(rev))
2534 args.update(phases.preparehookargs(node, old, new))
2534 args.update(phases.preparehookargs(node, old, new))
2535 repo.hook(
2535 repo.hook(
2536 b'pretxnclose-phase',
2536 b'pretxnclose-phase',
2537 throw=True,
2537 throw=True,
2538 **pycompat.strkwargs(args)
2538 **pycompat.strkwargs(args)
2539 )
2539 )
2540
2540
2541 repo.hook(
2541 repo.hook(
2542 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2542 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2543 )
2543 )
2544
2544
2545 def releasefn(tr, success):
2545 def releasefn(tr, success):
2546 repo = reporef()
2546 repo = reporef()
2547 if repo is None:
2547 if repo is None:
2548 # If the repo has been GC'd (and this release function is being
2548 # If the repo has been GC'd (and this release function is being
2549 # called from transaction.__del__), there's not much we can do,
2549 # called from transaction.__del__), there's not much we can do,
2550 # so just leave the unfinished transaction there and let the
2550 # so just leave the unfinished transaction there and let the
2551 # user run `hg recover`.
2551 # user run `hg recover`.
2552 return
2552 return
2553 if success:
2553 if success:
2554 # this should be explicitly invoked here, because
2554 # this should be explicitly invoked here, because
2555 # in-memory changes aren't written out at closing
2555 # in-memory changes aren't written out at closing
2556 # transaction, if tr.addfilegenerator (via
2556 # transaction, if tr.addfilegenerator (via
2557 # dirstate.write or so) isn't invoked while
2557 # dirstate.write or so) isn't invoked while
2558 # transaction running
2558 # transaction running
2559 repo.dirstate.write(None)
2559 repo.dirstate.write(None)
2560 else:
2560 else:
2561 # discard all changes (including ones already written
2561 # discard all changes (including ones already written
2562 # out) in this transaction
2562 # out) in this transaction
2563 repo.invalidate(clearfilecache=True)
2563 repo.invalidate(clearfilecache=True)
2564
2564
2565 tr = transaction.transaction(
2565 tr = transaction.transaction(
2566 rp,
2566 rp,
2567 self.svfs,
2567 self.svfs,
2568 vfsmap,
2568 vfsmap,
2569 b"journal",
2569 b"journal",
2570 b"undo",
2570 b"undo",
2571 aftertrans(renames),
2571 aftertrans(renames),
2572 self.store.createmode,
2572 self.store.createmode,
2573 validator=validate,
2573 validator=validate,
2574 releasefn=releasefn,
2574 releasefn=releasefn,
2575 checkambigfiles=_cachedfiles,
2575 checkambigfiles=_cachedfiles,
2576 name=desc,
2576 name=desc,
2577 )
2577 )
2578 tr.changes[b'origrepolen'] = len(self)
2578 tr.changes[b'origrepolen'] = len(self)
2579 tr.changes[b'obsmarkers'] = set()
2579 tr.changes[b'obsmarkers'] = set()
2580 tr.changes[b'phases'] = []
2580 tr.changes[b'phases'] = []
2581 tr.changes[b'bookmarks'] = {}
2581 tr.changes[b'bookmarks'] = {}
2582
2582
2583 tr.hookargs[b'txnid'] = txnid
2583 tr.hookargs[b'txnid'] = txnid
2584 tr.hookargs[b'txnname'] = desc
2584 tr.hookargs[b'txnname'] = desc
2585 tr.hookargs[b'changes'] = tr.changes
2585 tr.hookargs[b'changes'] = tr.changes
2586 # note: writing the fncache only during finalize mean that the file is
2586 # note: writing the fncache only during finalize mean that the file is
2587 # outdated when running hooks. As fncache is used for streaming clone,
2587 # outdated when running hooks. As fncache is used for streaming clone,
2588 # this is not expected to break anything that happen during the hooks.
2588 # this is not expected to break anything that happen during the hooks.
2589 tr.addfinalize(b'flush-fncache', self.store.write)
2589 tr.addfinalize(b'flush-fncache', self.store.write)
2590
2590
2591 def txnclosehook(tr2):
2591 def txnclosehook(tr2):
2592 """To be run if transaction is successful, will schedule a hook run"""
2592 """To be run if transaction is successful, will schedule a hook run"""
2593 # Don't reference tr2 in hook() so we don't hold a reference.
2593 # Don't reference tr2 in hook() so we don't hold a reference.
2594 # This reduces memory consumption when there are multiple
2594 # This reduces memory consumption when there are multiple
2595 # transactions per lock. This can likely go away if issue5045
2595 # transactions per lock. This can likely go away if issue5045
2596 # fixes the function accumulation.
2596 # fixes the function accumulation.
2597 hookargs = tr2.hookargs
2597 hookargs = tr2.hookargs
2598
2598
2599 def hookfunc(unused_success):
2599 def hookfunc(unused_success):
2600 repo = reporef()
2600 repo = reporef()
2601 assert repo is not None # help pytype
2601 assert repo is not None # help pytype
2602
2602
2603 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2603 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2604 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2604 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2605 for name, (old, new) in bmchanges:
2605 for name, (old, new) in bmchanges:
2606 args = tr.hookargs.copy()
2606 args = tr.hookargs.copy()
2607 args.update(bookmarks.preparehookargs(name, old, new))
2607 args.update(bookmarks.preparehookargs(name, old, new))
2608 repo.hook(
2608 repo.hook(
2609 b'txnclose-bookmark',
2609 b'txnclose-bookmark',
2610 throw=False,
2610 throw=False,
2611 **pycompat.strkwargs(args)
2611 **pycompat.strkwargs(args)
2612 )
2612 )
2613
2613
2614 if hook.hashook(repo.ui, b'txnclose-phase'):
2614 if hook.hashook(repo.ui, b'txnclose-phase'):
2615 cl = repo.unfiltered().changelog
2615 cl = repo.unfiltered().changelog
2616 phasemv = sorted(
2616 phasemv = sorted(
2617 tr.changes[b'phases'], key=lambda r: r[0][0]
2617 tr.changes[b'phases'], key=lambda r: r[0][0]
2618 )
2618 )
2619 for revs, (old, new) in phasemv:
2619 for revs, (old, new) in phasemv:
2620 for rev in revs:
2620 for rev in revs:
2621 args = tr.hookargs.copy()
2621 args = tr.hookargs.copy()
2622 node = hex(cl.node(rev))
2622 node = hex(cl.node(rev))
2623 args.update(phases.preparehookargs(node, old, new))
2623 args.update(phases.preparehookargs(node, old, new))
2624 repo.hook(
2624 repo.hook(
2625 b'txnclose-phase',
2625 b'txnclose-phase',
2626 throw=False,
2626 throw=False,
2627 **pycompat.strkwargs(args)
2627 **pycompat.strkwargs(args)
2628 )
2628 )
2629
2629
2630 repo.hook(
2630 repo.hook(
2631 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2631 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2632 )
2632 )
2633
2633
2634 repo = reporef()
2634 repo = reporef()
2635 assert repo is not None # help pytype
2635 assert repo is not None # help pytype
2636 repo._afterlock(hookfunc)
2636 repo._afterlock(hookfunc)
2637
2637
2638 tr.addfinalize(b'txnclose-hook', txnclosehook)
2638 tr.addfinalize(b'txnclose-hook', txnclosehook)
2639 # Include a leading "-" to make it happen before the transaction summary
2639 # Include a leading "-" to make it happen before the transaction summary
2640 # reports registered via scmutil.registersummarycallback() whose names
2640 # reports registered via scmutil.registersummarycallback() whose names
2641 # are 00-txnreport etc. That way, the caches will be warm when the
2641 # are 00-txnreport etc. That way, the caches will be warm when the
2642 # callbacks run.
2642 # callbacks run.
2643 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2643 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2644
2644
2645 def txnaborthook(tr2):
2645 def txnaborthook(tr2):
2646 """To be run if transaction is aborted"""
2646 """To be run if transaction is aborted"""
2647 repo = reporef()
2647 repo = reporef()
2648 assert repo is not None # help pytype
2648 assert repo is not None # help pytype
2649 repo.hook(
2649 repo.hook(
2650 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2650 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2651 )
2651 )
2652
2652
2653 tr.addabort(b'txnabort-hook', txnaborthook)
2653 tr.addabort(b'txnabort-hook', txnaborthook)
2654 # avoid eager cache invalidation. in-memory data should be identical
2654 # avoid eager cache invalidation. in-memory data should be identical
2655 # to stored data if transaction has no error.
2655 # to stored data if transaction has no error.
2656 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2656 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2657 self._transref = weakref.ref(tr)
2657 self._transref = weakref.ref(tr)
2658 scmutil.registersummarycallback(self, tr, desc)
2658 scmutil.registersummarycallback(self, tr, desc)
2659 # This only exists to deal with rollback's need to have viable
2659 # This only exists to deal with rollback's need to have viable
2660 # parents at the end of the operation. So back up viable parents at
2660 # parents at the end of the operation. So back up viable parents at
2661 # the time of this operation.
2661 # the time of this operation.
2662 #
2662 #
2663 # We only do it when the `wlock` is taken, otherwise others might be
2663 # We only do it when the `wlock` is taken, otherwise others might be
2664 # altering the dirstate under us.
2664 # altering the dirstate under us.
2665 #
2665 #
2666 # This is really not a great way to do this (first, because we cannot
2666 # This is really not a great way to do this (first, because we cannot
2667 # always do it). More viable alternatives exist:
2667 # always do it). More viable alternatives exist:
2668 #
2668 #
2669 # - backing up only the working copy parents in a dedicated file and
2669 # - backing up only the working copy parents in a dedicated file and
2670 # doing a clean "keep-update" to them on `hg rollback`.
2670 # doing a clean "keep-update" to them on `hg rollback`.
2671 #
2671 #
2672 # - slightly changing the behavior and applying logic similar to "hg
2672 # - slightly changing the behavior and applying logic similar to "hg
2673 # strip" to pick a working copy destination on `hg rollback`
2673 # strip" to pick a working copy destination on `hg rollback`
2674 if self.currentwlock() is not None:
2674 if self.currentwlock() is not None:
2675 ds = self.dirstate
2675 ds = self.dirstate
2676 if ds.branch() == b'default':
2676 if not self.vfs.exists(b'branch'):
2677 # force a file to be written if none exists
2677 # force a file to be written if none exists
2678 ds.setbranch(b'default', None)
2678 ds.setbranch(b'default', None)
2679 # we cannot simply add "branch" to `all_file_names` because branch
2680 # is written outside of the transaction control. So we need to
2681 # backup early.
2682 tr.addbackup(b"branch", hardlink=True, location=b'plain')
2683
2679
2684 def backup_dirstate(tr):
2680 def backup_dirstate(tr):
2685 for f in ds.all_file_names():
2681 for f in ds.all_file_names():
2686 # hardlink backup is okay because `dirstate` is always
2682 # hardlink backup is okay because `dirstate` is always
2687 # atomically written and possible data files are append-only
2683 # atomically written and possible data files are append-only
2688 # and resistant to trailing data.
2684 # and resistant to trailing data.
2689 tr.addbackup(f, hardlink=True, location=b'plain')
2685 tr.addbackup(f, hardlink=True, location=b'plain')
2690
2686
2691 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2687 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2692 return tr
2688 return tr
2693
2689
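# A minimal sketch of the backup-then-validate pattern used above,
# assuming `tr` is a mercurial.transaction.transaction; the category
# and file list are hypothetical. Backups registered with
# location=b'plain' live under .hg/ rather than .hg/store:
def backup_plain_files(tr, file_names):
    for name in file_names:
        # hardlinked copy, restored if the transaction aborts
        tr.addbackup(name, hardlink=True, location=b'plain')

tr.addvalidator(b'example-backup', lambda tr: backup_plain_files(tr, [b'bookmarks']))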
2694 def _journalfiles(self):
2690 def _journalfiles(self):
2695 return (
2691 return (
2696 (self.svfs, b'journal'),
2692 (self.svfs, b'journal'),
2697 (self.vfs, b'journal.desc'),
2693 (self.vfs, b'journal.desc'),
2698 )
2694 )
2699
2695
2700 def undofiles(self):
2696 def undofiles(self):
2701 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2697 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2702
2698
2703 @unfilteredmethod
2699 @unfilteredmethod
2704 def _writejournal(self, desc):
2700 def _writejournal(self, desc):
2705 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2701 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2706
2702
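# The journal.desc blob written above is "<repo length>\n<desc>\n",
# optionally followed by a detail line; _rollback() below parses it
# back with splitlines(). A self-contained sketch:
blob = b"%d\n%s\n" % (42, b'commit')
oldlen, desc = blob.splitlines()[:2]
assert int(oldlen) == 42 and desc == b'commit'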
2707 def recover(self):
2703 def recover(self):
2708 with self.lock():
2704 with self.lock():
2709 if self.svfs.exists(b"journal"):
2705 if self.svfs.exists(b"journal"):
2710 self.ui.status(_(b"rolling back interrupted transaction\n"))
2706 self.ui.status(_(b"rolling back interrupted transaction\n"))
2711 vfsmap = {
2707 vfsmap = {
2712 b'': self.svfs,
2708 b'': self.svfs,
2713 b'plain': self.vfs,
2709 b'plain': self.vfs,
2714 }
2710 }
2715 transaction.rollback(
2711 transaction.rollback(
2716 self.svfs,
2712 self.svfs,
2717 vfsmap,
2713 vfsmap,
2718 b"journal",
2714 b"journal",
2719 self.ui.warn,
2715 self.ui.warn,
2720 checkambigfiles=_cachedfiles,
2716 checkambigfiles=_cachedfiles,
2721 )
2717 )
2722 self.invalidate()
2718 self.invalidate()
2723 return True
2719 return True
2724 else:
2720 else:
2725 self.ui.warn(_(b"no interrupted transaction available\n"))
2721 self.ui.warn(_(b"no interrupted transaction available\n"))
2726 return False
2722 return False
2727
2723
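# Hedged usage sketch for recover(), which is what `hg recover` calls;
# the repository path is hypothetical:
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
if not repo.recover():
    repo.ui.status(b'nothing to recover\n')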
2728 def rollback(self, dryrun=False, force=False):
2724 def rollback(self, dryrun=False, force=False):
2729 wlock = lock = None
2725 wlock = lock = None
2730 try:
2726 try:
2731 wlock = self.wlock()
2727 wlock = self.wlock()
2732 lock = self.lock()
2728 lock = self.lock()
2733 if self.svfs.exists(b"undo"):
2729 if self.svfs.exists(b"undo"):
2734 return self._rollback(dryrun, force)
2730 return self._rollback(dryrun, force)
2735 else:
2731 else:
2736 self.ui.warn(_(b"no rollback information available\n"))
2732 self.ui.warn(_(b"no rollback information available\n"))
2737 return 1
2733 return 1
2738 finally:
2734 finally:
2739 release(lock, wlock)
2735 release(lock, wlock)
2740
2736
2741 @unfilteredmethod # Until we get smarter cache management
2737 @unfilteredmethod # Until we get smarter cache management
2742 def _rollback(self, dryrun, force):
2738 def _rollback(self, dryrun, force):
2743 ui = self.ui
2739 ui = self.ui
2744
2740
2745 parents = self.dirstate.parents()
2741 parents = self.dirstate.parents()
2746 try:
2742 try:
2747 args = self.vfs.read(b'undo.desc').splitlines()
2743 args = self.vfs.read(b'undo.desc').splitlines()
2748 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2744 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2749 if len(args) >= 3:
2745 if len(args) >= 3:
2750 detail = args[2]
2746 detail = args[2]
2751 oldtip = oldlen - 1
2747 oldtip = oldlen - 1
2752
2748
2753 if detail and ui.verbose:
2749 if detail and ui.verbose:
2754 msg = _(
2750 msg = _(
2755 b'repository tip rolled back to revision %d'
2751 b'repository tip rolled back to revision %d'
2756 b' (undo %s: %s)\n'
2752 b' (undo %s: %s)\n'
2757 ) % (oldtip, desc, detail)
2753 ) % (oldtip, desc, detail)
2758 else:
2754 else:
2759 msg = _(
2755 msg = _(
2760 b'repository tip rolled back to revision %d (undo %s)\n'
2756 b'repository tip rolled back to revision %d (undo %s)\n'
2761 ) % (oldtip, desc)
2757 ) % (oldtip, desc)
2762 parentgone = any(self[p].rev() > oldtip for p in parents)
2758 parentgone = any(self[p].rev() > oldtip for p in parents)
2763 except IOError:
2759 except IOError:
2764 msg = _(b'rolling back unknown transaction\n')
2760 msg = _(b'rolling back unknown transaction\n')
2765 desc = None
2761 desc = None
2766 parentgone = True
2762 parentgone = True
2767
2763
2768 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2764 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2769 raise error.Abort(
2765 raise error.Abort(
2770 _(
2766 _(
2771 b'rollback of last commit while not checked out '
2767 b'rollback of last commit while not checked out '
2772 b'may lose data'
2768 b'may lose data'
2773 ),
2769 ),
2774 hint=_(b'use -f to force'),
2770 hint=_(b'use -f to force'),
2775 )
2771 )
2776
2772
2777 ui.status(msg)
2773 ui.status(msg)
2778 if dryrun:
2774 if dryrun:
2779 return 0
2775 return 0
2780
2776
2781 self.destroying()
2777 self.destroying()
2782 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2778 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2783 skip_journal_pattern = None
2779 skip_journal_pattern = None
2784 if not parentgone:
2780 if not parentgone:
2785 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2781 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2786 transaction.rollback(
2782 transaction.rollback(
2787 self.svfs,
2783 self.svfs,
2788 vfsmap,
2784 vfsmap,
2789 b'undo',
2785 b'undo',
2790 ui.warn,
2786 ui.warn,
2791 checkambigfiles=_cachedfiles,
2787 checkambigfiles=_cachedfiles,
2792 skip_journal_pattern=skip_journal_pattern,
2788 skip_journal_pattern=skip_journal_pattern,
2793 )
2789 )
2794 self.invalidate()
2790 self.invalidate()
2795 self.dirstate.invalidate()
2791 self.dirstate.invalidate()
2796
2792
2797 if parentgone:
2793 if parentgone:
2798 # replace this with some explicit parent update in the future.
2794 # replace this with some explicit parent update in the future.
2799 has_node = self.changelog.index.has_node
2795 has_node = self.changelog.index.has_node
2800 if not all(has_node(p) for p in self.dirstate._pl):
2796 if not all(has_node(p) for p in self.dirstate._pl):
2801 # There was no dirstate to backup initially, we need to drop
2797 # There was no dirstate to backup initially, we need to drop
2802 # the existing one.
2798 # the existing one.
2803 with self.dirstate.changing_parents(self):
2799 with self.dirstate.changing_parents(self):
2804 self.dirstate.setparents(self.nullid)
2800 self.dirstate.setparents(self.nullid)
2805 self.dirstate.clear()
2801 self.dirstate.clear()
2806
2802
2807 parents = tuple([p.rev() for p in self[None].parents()])
2803 parents = tuple([p.rev() for p in self[None].parents()])
2808 if len(parents) > 1:
2804 if len(parents) > 1:
2809 ui.status(
2805 ui.status(
2810 _(
2806 _(
2811 b'working directory now based on '
2807 b'working directory now based on '
2812 b'revisions %d and %d\n'
2808 b'revisions %d and %d\n'
2813 )
2809 )
2814 % parents
2810 % parents
2815 )
2811 )
2816 else:
2812 else:
2817 ui.status(
2813 ui.status(
2818 _(b'working directory now based on revision %d\n') % parents
2814 _(b'working directory now based on revision %d\n') % parents
2819 )
2815 )
2820 mergestatemod.mergestate.clean(self)
2816 mergestatemod.mergestate.clean(self)
2821
2817
2822 # TODO: if we know which new heads may result from this rollback, pass
2818 # TODO: if we know which new heads may result from this rollback, pass
2823 # them to destroy(), which will prevent the branchhead cache from being
2819 # them to destroy(), which will prevent the branchhead cache from being
2824 # invalidated.
2820 # invalidated.
2825 self.destroyed()
2821 self.destroyed()
2826 return 0
2822 return 0
2827
2823
2828 def _buildcacheupdater(self, newtransaction):
2824 def _buildcacheupdater(self, newtransaction):
2829 """called during transaction to build the callback updating cache
2825 """called during transaction to build the callback updating cache
2830
2826
2831 Lives on the repository to help extensions that might want to augment
2827 Lives on the repository to help extensions that might want to augment
2832 this logic. For this purpose, the created transaction is passed to the
2828 this logic. For this purpose, the created transaction is passed to the
2833 method.
2829 method.
2834 """
2830 """
2835 # we must avoid cyclic reference between repo and transaction.
2831 # we must avoid cyclic reference between repo and transaction.
2836 reporef = weakref.ref(self)
2832 reporef = weakref.ref(self)
2837
2833
2838 def updater(tr):
2834 def updater(tr):
2839 repo = reporef()
2835 repo = reporef()
2840 assert repo is not None # help pytype
2836 assert repo is not None # help pytype
2841 repo.updatecaches(tr)
2837 repo.updatecaches(tr)
2842
2838
2843 return updater
2839 return updater
2844
2840
2845 @unfilteredmethod
2841 @unfilteredmethod
2846 def updatecaches(self, tr=None, full=False, caches=None):
2842 def updatecaches(self, tr=None, full=False, caches=None):
2847 """warm appropriate caches
2843 """warm appropriate caches
2848
2844
2849 If this function is called after a transaction closed, the transaction
2845 If this function is called after a transaction closed, the transaction
2850 will be available in the 'tr' argument. This can be used to selectively
2846 will be available in the 'tr' argument. This can be used to selectively
2851 update caches relevant to the changes in that transaction.
2847 update caches relevant to the changes in that transaction.
2852
2848
2853 If 'full' is set, make sure all caches the function knows about have
2849 If 'full' is set, make sure all caches the function knows about have
2854 up-to-date data. Even the ones usually loaded more lazily.
2850 up-to-date data. Even the ones usually loaded more lazily.
2855
2851
2856 The `full` argument can take a special "post-clone" value. In this case
2852 The `full` argument can take a special "post-clone" value. In this case
2857 the cache warming is done after a clone and some of the slower caches
2853 the cache warming is done after a clone and some of the slower caches
2858 might be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2854 might be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2859 as we plan for a cleaner way to deal with this for 5.9.
2855 as we plan for a cleaner way to deal with this for 5.9.
2860 """
2856 """
2861 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2857 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2862 # During strip, many caches are invalid but
2858 # During strip, many caches are invalid but
2863 # later call to `destroyed` will refresh them.
2859 # later call to `destroyed` will refresh them.
2864 return
2860 return
2865
2861
2866 unfi = self.unfiltered()
2862 unfi = self.unfiltered()
2867
2863
2868 if full:
2864 if full:
2869 msg = (
2865 msg = (
2870 "`full` argument for `repo.updatecaches` is deprecated\n"
2866 "`full` argument for `repo.updatecaches` is deprecated\n"
2871 "(use `caches=repository.CACHE_ALL` instead)"
2867 "(use `caches=repository.CACHE_ALL` instead)"
2872 )
2868 )
2873 self.ui.deprecwarn(msg, b"5.9")
2869 self.ui.deprecwarn(msg, b"5.9")
2874 caches = repository.CACHES_ALL
2870 caches = repository.CACHES_ALL
2875 if full == b"post-clone":
2871 if full == b"post-clone":
2876 caches = repository.CACHES_POST_CLONE
2872 caches = repository.CACHES_POST_CLONE
2878 elif caches is None:
2874 elif caches is None:
2879 caches = repository.CACHES_DEFAULT
2875 caches = repository.CACHES_DEFAULT
2880
2876
2881 if repository.CACHE_BRANCHMAP_SERVED in caches:
2877 if repository.CACHE_BRANCHMAP_SERVED in caches:
2882 if tr is None or tr.changes[b'origrepolen'] < len(self):
2878 if tr is None or tr.changes[b'origrepolen'] < len(self):
2883 # accessing the 'served' branchmap should refresh all the others,
2879 # accessing the 'served' branchmap should refresh all the others,
2884 self.ui.debug(b'updating the branch cache\n')
2880 self.ui.debug(b'updating the branch cache\n')
2885 self.filtered(b'served').branchmap()
2881 self.filtered(b'served').branchmap()
2886 self.filtered(b'served.hidden').branchmap()
2882 self.filtered(b'served.hidden').branchmap()
2887 # flush all possibly delayed write.
2883 # flush all possibly delayed write.
2888 self._branchcaches.write_delayed(self)
2884 self._branchcaches.write_delayed(self)
2889
2885
2890 if repository.CACHE_CHANGELOG_CACHE in caches:
2886 if repository.CACHE_CHANGELOG_CACHE in caches:
2891 self.changelog.update_caches(transaction=tr)
2887 self.changelog.update_caches(transaction=tr)
2892
2888
2893 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2889 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2894 self.manifestlog.update_caches(transaction=tr)
2890 self.manifestlog.update_caches(transaction=tr)
2895
2891
2896 if repository.CACHE_REV_BRANCH in caches:
2892 if repository.CACHE_REV_BRANCH in caches:
2897 rbc = unfi.revbranchcache()
2893 rbc = unfi.revbranchcache()
2898 for r in unfi.changelog:
2894 for r in unfi.changelog:
2899 rbc.branchinfo(r)
2895 rbc.branchinfo(r)
2900 rbc.write()
2896 rbc.write()
2901
2897
2902 if repository.CACHE_FULL_MANIFEST in caches:
2898 if repository.CACHE_FULL_MANIFEST in caches:
2903 # ensure the working copy parents are in the manifestfulltextcache
2899 # ensure the working copy parents are in the manifestfulltextcache
2904 for ctx in self[b'.'].parents():
2900 for ctx in self[b'.'].parents():
2905 ctx.manifest() # accessing the manifest is enough
2901 ctx.manifest() # accessing the manifest is enough
2906
2902
2907 if repository.CACHE_FILE_NODE_TAGS in caches:
2903 if repository.CACHE_FILE_NODE_TAGS in caches:
2908 # accessing fnode cache warms the cache
2904 # accessing fnode cache warms the cache
2909 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2905 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2910
2906
2911 if repository.CACHE_TAGS_DEFAULT in caches:
2907 if repository.CACHE_TAGS_DEFAULT in caches:
2912 # accessing tags warms the cache
2908 # accessing tags warms the cache
2913 self.tags()
2909 self.tags()
2914 if repository.CACHE_TAGS_SERVED in caches:
2910 if repository.CACHE_TAGS_SERVED in caches:
2915 self.filtered(b'served').tags()
2911 self.filtered(b'served').tags()
2916
2912
2917 if repository.CACHE_BRANCHMAP_ALL in caches:
2913 if repository.CACHE_BRANCHMAP_ALL in caches:
2918 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2914 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2919 # so we're forcing a write to cause these caches to be warmed up
2915 # so we're forcing a write to cause these caches to be warmed up
2920 # even if they haven't explicitly been requested yet (if they've
2916 # even if they haven't explicitly been requested yet (if they've
2921 # never been used by hg, they won't ever have been written, even if
2917 # never been used by hg, they won't ever have been written, even if
2922 # they're a subset of another kind of cache that *has* been used).
2918 # they're a subset of another kind of cache that *has* been used).
2923 for filt in repoview.filtertable.keys():
2919 for filt in repoview.filtertable.keys():
2924 filtered = self.filtered(filt)
2920 filtered = self.filtered(filt)
2925 filtered.branchmap().write(filtered)
2921 filtered.branchmap().write(filtered)
2926
2922
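# Hedged usage sketch for the caches= selector handled above, using the
# constants from mercurial.interfaces.repository and assuming a `repo`
# instance:
from mercurial.interfaces import repository

repo.updatecaches(caches=repository.CACHES_DEFAULT)  # the usual warm set
repo.updatecaches(caches=repository.CACHES_ALL)      # warm everything eagerly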
2927 def invalidatecaches(self):
2923 def invalidatecaches(self):
2928 if '_tagscache' in vars(self):
2924 if '_tagscache' in vars(self):
2929 # can't use delattr on proxy
2925 # can't use delattr on proxy
2930 del self.__dict__['_tagscache']
2926 del self.__dict__['_tagscache']
2931
2927
2932 self._branchcaches.clear()
2928 self._branchcaches.clear()
2933 self.invalidatevolatilesets()
2929 self.invalidatevolatilesets()
2934 self._sparsesignaturecache.clear()
2930 self._sparsesignaturecache.clear()
2935
2931
2936 def invalidatevolatilesets(self):
2932 def invalidatevolatilesets(self):
2937 self.filteredrevcache.clear()
2933 self.filteredrevcache.clear()
2938 obsolete.clearobscaches(self)
2934 obsolete.clearobscaches(self)
2939 self._quick_access_changeid_invalidate()
2935 self._quick_access_changeid_invalidate()
2940
2936
2941 def invalidatedirstate(self):
2937 def invalidatedirstate(self):
2942 """Invalidates the dirstate, causing the next call to dirstate
2938 """Invalidates the dirstate, causing the next call to dirstate
2943 to check if it was modified since the last time it was read,
2939 to check if it was modified since the last time it was read,
2944 rereading it if it has.
2940 rereading it if it has.
2945
2941
2946 This is different from dirstate.invalidate() in that it doesn't always
2942 This is different from dirstate.invalidate() in that it doesn't always
2947 reread the dirstate. Use dirstate.invalidate() if you want to
2943 reread the dirstate. Use dirstate.invalidate() if you want to
2948 explicitly read the dirstate again (i.e. restoring it to a previous
2944 explicitly read the dirstate again (i.e. restoring it to a previous
2949 known good state)."""
2945 known good state)."""
2950 unfi = self.unfiltered()
2946 unfi = self.unfiltered()
2951 if 'dirstate' in unfi.__dict__:
2947 if 'dirstate' in unfi.__dict__:
2952 del unfi.__dict__['dirstate']
2948 del unfi.__dict__['dirstate']
2953
2949
2954 def invalidate(self, clearfilecache=False):
2950 def invalidate(self, clearfilecache=False):
2955 """Invalidates both store and non-store parts other than dirstate
2951 """Invalidates both store and non-store parts other than dirstate
2956
2952
2957 If a transaction is running, invalidation of store is omitted,
2953 If a transaction is running, invalidation of store is omitted,
2958 because discarding in-memory changes might cause inconsistency
2954 because discarding in-memory changes might cause inconsistency
2959 (e.g. an incomplete fncache causes unintentional failure, but
2955 (e.g. an incomplete fncache causes unintentional failure, but
2960 a redundant one doesn't).
2956 a redundant one doesn't).
2961 """
2957 """
2962 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2958 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2963 for k in list(self._filecache.keys()):
2959 for k in list(self._filecache.keys()):
2964 if (
2960 if (
2965 k == b'changelog'
2961 k == b'changelog'
2966 and self.currenttransaction()
2962 and self.currenttransaction()
2967 and self.changelog._delayed
2963 and self.changelog._delayed
2968 ):
2964 ):
2969 # The changelog object may store unwritten revisions. We don't
2965 # The changelog object may store unwritten revisions. We don't
2970 # want to lose them.
2966 # want to lose them.
2971 # TODO: Solve the problem instead of working around it.
2967 # TODO: Solve the problem instead of working around it.
2972 continue
2968 continue
2973
2969
2974 if clearfilecache:
2970 if clearfilecache:
2975 del self._filecache[k]
2971 del self._filecache[k]
2976 try:
2972 try:
2977 delattr(unfiltered, k)
2973 delattr(unfiltered, k)
2978 except AttributeError:
2974 except AttributeError:
2979 pass
2975 pass
2980 self.invalidatecaches()
2976 self.invalidatecaches()
2981 if not self.currenttransaction():
2977 if not self.currenttransaction():
2982 # TODO: Changing contents of store outside transaction
2978 # TODO: Changing contents of store outside transaction
2983 # causes inconsistency. We should make in-memory store
2979 # causes inconsistency. We should make in-memory store
2984 # changes detectable, and abort if changed.
2980 # changes detectable, and abort if changed.
2985 self.store.invalidatecaches()
2981 self.store.invalidatecaches()
2986
2982
2987 def invalidateall(self):
2983 def invalidateall(self):
2988 """Fully invalidates both store and non-store parts, causing the
2984 """Fully invalidates both store and non-store parts, causing the
2989 subsequent operation to reread any outside changes."""
2985 subsequent operation to reread any outside changes."""
2990 # extensions should hook this to invalidate their caches
2986 # extensions should hook this to invalidate their caches
2991 self.invalidate()
2987 self.invalidate()
2992 self.invalidatedirstate()
2988 self.invalidatedirstate()
2993
2989
2994 @unfilteredmethod
2990 @unfilteredmethod
2995 def _refreshfilecachestats(self, tr):
2991 def _refreshfilecachestats(self, tr):
2996 """Reload stats of cached files so that they are flagged as valid"""
2992 """Reload stats of cached files so that they are flagged as valid"""
2997 for k, ce in self._filecache.items():
2993 for k, ce in self._filecache.items():
2998 k = pycompat.sysstr(k)
2994 k = pycompat.sysstr(k)
2999 if k == 'dirstate' or k not in self.__dict__:
2995 if k == 'dirstate' or k not in self.__dict__:
3000 continue
2996 continue
3001 ce.refresh()
2997 ce.refresh()
3002
2998
3003 def _lock(
2999 def _lock(
3004 self,
3000 self,
3005 vfs,
3001 vfs,
3006 lockname,
3002 lockname,
3007 wait,
3003 wait,
3008 releasefn,
3004 releasefn,
3009 acquirefn,
3005 acquirefn,
3010 desc,
3006 desc,
3011 ):
3007 ):
3012 timeout = 0
3008 timeout = 0
3013 warntimeout = 0
3009 warntimeout = 0
3014 if wait:
3010 if wait:
3015 timeout = self.ui.configint(b"ui", b"timeout")
3011 timeout = self.ui.configint(b"ui", b"timeout")
3016 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3012 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3017 # internal config: ui.signal-safe-lock
3013 # internal config: ui.signal-safe-lock
3018 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3014 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3019
3015
3020 l = lockmod.trylock(
3016 l = lockmod.trylock(
3021 self.ui,
3017 self.ui,
3022 vfs,
3018 vfs,
3023 lockname,
3019 lockname,
3024 timeout,
3020 timeout,
3025 warntimeout,
3021 warntimeout,
3026 releasefn=releasefn,
3022 releasefn=releasefn,
3027 acquirefn=acquirefn,
3023 acquirefn=acquirefn,
3028 desc=desc,
3024 desc=desc,
3029 signalsafe=signalsafe,
3025 signalsafe=signalsafe,
3030 )
3026 )
3031 return l
3027 return l
3032
3028
3033 def _afterlock(self, callback):
3029 def _afterlock(self, callback):
3034 """add a callback to be run when the repository is fully unlocked
3030 """add a callback to be run when the repository is fully unlocked
3035
3031
3036 The callback will be executed when the outermost lock is released
3032 The callback will be executed when the outermost lock is released
3037 (with wlock being higher level than 'lock')."""
3033 (with wlock being higher level than 'lock')."""
3038 for ref in (self._wlockref, self._lockref):
3034 for ref in (self._wlockref, self._lockref):
3039 l = ref and ref()
3035 l = ref and ref()
3040 if l and l.held:
3036 if l and l.held:
3041 l.postrelease.append(callback)
3037 l.postrelease.append(callback)
3042 break
3038 break
3043 else: # no lock has been found.
3039 else: # no lock has been found.
3044 callback(True)
3040 callback(True)
3045
3041
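# Sketch of the _afterlock() contract described above, assuming a
# `repo` instance: the callback receives a single boolean success flag
# and runs once the outermost lock is released, or immediately when no
# lock is currently held:
def report(success):
    print('released cleanly' if success else 'released after an error')

repo._afterlock(report)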
3046 def lock(self, wait=True):
3042 def lock(self, wait=True):
3047 """Lock the repository store (.hg/store) and return a weak reference
3043 """Lock the repository store (.hg/store) and return a weak reference
3048 to the lock. Use this before modifying the store (e.g. committing or
3044 to the lock. Use this before modifying the store (e.g. committing or
3049 stripping). If you are opening a transaction, get a lock as well.
3045 stripping). If you are opening a transaction, get a lock as well.
3050
3046
3051 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3047 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3052 'wlock' first to avoid a dead-lock hazard."""
3048 'wlock' first to avoid a dead-lock hazard."""
3053 l = self._currentlock(self._lockref)
3049 l = self._currentlock(self._lockref)
3054 if l is not None:
3050 if l is not None:
3055 l.lock()
3051 l.lock()
3056 return l
3052 return l
3057
3053
3058 l = self._lock(
3054 l = self._lock(
3059 vfs=self.svfs,
3055 vfs=self.svfs,
3060 lockname=b"lock",
3056 lockname=b"lock",
3061 wait=wait,
3057 wait=wait,
3062 releasefn=None,
3058 releasefn=None,
3063 acquirefn=self.invalidate,
3059 acquirefn=self.invalidate,
3064 desc=_(b'repository %s') % self.origroot,
3060 desc=_(b'repository %s') % self.origroot,
3065 )
3061 )
3066 self._lockref = weakref.ref(l)
3062 self._lockref = weakref.ref(l)
3067 return l
3063 return l
3068
3064
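# The lock-ordering rule from the docstrings above, as commit() later
# in this file applies it (sketch, assuming a `repo` instance): always
# take wlock before lock.
with repo.wlock(), repo.lock():
    pass  # safe to touch both the working copy and the store here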
3069 def wlock(self, wait=True):
3065 def wlock(self, wait=True):
3070 """Lock the non-store parts of the repository (everything under
3066 """Lock the non-store parts of the repository (everything under
3071 .hg except .hg/store) and return a weak reference to the lock.
3067 .hg except .hg/store) and return a weak reference to the lock.
3072
3068
3073 Use this before modifying files in .hg.
3069 Use this before modifying files in .hg.
3074
3070
3075 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3071 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3076 'wlock' first to avoid a dead-lock hazard."""
3072 'wlock' first to avoid a dead-lock hazard."""
3077 l = self._wlockref() if self._wlockref else None
3073 l = self._wlockref() if self._wlockref else None
3078 if l is not None and l.held:
3074 if l is not None and l.held:
3079 l.lock()
3075 l.lock()
3080 return l
3076 return l
3081
3077
3082 # We do not need to check for non-waiting lock acquisition. Such
3078 # We do not need to check for non-waiting lock acquisition. Such
3083 # acquisition would not cause dead-lock as they would just fail.
3079 # acquisition would not cause dead-lock as they would just fail.
3084 if wait and (
3080 if wait and (
3085 self.ui.configbool(b'devel', b'all-warnings')
3081 self.ui.configbool(b'devel', b'all-warnings')
3086 or self.ui.configbool(b'devel', b'check-locks')
3082 or self.ui.configbool(b'devel', b'check-locks')
3087 ):
3083 ):
3088 if self._currentlock(self._lockref) is not None:
3084 if self._currentlock(self._lockref) is not None:
3089 self.ui.develwarn(b'"wlock" acquired after "lock"')
3085 self.ui.develwarn(b'"wlock" acquired after "lock"')
3090
3086
3091 def unlock():
3087 def unlock():
3092 if self.dirstate.is_changing_any:
3088 if self.dirstate.is_changing_any:
3093 msg = b"wlock release in the middle of a changing parents"
3089 msg = b"wlock release in the middle of a changing parents"
3094 self.ui.develwarn(msg)
3090 self.ui.develwarn(msg)
3095 self.dirstate.invalidate()
3091 self.dirstate.invalidate()
3096 else:
3092 else:
3097 if self.dirstate._dirty:
3093 if self.dirstate._dirty:
3098 msg = b"dirty dirstate on wlock release"
3094 msg = b"dirty dirstate on wlock release"
3099 self.ui.develwarn(msg)
3095 self.ui.develwarn(msg)
3100 self.dirstate.write(None)
3096 self.dirstate.write(None)
3101
3097
3102 unfi = self.unfiltered()
3098 unfi = self.unfiltered()
3103 if 'dirstate' in unfi.__dict__:
3099 if 'dirstate' in unfi.__dict__:
3104 del unfi.__dict__['dirstate']
3100 del unfi.__dict__['dirstate']
3105
3101
3106 l = self._lock(
3102 l = self._lock(
3107 self.vfs,
3103 self.vfs,
3108 b"wlock",
3104 b"wlock",
3109 wait,
3105 wait,
3110 unlock,
3106 unlock,
3111 self.invalidatedirstate,
3107 self.invalidatedirstate,
3112 _(b'working directory of %s') % self.origroot,
3108 _(b'working directory of %s') % self.origroot,
3113 )
3109 )
3114 self._wlockref = weakref.ref(l)
3110 self._wlockref = weakref.ref(l)
3115 return l
3111 return l
3116
3112
3117 def _currentlock(self, lockref):
3113 def _currentlock(self, lockref):
3118 """Returns the lock if it's held, or None if it's not."""
3114 """Returns the lock if it's held, or None if it's not."""
3119 if lockref is None:
3115 if lockref is None:
3120 return None
3116 return None
3121 l = lockref()
3117 l = lockref()
3122 if l is None or not l.held:
3118 if l is None or not l.held:
3123 return None
3119 return None
3124 return l
3120 return l
3125
3121
3126 def currentwlock(self):
3122 def currentwlock(self):
3127 """Returns the wlock if it's held, or None if it's not."""
3123 """Returns the wlock if it's held, or None if it's not."""
3128 return self._currentlock(self._wlockref)
3124 return self._currentlock(self._wlockref)
3129
3125
3130 def checkcommitpatterns(self, wctx, match, status, fail):
3126 def checkcommitpatterns(self, wctx, match, status, fail):
3131 """check for commit arguments that aren't committable"""
3127 """check for commit arguments that aren't committable"""
3132 if match.isexact() or match.prefix():
3128 if match.isexact() or match.prefix():
3133 matched = set(status.modified + status.added + status.removed)
3129 matched = set(status.modified + status.added + status.removed)
3134
3130
3135 for f in match.files():
3131 for f in match.files():
3136 f = self.dirstate.normalize(f)
3132 f = self.dirstate.normalize(f)
3137 if f == b'.' or f in matched or f in wctx.substate:
3133 if f == b'.' or f in matched or f in wctx.substate:
3138 continue
3134 continue
3139 if f in status.deleted:
3135 if f in status.deleted:
3140 fail(f, _(b'file not found!'))
3136 fail(f, _(b'file not found!'))
3141 # Is it a directory that exists or used to exist?
3137 # Is it a directory that exists or used to exist?
3142 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3138 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3143 d = f + b'/'
3139 d = f + b'/'
3144 for mf in matched:
3140 for mf in matched:
3145 if mf.startswith(d):
3141 if mf.startswith(d):
3146 break
3142 break
3147 else:
3143 else:
3148 fail(f, _(b"no match under directory!"))
3144 fail(f, _(b"no match under directory!"))
3149 elif f not in self.dirstate:
3145 elif f not in self.dirstate:
3150 fail(f, _(b"file not tracked!"))
3146 fail(f, _(b"file not tracked!"))
3151
3147
3152 @unfilteredmethod
3148 @unfilteredmethod
3153 def commit(
3149 def commit(
3154 self,
3150 self,
3155 text=b"",
3151 text=b"",
3156 user=None,
3152 user=None,
3157 date=None,
3153 date=None,
3158 match=None,
3154 match=None,
3159 force=False,
3155 force=False,
3160 editor=None,
3156 editor=None,
3161 extra=None,
3157 extra=None,
3162 ):
3158 ):
3163 """Add a new revision to current repository.
3159 """Add a new revision to current repository.
3164
3160
3165 Revision information is gathered from the working directory,
3161 Revision information is gathered from the working directory,
3166 match can be used to filter the committed files. If editor is
3162 match can be used to filter the committed files. If editor is
3167 supplied, it is called to get a commit message.
3163 supplied, it is called to get a commit message.
3168 """
3164 """
3169 if extra is None:
3165 if extra is None:
3170 extra = {}
3166 extra = {}
3171
3167
3172 def fail(f, msg):
3168 def fail(f, msg):
3173 raise error.InputError(b'%s: %s' % (f, msg))
3169 raise error.InputError(b'%s: %s' % (f, msg))
3174
3170
3175 if not match:
3171 if not match:
3176 match = matchmod.always()
3172 match = matchmod.always()
3177
3173
3178 if not force:
3174 if not force:
3179 match.bad = fail
3175 match.bad = fail
3180
3176
3181 # lock() for recent changelog (see issue4368)
3177 # lock() for recent changelog (see issue4368)
3182 with self.wlock(), self.lock():
3178 with self.wlock(), self.lock():
3183 wctx = self[None]
3179 wctx = self[None]
3184 merge = len(wctx.parents()) > 1
3180 merge = len(wctx.parents()) > 1
3185
3181
3186 if not force and merge and not match.always():
3182 if not force and merge and not match.always():
3187 raise error.Abort(
3183 raise error.Abort(
3188 _(
3184 _(
3189 b'cannot partially commit a merge '
3185 b'cannot partially commit a merge '
3190 b'(do not specify files or patterns)'
3186 b'(do not specify files or patterns)'
3191 )
3187 )
3192 )
3188 )
3193
3189
3194 status = self.status(match=match, clean=force)
3190 status = self.status(match=match, clean=force)
3195 if force:
3191 if force:
3196 status.modified.extend(
3192 status.modified.extend(
3197 status.clean
3193 status.clean
3198 ) # mq may commit clean files
3194 ) # mq may commit clean files
3199
3195
3200 # check subrepos
3196 # check subrepos
3201 subs, commitsubs, newstate = subrepoutil.precommit(
3197 subs, commitsubs, newstate = subrepoutil.precommit(
3202 self.ui, wctx, status, match, force=force
3198 self.ui, wctx, status, match, force=force
3203 )
3199 )
3204
3200
3205 # make sure all explicit patterns are matched
3201 # make sure all explicit patterns are matched
3206 if not force:
3202 if not force:
3207 self.checkcommitpatterns(wctx, match, status, fail)
3203 self.checkcommitpatterns(wctx, match, status, fail)
3208
3204
3209 cctx = context.workingcommitctx(
3205 cctx = context.workingcommitctx(
3210 self, status, text, user, date, extra
3206 self, status, text, user, date, extra
3211 )
3207 )
3212
3208
3213 ms = mergestatemod.mergestate.read(self)
3209 ms = mergestatemod.mergestate.read(self)
3214 mergeutil.checkunresolved(ms)
3210 mergeutil.checkunresolved(ms)
3215
3211
3216 # internal config: ui.allowemptycommit
3212 # internal config: ui.allowemptycommit
3217 if cctx.isempty() and not self.ui.configbool(
3213 if cctx.isempty() and not self.ui.configbool(
3218 b'ui', b'allowemptycommit'
3214 b'ui', b'allowemptycommit'
3219 ):
3215 ):
3220 self.ui.debug(b'nothing to commit, clearing merge state\n')
3216 self.ui.debug(b'nothing to commit, clearing merge state\n')
3221 ms.reset()
3217 ms.reset()
3222 return None
3218 return None
3223
3219
3224 if merge and cctx.deleted():
3220 if merge and cctx.deleted():
3225 raise error.Abort(_(b"cannot commit merge with missing files"))
3221 raise error.Abort(_(b"cannot commit merge with missing files"))
3226
3222
3227 if editor:
3223 if editor:
3228 cctx._text = editor(self, cctx, subs)
3224 cctx._text = editor(self, cctx, subs)
3229 edited = text != cctx._text
3225 edited = text != cctx._text
3230
3226
3231 # Save commit message in case this transaction gets rolled back
3227 # Save commit message in case this transaction gets rolled back
3232 # (e.g. by a pretxncommit hook). Leave the content alone on
3228 # (e.g. by a pretxncommit hook). Leave the content alone on
3233 # the assumption that the user will use the same editor again.
3229 # the assumption that the user will use the same editor again.
3234 msg_path = self.savecommitmessage(cctx._text)
3230 msg_path = self.savecommitmessage(cctx._text)
3235
3231
3236 # commit subs and write new state
3232 # commit subs and write new state
3237 if subs:
3233 if subs:
3238 uipathfn = scmutil.getuipathfn(self)
3234 uipathfn = scmutil.getuipathfn(self)
3239 for s in sorted(commitsubs):
3235 for s in sorted(commitsubs):
3240 sub = wctx.sub(s)
3236 sub = wctx.sub(s)
3241 self.ui.status(
3237 self.ui.status(
3242 _(b'committing subrepository %s\n')
3238 _(b'committing subrepository %s\n')
3243 % uipathfn(subrepoutil.subrelpath(sub))
3239 % uipathfn(subrepoutil.subrelpath(sub))
3244 )
3240 )
3245 sr = sub.commit(cctx._text, user, date)
3241 sr = sub.commit(cctx._text, user, date)
3246 newstate[s] = (newstate[s][0], sr)
3242 newstate[s] = (newstate[s][0], sr)
3247 subrepoutil.writestate(self, newstate)
3243 subrepoutil.writestate(self, newstate)
3248
3244
3249 p1, p2 = self.dirstate.parents()
3245 p1, p2 = self.dirstate.parents()
3250 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3246 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3251 try:
3247 try:
3252 self.hook(
3248 self.hook(
3253 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3249 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3254 )
3250 )
3255 with self.transaction(b'commit'):
3251 with self.transaction(b'commit'):
3256 ret = self.commitctx(cctx, True)
3252 ret = self.commitctx(cctx, True)
3257 # update bookmarks, dirstate and mergestate
3253 # update bookmarks, dirstate and mergestate
3258 bookmarks.update(self, [p1, p2], ret)
3254 bookmarks.update(self, [p1, p2], ret)
3259 cctx.markcommitted(ret)
3255 cctx.markcommitted(ret)
3260 ms.reset()
3256 ms.reset()
3261 except: # re-raises
3257 except: # re-raises
3262 if edited:
3258 if edited:
3263 self.ui.write(
3259 self.ui.write(
3264 _(b'note: commit message saved in %s\n') % msg_path
3260 _(b'note: commit message saved in %s\n') % msg_path
3265 )
3261 )
3266 self.ui.write(
3262 self.ui.write(
3267 _(
3263 _(
3268 b"note: use 'hg commit --logfile "
3264 b"note: use 'hg commit --logfile "
3269 b"%s --edit' to reuse it\n"
3265 b"%s --edit' to reuse it\n"
3270 )
3266 )
3271 % msg_path
3267 % msg_path
3272 )
3268 )
3273 raise
3269 raise
3274
3270
3275 def commithook(unused_success):
3271 def commithook(unused_success):
3276 # hack for commands that use a temporary commit (e.g. histedit):
3272 # hack for commands that use a temporary commit (e.g. histedit):
3277 # the temporary commit may have been stripped before the hook runs
3273 # the temporary commit may have been stripped before the hook runs
3278 if self.changelog.hasnode(ret):
3274 if self.changelog.hasnode(ret):
3279 self.hook(
3275 self.hook(
3280 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3276 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3281 )
3277 )
3282
3278
3283 self._afterlock(commithook)
3279 self._afterlock(commithook)
3284 return ret
3280 return ret
3285
3281
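# Minimal commit() usage sketch (hedged; the file name and user are
# hypothetical, `repo` is assumed):
from mercurial import match as matchmod

node = repo.commit(
    text=b'example: update readme',
    user=b'Example <example@example.com>',
    match=matchmod.exact([b'README']),
)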
3286 @unfilteredmethod
3282 @unfilteredmethod
3287 def commitctx(self, ctx, error=False, origctx=None):
3283 def commitctx(self, ctx, error=False, origctx=None):
3288 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3284 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3289
3285
3290 @unfilteredmethod
3286 @unfilteredmethod
3291 def destroying(self):
3287 def destroying(self):
3292 """Inform the repository that nodes are about to be destroyed.
3288 """Inform the repository that nodes are about to be destroyed.
3293 Intended for use by strip and rollback, so there's a common
3289 Intended for use by strip and rollback, so there's a common
3294 place for anything that has to be done before destroying history.
3290 place for anything that has to be done before destroying history.
3295
3291
3296 This is mostly useful for saving state that is in memory and waiting
3292 This is mostly useful for saving state that is in memory and waiting
3297 to be flushed when the current lock is released. Because a call to
3293 to be flushed when the current lock is released. Because a call to
3298 destroyed is imminent, the repo will be invalidated causing those
3294 destroyed is imminent, the repo will be invalidated causing those
3299 changes to stay in memory (waiting for the next unlock), or vanish
3295 changes to stay in memory (waiting for the next unlock), or vanish
3300 completely.
3296 completely.
3301 """
3297 """
3302 # When using the same lock to commit and strip, the phasecache is left
3298 # When using the same lock to commit and strip, the phasecache is left
3303 # dirty after committing. Then when we strip, the repo is invalidated,
3299 # dirty after committing. Then when we strip, the repo is invalidated,
3304 # causing those changes to disappear.
3300 # causing those changes to disappear.
3305 if '_phasecache' in vars(self):
3301 if '_phasecache' in vars(self):
3306 self._phasecache.write()
3302 self._phasecache.write()
3307
3303
3308 @unfilteredmethod
3304 @unfilteredmethod
3309 def destroyed(self):
3305 def destroyed(self):
3310 """Inform the repository that nodes have been destroyed.
3306 """Inform the repository that nodes have been destroyed.
3311 Intended for use by strip and rollback, so there's a common
3307 Intended for use by strip and rollback, so there's a common
3312 place for anything that has to be done after destroying history.
3308 place for anything that has to be done after destroying history.
3313 """
3309 """
3314 # When one tries to:
3310 # When one tries to:
3315 # 1) destroy nodes thus calling this method (e.g. strip)
3311 # 1) destroy nodes thus calling this method (e.g. strip)
3316 # 2) use phasecache somewhere (e.g. commit)
3312 # 2) use phasecache somewhere (e.g. commit)
3317 #
3313 #
3318 # then 2) will fail because the phasecache contains nodes that were
3314 # then 2) will fail because the phasecache contains nodes that were
3319 # removed. We can either remove phasecache from the filecache,
3315 # removed. We can either remove phasecache from the filecache,
3320 # causing it to reload next time it is accessed, or simply filter
3316 # causing it to reload next time it is accessed, or simply filter
3321 # the removed nodes now and write the updated cache.
3317 # the removed nodes now and write the updated cache.
3322 self._phasecache.filterunknown(self)
3318 self._phasecache.filterunknown(self)
3323 self._phasecache.write()
3319 self._phasecache.write()
3324
3320
3325 # refresh all repository caches
3321 # refresh all repository caches
3326 self.updatecaches()
3322 self.updatecaches()
3327
3323
3328 # Ensure the persistent tag cache is updated. Doing it now
3324 # Ensure the persistent tag cache is updated. Doing it now
3329 # means that the tag cache only has to worry about destroyed
3325 # means that the tag cache only has to worry about destroyed
3330 # heads immediately after a strip/rollback. That in turn
3326 # heads immediately after a strip/rollback. That in turn
3331 # guarantees that "cachetip == currenttip" (comparing both rev
3327 # guarantees that "cachetip == currenttip" (comparing both rev
3332 # and node) always means no nodes have been added or destroyed.
3328 # and node) always means no nodes have been added or destroyed.
3333
3329
3334 # XXX this is suboptimal when qrefresh'ing: we strip the current
3330 # XXX this is suboptimal when qrefresh'ing: we strip the current
3335 # head, refresh the tag cache, then immediately add a new head.
3331 # head, refresh the tag cache, then immediately add a new head.
3336 # But I think doing it this way is necessary for the "instant
3332 # But I think doing it this way is necessary for the "instant
3337 # tag cache retrieval" case to work.
3333 # tag cache retrieval" case to work.
3338 self.invalidate()
3334 self.invalidate()
3339
3335
3340 def status(
3336 def status(
3341 self,
3337 self,
3342 node1=b'.',
3338 node1=b'.',
3343 node2=None,
3339 node2=None,
3344 match=None,
3340 match=None,
3345 ignored=False,
3341 ignored=False,
3346 clean=False,
3342 clean=False,
3347 unknown=False,
3343 unknown=False,
3348 listsubrepos=False,
3344 listsubrepos=False,
3349 ):
3345 ):
3350 '''a convenience method that calls node1.status(node2)'''
3346 '''a convenience method that calls node1.status(node2)'''
3351 return self[node1].status(
3347 return self[node1].status(
3352 node2, match, ignored, clean, unknown, listsubrepos
3348 node2, match, ignored, clean, unknown, listsubrepos
3353 )
3349 )
3354
3350
3355 def addpostdsstatus(self, ps):
3351 def addpostdsstatus(self, ps):
3356 """Add a callback to run within the wlock, at the point at which status
3352 """Add a callback to run within the wlock, at the point at which status
3357 fixups happen.
3353 fixups happen.
3358
3354
3359 On status completion, callback(wctx, status) will be called with the
3355 On status completion, callback(wctx, status) will be called with the
3360 wlock held, unless the dirstate has changed from underneath or the wlock
3356 wlock held, unless the dirstate has changed from underneath or the wlock
3361 couldn't be grabbed.
3357 couldn't be grabbed.
3362
3358
3363 Callbacks should not capture and use a cached copy of the dirstate --
3359 Callbacks should not capture and use a cached copy of the dirstate --
3364 it might change in the meanwhile. Instead, they should access the
3360 it might change in the meanwhile. Instead, they should access the
3365 dirstate via wctx.repo().dirstate.
3361 dirstate via wctx.repo().dirstate.
3366
3362
3367 This list is emptied out after each status run -- extensions should
3363 This list is emptied out after each status run -- extensions should
3368 make sure they add to this list each time dirstate.status is called.
3364 make sure they add to this list each time dirstate.status is called.
3369 Extensions should also make sure they don't call this for statuses
3365 Extensions should also make sure they don't call this for statuses
3370 that don't involve the dirstate.
3366 that don't involve the dirstate.
3371 """
3367 """
3372
3368
3373 # The list is located here for uniqueness reasons -- it is actually
3369 # The list is located here for uniqueness reasons -- it is actually
3374 # managed by the workingctx, but that isn't unique per-repo.
3370 # managed by the workingctx, but that isn't unique per-repo.
3375 self._postdsstatus.append(ps)
3371 self._postdsstatus.append(ps)
3376
3372
3377 def postdsstatus(self):
3373 def postdsstatus(self):
3378 """Used by workingctx to get the list of post-dirstate-status hooks."""
3374 """Used by workingctx to get the list of post-dirstate-status hooks."""
3379 return self._postdsstatus
3375 return self._postdsstatus
3380
3376
3381 def clearpostdsstatus(self):
3377 def clearpostdsstatus(self):
3382 """Used by workingctx to clear post-dirstate-status hooks."""
3378 """Used by workingctx to clear post-dirstate-status hooks."""
3383 del self._postdsstatus[:]
3379 del self._postdsstatus[:]
3384
3380
3385 def heads(self, start=None):
3381 def heads(self, start=None):
3386 if start is None:
3382 if start is None:
3387 cl = self.changelog
3383 cl = self.changelog
3388 headrevs = reversed(cl.headrevs())
3384 headrevs = reversed(cl.headrevs())
3389 return [cl.node(rev) for rev in headrevs]
3385 return [cl.node(rev) for rev in headrevs]
3390
3386
3391 heads = self.changelog.heads(start)
3387 heads = self.changelog.heads(start)
3392 # sort the output in rev descending order
3388 # sort the output in rev descending order
3393 return sorted(heads, key=self.changelog.rev, reverse=True)
3389 return sorted(heads, key=self.changelog.rev, reverse=True)
3394
3390
3395 def branchheads(self, branch=None, start=None, closed=False):
3391 def branchheads(self, branch=None, start=None, closed=False):
3396 """return a (possibly filtered) list of heads for the given branch
3392 """return a (possibly filtered) list of heads for the given branch
3397
3393
3398 Heads are returned in topological order, from newest to oldest.
3394 Heads are returned in topological order, from newest to oldest.
3399 If branch is None, use the dirstate branch.
3395 If branch is None, use the dirstate branch.
3400 If start is not None, return only heads reachable from start.
3396 If start is not None, return only heads reachable from start.
3401 If closed is True, return heads that are marked as closed as well.
3397 If closed is True, return heads that are marked as closed as well.
3402 """
3398 """
3403 if branch is None:
3399 if branch is None:
3404 branch = self[None].branch()
3400 branch = self[None].branch()
3405 branches = self.branchmap()
3401 branches = self.branchmap()
3406 if not branches.hasbranch(branch):
3402 if not branches.hasbranch(branch):
3407 return []
3403 return []
3408 # the cache returns heads ordered lowest to highest
3404 # the cache returns heads ordered lowest to highest
3409 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3405 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3410 if start is not None:
3406 if start is not None:
3411 # filter out the heads that cannot be reached from startrev
3407 # filter out the heads that cannot be reached from startrev
3412 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3408 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3413 bheads = [h for h in bheads if h in fbheads]
3409 bheads = [h for h in bheads if h in fbheads]
3414 return bheads
3410 return bheads
3415
3411
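# Hedged sketch using branchheads() above: list the heads of the
# current (dirstate) branch, newest first, including closed heads:
from mercurial.node import hex as tohex

for head in repo.branchheads(closed=True):
    repo.ui.status(b'%s\n' % tohex(head))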
3416 def branches(self, nodes):
3412 def branches(self, nodes):
3417 if not nodes:
3413 if not nodes:
3418 nodes = [self.changelog.tip()]
3414 nodes = [self.changelog.tip()]
3419 b = []
3415 b = []
3420 for n in nodes:
3416 for n in nodes:
3421 t = n
3417 t = n
3422 while True:
3418 while True:
3423 p = self.changelog.parents(n)
3419 p = self.changelog.parents(n)
3424 if p[1] != self.nullid or p[0] == self.nullid:
3420 if p[1] != self.nullid or p[0] == self.nullid:
3425 b.append((t, n, p[0], p[1]))
3421 b.append((t, n, p[0], p[1]))
3426 break
3422 break
3427 n = p[0]
3423 n = p[0]
3428 return b
3424 return b
3429
3425
3430 def between(self, pairs):
3426 def between(self, pairs):
3431 r = []
3427 r = []
3432
3428
3433 for top, bottom in pairs:
3429 for top, bottom in pairs:
3434 n, l, i = top, [], 0
3430 n, l, i = top, [], 0
3435 f = 1
3431 f = 1
3436
3432
3437 while n != bottom and n != self.nullid:
3433 while n != bottom and n != self.nullid:
3438 p = self.changelog.parents(n)[0]
3434 p = self.changelog.parents(n)[0]
3439 if i == f:
3435 if i == f:
3440 l.append(n)
3436 l.append(n)
3441 f = f * 2
3437 f = f * 2
3442 n = p
3438 n = p
3443 i += 1
3439 i += 1
3444
3440
3445 r.append(l)
3441 r.append(l)
3446
3442
3447 return r
3443 return r
3448
3444
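# between() above records ancestors of `top` at exponentially spaced
# steps (1, 2, 4, 8, ...). A plain-Python sketch of the index pattern
# it keeps while walking at most 20 first-parent steps:
picked, f, i = [], 1, 0
while i < 20:
    if i == f:
        picked.append(i)
        f = f * 2
    i += 1
assert picked == [1, 2, 4, 8, 16]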
3449 def checkpush(self, pushop):
3445 def checkpush(self, pushop):
3450 """Extensions can override this function if additional checks have
3446 """Extensions can override this function if additional checks have
3451 to be performed before pushing, or call it if they override push
3447 to be performed before pushing, or call it if they override push
3452 command.
3448 command.
3453 """
3449 """
3454
3450
3455 @unfilteredpropertycache
3451 @unfilteredpropertycache
3456 def prepushoutgoinghooks(self):
3452 def prepushoutgoinghooks(self):
3457 """Return util.hooks consists of a pushop with repo, remote, outgoing
3453 """Return util.hooks consists of a pushop with repo, remote, outgoing
3458 methods, which are called before pushing changesets.
3454 methods, which are called before pushing changesets.
3459 """
3455 """
3460 return util.hooks()
3456 return util.hooks()
3461
3457
3462 def pushkey(self, namespace, key, old, new):
3458 def pushkey(self, namespace, key, old, new):
3463 try:
3459 try:
3464 tr = self.currenttransaction()
3460 tr = self.currenttransaction()
3465 hookargs = {}
3461 hookargs = {}
3466 if tr is not None:
3462 if tr is not None:
3467 hookargs.update(tr.hookargs)
3463 hookargs.update(tr.hookargs)
3468 hookargs = pycompat.strkwargs(hookargs)
3464 hookargs = pycompat.strkwargs(hookargs)
3469 hookargs['namespace'] = namespace
3465 hookargs['namespace'] = namespace
3470 hookargs['key'] = key
3466 hookargs['key'] = key
3471 hookargs['old'] = old
3467 hookargs['old'] = old
3472 hookargs['new'] = new
3468 hookargs['new'] = new
3473 self.hook(b'prepushkey', throw=True, **hookargs)
3469 self.hook(b'prepushkey', throw=True, **hookargs)
3474 except error.HookAbort as exc:
3470 except error.HookAbort as exc:
3475 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3471 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3476 if exc.hint:
3472 if exc.hint:
3477 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3473 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3478 return False
3474 return False
3479 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3475 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3480 ret = pushkey.push(self, namespace, key, old, new)
3476 ret = pushkey.push(self, namespace, key, old, new)
3481
3477
3482 def runhook(unused_success):
3478 def runhook(unused_success):
3483 self.hook(
3479 self.hook(
3484 b'pushkey',
3480 b'pushkey',
3485 namespace=namespace,
3481 namespace=namespace,
3486 key=key,
3482 key=key,
3487 old=old,
3483 old=old,
3488 new=new,
3484 new=new,
3489 ret=ret,
3485 ret=ret,
3490 )
3486 )
3491
3487
3492 self._afterlock(runhook)
3488 self._afterlock(runhook)
3493 return ret
3489 return ret
3494
3490
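# Hedged pushkey() sketch: move a bookmark through the generic
# namespaced key/value protocol; `old_hex` and `new_hex` are
# hypothetical 40-byte hex nodes and `repo` is assumed:
ok = repo.pushkey(b'bookmarks', b'feature-bookmark', old_hex, new_hex)
if not ok:
    repo.ui.warn(b'bookmark push was refused\n')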
3495 def listkeys(self, namespace):
3491 def listkeys(self, namespace):
3496 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3492 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3497 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3493 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3498 values = pushkey.list(self, namespace)
3494 values = pushkey.list(self, namespace)
3499 self.hook(b'listkeys', namespace=namespace, values=values)
3495 self.hook(b'listkeys', namespace=namespace, values=values)
3500 return values
3496 return values
3501
3497
3502 def debugwireargs(self, one, two, three=None, four=None, five=None):
3498 def debugwireargs(self, one, two, three=None, four=None, five=None):
3503 '''used to test argument passing over the wire'''
3499 '''used to test argument passing over the wire'''
3504 return b"%s %s %s %s %s" % (
3500 return b"%s %s %s %s %s" % (
3505 one,
3501 one,
3506 two,
3502 two,
3507 pycompat.bytestr(three),
3503 pycompat.bytestr(three),
3508 pycompat.bytestr(four),
3504 pycompat.bytestr(four),
3509 pycompat.bytestr(five),
3505 pycompat.bytestr(five),
3510 )
3506 )
3511
3507
3512 def savecommitmessage(self, text):
3508 def savecommitmessage(self, text):
3513 fp = self.vfs(b'last-message.txt', b'wb')
3509 fp = self.vfs(b'last-message.txt', b'wb')
3514 try:
3510 try:
3515 fp.write(text)
3511 fp.write(text)
3516 finally:
3512 finally:
3517 fp.close()
3513 fp.close()
3518 return self.pathto(fp.name[len(self.root) + 1 :])
3514 return self.pathto(fp.name[len(self.root) + 1 :])
3519
3515
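# savecommitmessage() above writes .hg/last-message.txt and returns the
# path relative to the current working directory; a hedged sketch:
msg_path = repo.savecommitmessage(b'WIP: draft commit message\n')
repo.ui.status(b'draft saved in %s\n' % msg_path)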
3520 def register_wanted_sidedata(self, category):
3516 def register_wanted_sidedata(self, category):
3521 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3517 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3522 # Only revlogv2 repos can want sidedata.
3518 # Only revlogv2 repos can want sidedata.
3523 return
3519 return
3524 self._wanted_sidedata.add(pycompat.bytestr(category))
3520 self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
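
    # Editor's sketch (hypothetical category and callback, assuming the
    # (repo, revlog, rev, sidedata) -> (sidedata, (add_flags, rem_flags))
    # contract of Mercurial's built-in computers): registering a changelog
    # computer that contributes nothing:
    #
    #     def compute_nothing(repo, revlog, rev, old_sidedata):
    #         return {}, (0, 0)
    #
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG,
    #         b'exp-demo',
    #         (b'exp-demo',),
    #         compute_nothing,
    #         0,
    #     )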


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a
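

def _aftertrans_example():
    """Editor's sketch: exercise the closure returned by aftertrans() with
    a stand-in vfs (hypothetical helper, not part of Mercurial)."""

    class fakevfs:
        def __init__(self):
            self.log = []

        def tryunlink(self, path):
            self.log.append((b'unlink', path))

        def rename(self, src, dest):
            self.log.append((b'rename', src, dest))

    vfs = fakevfs()
    cb = aftertrans([(vfs, b'journal', b'undo')])
    cb()
    # dest is unlinked first, then src is renamed over it, as documented above
    assert vfs.log == [(b'unlink', b'undo'), (b'rename', b'journal', b'undo')]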


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
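

def _undoname_example():
    """Editor's sketch (hypothetical helper): illustrate the journal -> undo
    mapping performed by undoname() above."""
    assert undoname(b'journal') == b'undo'
    assert undoname(b'journal.dirstate') == b'undo.dirstate'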


def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
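

# Editor's sketch (illustrative values): with a stock configuration the
# experimental storage.new-repo-backend option defaults to b'revlogv1', so:
#
#     defaultcreateopts(ui)                 -> {b'backend': b'revlogv1'}
#     defaultcreateopts(ui, {b'lfs': True}) -> {b'lfs': True,
#                                               b'backend': b'revlogv1'}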


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements
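

# Editor's note (requirement names illustrative): clone_requirements()
# splits along requirementsmod.WORKING_DIR_REQUIREMENTS, so a local clone
# of a zstd-compressed source keeps the source's
# b'revlog-compression-zstd' store requirement while a working-copy
# requirement such as dirstate-v2 follows the destination's own
# configuration instead.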


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control over the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
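

# Editor's sketch of the compression mapping above (config illustrative):
#
#     [format]
#     revlog-compression = zstd, zlib
#
# picks the first engine that is available and revlog-capable; zstd then
# adds the b'revlog-compression-zstd' requirement, zlib adds nothing (it
# is the historical default), and any other engine is recorded as
# b'exp-compression-<name>'.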


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements that need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
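

# Editor's sketch (hypothetical input): with format.usestore disabled,
# requesting bookmarks-in-store warns and returns it for dropping:
#
#     reqs = {requirementsmod.REVLOGV1_REQUIREMENT,
#             requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT}
#     reqs -= checkrequirementscompat(ui, reqs)
#     # -> only REVLOGV1_REQUIREMENT remains; a warning was printed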


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
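

# Editor's sketch (hypothetical option name):
#
#     filterknowncreateopts(ui, {b'backend': b'revlogv1', b'frobnicate': True})
#     -> {b'frobnicate': True}
#
# A non-empty result makes createrepository() below refuse to proceed, so
# an extension that understands b'frobnicate' must wrap this function and
# strip the key.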


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
                     (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the store's requires file.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
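

# Editor's sketch: a minimal programmatic call (path hypothetical):
#
#     createrepository(ui, b'/tmp/newrepo')
#
# creates the working directory and .hg/, writes the working-copy
# .hg/requires, seeds the version-65535 00changelog.i stub and, for
# store-backed repositories, writes .hg/store/requires as described above.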


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
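

# Editor's sketch of the resulting behaviour:
#
#     poisonrepository(repo)
#     repo.close()   # still permitted, now a no-op
#     repo.root      # raises error.ProgrammingError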