sparse: use None as the sparse matcher value when disabled...
marmoute
r50250:0540c162 default
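
Before the diffs themselves, a minimal self-contained sketch of the guard pattern the first file (hgext/sparse.py) adopts in this changeset: the dirstate's sparse matcher may now be None when sparse is disabled, so the wrappers check for None before calling matcher.always() or filtering paths. The names below (FakeMatcher, filter_tracked) are illustrative stand-ins, not Mercurial APIs::

    # A sparse matcher may be None (sparse disabled); callers must check
    # for that before relying on matcher.always() or matcher(path).

    class FakeMatcher:
        # Illustrative stand-in for a Mercurial matcher object.
        def __init__(self, allowed):
            self._allowed = frozenset(allowed)

        def always(self):
            # Real matchers report whether they match every path.
            return False

        def __call__(self, path):
            return path in self._allowed


    def filter_tracked(matcher, files):
        # None means "sparse disabled, keep everything"; previously the
        # wrappers assumed a matcher existed and only checked always().
        if matcher is not None and not matcher.always():
            return [f for f in files if matcher(f)]
        return list(files)


    print(filter_tracked(None, [b'a', b'b']))                 # [b'a', b'b']
    print(filter_tracked(FakeMatcher([b'a']), [b'a', b'b']))  # [b'a']
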
@@ -1,459 +1,461 b''
1 1 # sparse.py - allow sparse checkouts of the working directory
2 2 #
3 3 # Copyright 2014 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """allow sparse checkouts of the working directory (EXPERIMENTAL)
9 9
10 10 (This extension is not yet protected by backwards compatibility
11 11 guarantees. Any aspect may break in future releases until this
12 12 notice is removed.)
13 13
14 14 This extension allows the working directory to only consist of a
15 15 subset of files for the revision. This allows specific files or
16 16 directories to be explicitly included or excluded. Many repository
17 17 operations have performance proportional to the number of files in
18 18 the working directory. So only realizing a subset of files in the
19 19 working directory can improve performance.
20 20
21 21 Sparse Config Files
22 22 -------------------
23 23
24 24 The set of files that are part of a sparse checkout are defined by
25 25 a sparse config file. The file defines 3 things: includes (files to
26 26 include in the sparse checkout), excludes (files to exclude from the
27 27 sparse checkout), and profiles (links to other config files).
28 28
29 29 The file format is newline delimited. Empty lines and lines beginning
30 30 with ``#`` are ignored.
31 31
32 32 Lines beginning with ``%include `` denote another sparse config file
33 33 to include. e.g. ``%include tests.sparse``. The filename is relative
34 34 to the repository root.
35 35
36 36 The special lines ``[include]`` and ``[exclude]`` denote the section
37 37 for includes and excludes that follow, respectively. It is illegal to
38 38 have ``[include]`` after ``[exclude]``.
39 39
40 40 Non-special lines resemble file patterns to be added to either includes
41 41 or excludes. The syntax of these lines is documented by :hg:`help patterns`.
42 42 Patterns are interpreted as ``glob:`` by default and match against the
43 43 root of the repository.
44 44
45 45 Exclusion patterns take precedence over inclusion patterns. So even
46 46 if a file is explicitly included, an ``[exclude]`` entry can remove it.
47 47
48 48 For example, say you have a repository with 3 directories, ``frontend/``,
49 49 ``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond
50 50 to different projects and it is uncommon for someone working on one
51 51 to need the files for the other. But ``tools/`` contains files shared
52 52 between both projects. Your sparse config files may resemble::
53 53
54 54 # frontend.sparse
55 55 frontend/**
56 56 tools/**
57 57
58 58 # backend.sparse
59 59 backend/**
60 60 tools/**
61 61
62 62 Say the backend grows in size. Or there's a directory with thousands
63 63 of files you wish to exclude. You can modify the profile to exclude
64 64 certain files::
65 65
66 66 [include]
67 67 backend/**
68 68 tools/**
69 69
70 70 [exclude]
71 71 tools/tests/**
72 72 """
73 73
74 74
75 75 from mercurial.i18n import _
76 76 from mercurial.pycompat import setattr
77 77 from mercurial import (
78 78 cmdutil,
79 79 commands,
80 80 dirstate,
81 81 error,
82 82 extensions,
83 83 logcmdutil,
84 84 match as matchmod,
85 85 merge as mergemod,
86 86 pycompat,
87 87 registrar,
88 88 sparse,
89 89 util,
90 90 )
91 91
92 92 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
93 93 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
94 94 # be specifying the version(s) of Mercurial they are tested with, or
95 95 # leave the attribute unspecified.
96 96 testedwith = b'ships-with-hg-core'
97 97
98 98 cmdtable = {}
99 99 command = registrar.command(cmdtable)
100 100
101 101
102 102 def extsetup(ui):
103 103 sparse.enabled = True
104 104
105 105 _setupclone(ui)
106 106 _setuplog(ui)
107 107 _setupadd(ui)
108 108 _setupdirstate(ui)
109 109
110 110
111 111 def replacefilecache(cls, propname, replacement):
112 112 """Replace a filecache property with a new class. This allows changing the
113 113 cache invalidation condition."""
114 114 origcls = cls
115 115 assert callable(replacement)
116 116 while cls is not object:
117 117 if propname in cls.__dict__:
118 118 orig = cls.__dict__[propname]
119 119 setattr(cls, propname, replacement(orig))
120 120 break
121 121 cls = cls.__bases__[0]
122 122
123 123 if cls is object:
124 124 raise AttributeError(
125 125 _(b"type '%s' has no property '%s'") % (origcls, propname)
126 126 )
127 127
128 128
129 129 def _setuplog(ui):
130 130 entry = commands.table[b'log|history']
131 131 entry[1].append(
132 132 (
133 133 b'',
134 134 b'sparse',
135 135 None,
136 136 b"limit to changesets affecting the sparse checkout",
137 137 )
138 138 )
139 139
140 140 def _initialrevs(orig, repo, wopts):
141 141 revs = orig(repo, wopts)
142 142 if wopts.opts.get(b'sparse'):
143 143 sparsematch = sparse.matcher(repo)
144 144
145 145 def ctxmatch(rev):
146 146 ctx = repo[rev]
147 147 return any(f for f in ctx.files() if sparsematch(f))
148 148
149 149 revs = revs.filter(ctxmatch)
150 150 return revs
151 151
152 152 extensions.wrapfunction(logcmdutil, b'_initialrevs', _initialrevs)
153 153
154 154
155 155 def _clonesparsecmd(orig, ui, repo, *args, **opts):
156 156 include = opts.get('include')
157 157 exclude = opts.get('exclude')
158 158 enableprofile = opts.get('enable_profile')
159 159 narrow_pat = opts.get('narrow')
160 160
161 161 # if --narrow is passed, it means they are includes and excludes for narrow
162 162 # clone
163 163 if not narrow_pat and (include or exclude or enableprofile):
164 164
165 165 def clonesparse(orig, ctx, *args, **kwargs):
166 166 sparse.updateconfig(
167 167 ctx.repo().unfiltered(),
168 168 {},
169 169 include=include,
170 170 exclude=exclude,
171 171 enableprofile=enableprofile,
172 172 usereporootpaths=True,
173 173 )
174 174 return orig(ctx, *args, **kwargs)
175 175
176 176 extensions.wrapfunction(mergemod, b'update', clonesparse)
177 177 return orig(ui, repo, *args, **opts)
178 178
179 179
180 180 def _setupclone(ui):
181 181 entry = commands.table[b'clone']
182 182 entry[1].append((b'', b'enable-profile', [], b'enable a sparse profile'))
183 183 entry[1].append((b'', b'include', [], b'include sparse pattern'))
184 184 entry[1].append((b'', b'exclude', [], b'exclude sparse pattern'))
185 185 extensions.wrapcommand(commands.table, b'clone', _clonesparsecmd)
186 186
187 187
188 188 def _setupadd(ui):
189 189 entry = commands.table[b'add']
190 190 entry[1].append(
191 191 (
192 192 b's',
193 193 b'sparse',
194 194 None,
195 195 b'also include directories of added files in sparse config',
196 196 )
197 197 )
198 198
199 199 def _add(orig, ui, repo, *pats, **opts):
200 200 if opts.get('sparse'):
201 201 dirs = set()
202 202 for pat in pats:
203 203 dirname, basename = util.split(pat)
204 204 dirs.add(dirname)
205 205 sparse.updateconfig(repo, opts, include=list(dirs))
206 206 return orig(ui, repo, *pats, **opts)
207 207
208 208 extensions.wrapcommand(commands.table, b'add', _add)
209 209
210 210
211 211 def _setupdirstate(ui):
212 212 """Modify the dirstate to prevent stat'ing excluded files,
213 213 and to prevent modifications to files outside the checkout.
214 214 """
215 215
216 216 def walk(orig, self, match, subrepos, unknown, ignored, full=True):
217 217 # hack to not exclude explicitly-specified paths so that they can
218 218 # be warned later on e.g. dirstate.add()
219 em = matchmod.exact(match.files())
220 sm = matchmod.unionmatcher([self._sparsematcher, em])
221 match = matchmod.intersectmatchers(match, sm)
219 sparse_matcher = self._sparsematcher
220 if sparse_matcher is not None:
221 em = matchmod.exact(match.files())
222 sm = matchmod.unionmatcher([self._sparsematcher, em])
223 match = matchmod.intersectmatchers(match, sm)
222 224 return orig(self, match, subrepos, unknown, ignored, full)
223 225
224 226 extensions.wrapfunction(dirstate.dirstate, b'walk', walk)
225 227
226 228 # dirstate.rebuild should not add non-matching files
227 229 def _rebuild(orig, self, parent, allfiles, changedfiles=None):
228 230 matcher = self._sparsematcher
229 if not matcher.always():
231 if matcher is not None and not matcher.always():
230 232 allfiles = [f for f in allfiles if matcher(f)]
231 233 if changedfiles:
232 234 changedfiles = [f for f in changedfiles if matcher(f)]
233 235
234 236 if changedfiles is not None:
235 237 # In _rebuild, these files will be deleted from the dirstate
236 238 # when they are not found to be in allfiles
237 239 dirstatefilestoremove = {f for f in self if not matcher(f)}
238 240 changedfiles = dirstatefilestoremove.union(changedfiles)
239 241
240 242 return orig(self, parent, allfiles, changedfiles)
241 243
242 244 extensions.wrapfunction(dirstate.dirstate, b'rebuild', _rebuild)
243 245
244 246 # Prevent adding files that are outside the sparse checkout
245 247 editfuncs = [
246 248 b'set_tracked',
247 249 b'set_untracked',
248 250 b'copy',
249 251 ]
250 252 hint = _(
251 253 b'include file with `hg debugsparse --include <pattern>` or use '
252 254 + b'`hg add -s <file>` to include file directory while adding'
253 255 )
254 256 for func in editfuncs:
255 257
256 258 def _wrapper(orig, self, *args, **kwargs):
257 259 sparsematch = self._sparsematcher
258 if not sparsematch.always():
260 if sparsematch is not None and not sparsematch.always():
259 261 for f in args:
260 262 if f is not None and not sparsematch(f) and f not in self:
261 263 raise error.Abort(
262 264 _(
263 265 b"cannot add '%s' - it is outside "
264 266 b"the sparse checkout"
265 267 )
266 268 % f,
267 269 hint=hint,
268 270 )
269 271 return orig(self, *args, **kwargs)
270 272
271 273 extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
272 274
273 275
274 276 @command(
275 277 b'debugsparse',
276 278 [
277 279 (
278 280 b'I',
279 281 b'include',
280 282 [],
281 283 _(b'include files in the sparse checkout'),
282 284 _(b'PATTERN'),
283 285 ),
284 286 (
285 287 b'X',
286 288 b'exclude',
287 289 [],
288 290 _(b'exclude files in the sparse checkout'),
289 291 _(b'PATTERN'),
290 292 ),
291 293 (
292 294 b'd',
293 295 b'delete',
294 296 [],
295 297 _(b'delete an include/exclude rule'),
296 298 _(b'PATTERN'),
297 299 ),
298 300 (
299 301 b'f',
300 302 b'force',
301 303 False,
302 304 _(b'allow changing rules even with pending changes'),
303 305 ),
304 306 (
305 307 b'',
306 308 b'enable-profile',
307 309 [],
308 310 _(b'enables the specified profile'),
309 311 _(b'PATTERN'),
310 312 ),
311 313 (
312 314 b'',
313 315 b'disable-profile',
314 316 [],
315 317 _(b'disables the specified profile'),
316 318 _(b'PATTERN'),
317 319 ),
318 320 (
319 321 b'',
320 322 b'import-rules',
321 323 [],
322 324 _(b'imports rules from a file'),
323 325 _(b'PATTERN'),
324 326 ),
325 327 (b'', b'clear-rules', False, _(b'clears local include/exclude rules')),
326 328 (
327 329 b'',
328 330 b'refresh',
329 331 False,
330 332 _(b'updates the working directory after sparseness changes'),
331 333 ),
332 334 (b'', b'reset', False, _(b'makes the repo full again')),
333 335 ]
334 336 + commands.templateopts,
335 337 _(b'[--OPTION]'),
336 338 helpbasic=True,
337 339 )
338 340 def debugsparse(ui, repo, **opts):
339 341 """make the current checkout sparse, or edit the existing checkout
340 342
341 343 The sparse command is used to make the current checkout sparse.
342 344 This means files that don't meet the sparse condition will not be
343 345 written to disk, or show up in any working copy operations. It does
344 346 not affect files in history in any way.
345 347
346 348 Passing no arguments prints the currently applied sparse rules.
347 349
348 350 --include and --exclude are used to add and remove files from the sparse
349 351 checkout. The effects of adding an include or exclude rule are applied
350 352 immediately. If applying the new rule would cause a file with pending
351 353 changes to be added or removed, the command will fail. Pass --force to
352 354 force a rule change even with pending changes (the changes on disk will
353 355 be preserved).
354 356
355 357 --delete removes an existing include/exclude rule. The effects are
356 358 immediate.
357 359
358 360 --refresh refreshes the files on disk based on the sparse rules. This is
359 361 only necessary if .hg/sparse was changed by hand.
360 362
361 363 --enable-profile and --disable-profile accept a path to a .hgsparse file.
362 364 This allows defining sparse checkouts and tracking them inside the
363 365 repository. This is useful for defining commonly used sparse checkouts for
364 366 many people to use. As the profile definition changes over time, the sparse
365 367 checkout will automatically be updated appropriately, depending on which
366 368 changeset is checked out. Changes to .hgsparse are not applied until they
367 369 have been committed.
368 370
369 371 --import-rules accepts a path to a file containing rules in the .hgsparse
370 372 format, allowing you to add --include, --exclude and --enable-profile rules
371 373 in bulk. Like the --include, --exclude and --enable-profile switches, the
372 374 changes are applied immediately.
373 375
374 376 --clear-rules removes all local include and exclude rules, while leaving
375 377 any enabled profiles in place.
376 378
377 379 Returns 0 if editing the sparse checkout succeeds.
378 380 """
379 381 opts = pycompat.byteskwargs(opts)
380 382 include = opts.get(b'include')
381 383 exclude = opts.get(b'exclude')
382 384 force = opts.get(b'force')
383 385 enableprofile = opts.get(b'enable_profile')
384 386 disableprofile = opts.get(b'disable_profile')
385 387 importrules = opts.get(b'import_rules')
386 388 clearrules = opts.get(b'clear_rules')
387 389 delete = opts.get(b'delete')
388 390 refresh = opts.get(b'refresh')
389 391 reset = opts.get(b'reset')
390 392 action = cmdutil.check_at_most_one_arg(
391 393 opts, b'import_rules', b'clear_rules', b'refresh'
392 394 )
393 395 updateconfig = bool(
394 396 include or exclude or delete or reset or enableprofile or disableprofile
395 397 )
396 398 count = sum([updateconfig, bool(action)])
397 399 if count > 1:
398 400 raise error.Abort(_(b"too many flags specified"))
399 401
400 402 # enable sparse on repo even if the requirement is missing.
401 403 repo._has_sparse = True
402 404
403 405 if count == 0:
404 406 if repo.vfs.exists(b'sparse'):
405 407 ui.status(repo.vfs.read(b"sparse") + b"\n")
406 408 temporaryincludes = sparse.readtemporaryincludes(repo)
407 409 if temporaryincludes:
408 410 ui.status(
409 411 _(b"Temporarily Included Files (for merge/rebase):\n")
410 412 )
411 413 ui.status((b"\n".join(temporaryincludes) + b"\n"))
412 414 return
413 415 else:
414 416 raise error.Abort(
415 417 _(
416 418 b'the debugsparse command is only supported on'
417 419 b' sparse repositories'
418 420 )
419 421 )
420 422
421 423 if updateconfig:
422 424 sparse.updateconfig(
423 425 repo,
424 426 opts,
425 427 include=include,
426 428 exclude=exclude,
427 429 reset=reset,
428 430 delete=delete,
429 431 enableprofile=enableprofile,
430 432 disableprofile=disableprofile,
431 433 force=force,
432 434 )
433 435
434 436 if importrules:
435 437 sparse.importfromfiles(repo, opts, importrules, force=force)
436 438
437 439 if clearrules:
438 440 sparse.clearrules(repo, force=force)
439 441
440 442 if refresh:
441 443 try:
442 444 wlock = repo.wlock()
443 445 fcounts = map(
444 446 len,
445 447 sparse.refreshwdir(
446 448 repo, repo.status(), sparse.matcher(repo), force=force
447 449 ),
448 450 )
449 451 sparse.printchanges(
450 452 ui,
451 453 opts,
452 454 added=fcounts[0],
453 455 dropped=fcounts[1],
454 456 conflicting=fcounts[2],
455 457 )
456 458 finally:
457 459 wlock.release()
458 460
459 461 del repo._has_sparse
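
The second diff (mercurial/dirstate.py) is where that None originates: sparsematchfn is stored as None when sparse is disabled, the _sparsematcher property short-circuits to None instead of building a matcher, and the Rust status fast path is gated on self._sparsematchfn is not None rather than on the removed sparse.enabled import. A rough, simplified sketch of that shape (SketchDirstate is a hypothetical stand-in, not the real class)::

    class SketchDirstate:
        # Simplified stand-in for mercurial.dirstate.dirstate.
        def __init__(self, sparsematchfn=None):
            # Either a callable that builds the sparse matcher, or None when
            # the sparse extension is disabled.
            self._sparsematchfn = sparsematchfn

        @property
        def _sparsematcher(self):
            """The matcher for the sparse checkout, or None when disabled."""
            if self._sparsematchfn is None:
                return None
            # The real property resolves the matcher on every access; the
            # called function keeps its own cache.
            return self._sparsematchfn()

        def _can_use_rust_status(self):
            # The Rust fast path is skipped whenever a sparse matcher applies.
            return self._sparsematchfn is None


    print(SketchDirstate()._sparsematcher is None)   # True
    print(SketchDirstate()._can_use_rust_status())   # True
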
@@ -1,1470 +1,1474 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import contextlib
11 11 import os
12 12 import stat
13 13 import uuid
14 14
15 15 from .i18n import _
16 16 from .pycompat import delattr
17 17
18 18 from hgdemandimport import tracing
19 19
20 20 from . import (
21 21 dirstatemap,
22 22 encoding,
23 23 error,
24 24 match as matchmod,
25 25 node,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 sparse,
31 30 util,
32 31 )
33 32
34 33 from .dirstateutils import (
35 34 timestamp,
36 35 )
37 36
38 37 from .interfaces import (
39 38 dirstate as intdirstate,
40 39 util as interfaceutil,
41 40 )
42 41
43 42 parsers = policy.importmod('parsers')
44 43 rustmod = policy.importrust('dirstate')
45 44
46 45 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 46
48 47 propertycache = util.propertycache
49 48 filecache = scmutil.filecache
50 49 _rangemask = dirstatemap.rangemask
51 50
52 51 DirstateItem = dirstatemap.DirstateItem
53 52
54 53
55 54 class repocache(filecache):
56 55 """filecache for files in .hg/"""
57 56
58 57 def join(self, obj, fname):
59 58 return obj._opener.join(fname)
60 59
61 60
62 61 class rootcache(filecache):
63 62 """filecache for files in the repository root"""
64 63
65 64 def join(self, obj, fname):
66 65 return obj._join(fname)
67 66
68 67
69 68 def requires_parents_change(func):
70 69 def wrap(self, *args, **kwargs):
71 70 if not self.pendingparentchange():
72 71 msg = 'calling `%s` outside of a parentchange context'
73 72 msg %= func.__name__
74 73 raise error.ProgrammingError(msg)
75 74 return func(self, *args, **kwargs)
76 75
77 76 return wrap
78 77
79 78
80 79 def requires_no_parents_change(func):
81 80 def wrap(self, *args, **kwargs):
82 81 if self.pendingparentchange():
83 82 msg = 'calling `%s` inside of a parentchange context'
84 83 msg %= func.__name__
85 84 raise error.ProgrammingError(msg)
86 85 return func(self, *args, **kwargs)
87 86
88 87 return wrap
89 88
90 89
91 90 @interfaceutil.implementer(intdirstate.idirstate)
92 91 class dirstate:
93 92 def __init__(
94 93 self,
95 94 opener,
96 95 ui,
97 96 root,
98 97 validate,
99 98 sparsematchfn,
100 99 nodeconstants,
101 100 use_dirstate_v2,
102 101 use_tracked_hint=False,
103 102 ):
104 103 """Create a new dirstate object.
105 104
106 105 opener is an open()-like callable that can be used to open the
107 106 dirstate file; root is the root of the directory tracked by
108 107 the dirstate.
109 108 """
110 109 self._use_dirstate_v2 = use_dirstate_v2
111 110 self._use_tracked_hint = use_tracked_hint
112 111 self._nodeconstants = nodeconstants
113 112 self._opener = opener
114 113 self._validate = validate
115 114 self._root = root
115 # Either build a sparse-matcher or None if sparse is disabled
116 116 self._sparsematchfn = sparsematchfn
117 117 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
118 118 # UNC path pointing to root share (issue4557)
119 119 self._rootdir = pathutil.normasprefix(root)
120 120 # True if any internal state may be different
121 121 self._dirty = False
122 122 # True if the set of tracked files may be different
123 123 self._dirty_tracked_set = False
124 124 self._ui = ui
125 125 self._filecache = {}
126 126 self._parentwriters = 0
127 127 self._filename = b'dirstate'
128 128 self._filename_th = b'dirstate-tracked-hint'
129 129 self._pendingfilename = b'%s.pending' % self._filename
130 130 self._plchangecallbacks = {}
131 131 self._origpl = None
132 132 self._mapcls = dirstatemap.dirstatemap
133 133 # Access and cache cwd early, so we don't access it for the first time
134 134 # after a working-copy update caused it to not exist (accessing it then
135 135 # raises an exception).
136 136 self._cwd
137 137
138 138 def prefetch_parents(self):
139 139 """make sure the parents are loaded
140 140
141 141 Used to avoid a race condition.
142 142 """
143 143 self._pl
144 144
145 145 @contextlib.contextmanager
146 146 def parentchange(self):
147 147 """Context manager for handling dirstate parents.
148 148
149 149 If an exception occurs in the scope of the context manager,
150 150 the incoherent dirstate won't be written when wlock is
151 151 released.
152 152 """
153 153 self._parentwriters += 1
154 154 yield
155 155 # Typically we want the "undo" step of a context manager in a
156 156 # finally block so it happens even when an exception
157 157 # occurs. In this case, however, we only want to decrement
158 158 # parentwriters if the code in the with statement exits
159 159 # normally, so we don't have a try/finally here on purpose.
160 160 self._parentwriters -= 1
161 161
162 162 def pendingparentchange(self):
163 163 """Returns true if the dirstate is in the middle of a set of changes
164 164 that modify the dirstate parent.
165 165 """
166 166 return self._parentwriters > 0
167 167
168 168 @propertycache
169 169 def _map(self):
170 170 """Return the dirstate contents (see documentation for dirstatemap)."""
171 171 self._map = self._mapcls(
172 172 self._ui,
173 173 self._opener,
174 174 self._root,
175 175 self._nodeconstants,
176 176 self._use_dirstate_v2,
177 177 )
178 178 return self._map
179 179
180 180 @property
181 181 def _sparsematcher(self):
182 182 """The matcher for the sparse checkout.
183 183
184 184 The working directory may not include every file from a manifest. The
185 185 matcher obtained by this property will match a path if it is to be
186 186 included in the working directory.
187
188 When sparse is disabled, return None.
187 189 """
190 if self._sparsematchfn is None:
191 return None
188 192 # TODO there is potential to cache this property. For now, the matcher
189 193 # is resolved on every access. (But the called function does use a
190 194 # cache to keep the lookup fast.)
191 195 return self._sparsematchfn()
192 196
193 197 @repocache(b'branch')
194 198 def _branch(self):
195 199 try:
196 200 return self._opener.read(b"branch").strip() or b"default"
197 201 except FileNotFoundError:
198 202 return b"default"
199 203
200 204 @property
201 205 def _pl(self):
202 206 return self._map.parents()
203 207
204 208 def hasdir(self, d):
205 209 return self._map.hastrackeddir(d)
206 210
207 211 @rootcache(b'.hgignore')
208 212 def _ignore(self):
209 213 files = self._ignorefiles()
210 214 if not files:
211 215 return matchmod.never()
212 216
213 217 pats = [b'include:%s' % f for f in files]
214 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
215 219
216 220 @propertycache
217 221 def _slash(self):
218 222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
219 223
220 224 @propertycache
221 225 def _checklink(self):
222 226 return util.checklink(self._root)
223 227
224 228 @propertycache
225 229 def _checkexec(self):
226 230 return bool(util.checkexec(self._root))
227 231
228 232 @propertycache
229 233 def _checkcase(self):
230 234 return not util.fscasesensitive(self._join(b'.hg'))
231 235
232 236 def _join(self, f):
233 237 # much faster than os.path.join()
234 238 # it's safe because f is always a relative path
235 239 return self._rootdir + f
236 240
237 241 def flagfunc(self, buildfallback):
238 242 """build a callable that returns flags associated with a filename
239 243
240 244 The information is extracted from three possible layers:
241 245 1. the file system if it supports the information
242 246 2. the "fallback" information stored in the dirstate if any
243 247 3. a more expensive mechanism inferring the flags from the parents.
244 248 """
245 249
246 250 # small hack to cache the result of buildfallback()
247 251 fallback_func = []
248 252
249 253 def get_flags(x):
250 254 entry = None
251 255 fallback_value = None
252 256 try:
253 257 st = os.lstat(self._join(x))
254 258 except OSError:
255 259 return b''
256 260
257 261 if self._checklink:
258 262 if util.statislink(st):
259 263 return b'l'
260 264 else:
261 265 entry = self.get_entry(x)
262 266 if entry.has_fallback_symlink:
263 267 if entry.fallback_symlink:
264 268 return b'l'
265 269 else:
266 270 if not fallback_func:
267 271 fallback_func.append(buildfallback())
268 272 fallback_value = fallback_func[0](x)
269 273 if b'l' in fallback_value:
270 274 return b'l'
271 275
272 276 if self._checkexec:
273 277 if util.statisexec(st):
274 278 return b'x'
275 279 else:
276 280 if entry is None:
277 281 entry = self.get_entry(x)
278 282 if entry.has_fallback_exec:
279 283 if entry.fallback_exec:
280 284 return b'x'
281 285 else:
282 286 if fallback_value is None:
283 287 if not fallback_func:
284 288 fallback_func.append(buildfallback())
285 289 fallback_value = fallback_func[0](x)
286 290 if b'x' in fallback_value:
287 291 return b'x'
288 292 return b''
289 293
290 294 return get_flags
291 295
292 296 @propertycache
293 297 def _cwd(self):
294 298 # internal config: ui.forcecwd
295 299 forcecwd = self._ui.config(b'ui', b'forcecwd')
296 300 if forcecwd:
297 301 return forcecwd
298 302 return encoding.getcwd()
299 303
300 304 def getcwd(self):
301 305 """Return the path from which a canonical path is calculated.
302 306
303 307 This path should be used to resolve file patterns or to convert
304 308 canonical paths back to file paths for display. It shouldn't be
305 309 used to get real file paths. Use vfs functions instead.
306 310 """
307 311 cwd = self._cwd
308 312 if cwd == self._root:
309 313 return b''
310 314 # self._root ends with a path separator if self._root is '/' or 'C:\'
311 315 rootsep = self._root
312 316 if not util.endswithsep(rootsep):
313 317 rootsep += pycompat.ossep
314 318 if cwd.startswith(rootsep):
315 319 return cwd[len(rootsep) :]
316 320 else:
317 321 # we're outside the repo. return an absolute path.
318 322 return cwd
319 323
320 324 def pathto(self, f, cwd=None):
321 325 if cwd is None:
322 326 cwd = self.getcwd()
323 327 path = util.pathto(self._root, cwd, f)
324 328 if self._slash:
325 329 return util.pconvert(path)
326 330 return path
327 331
328 332 def get_entry(self, path):
329 333 """return a DirstateItem for the associated path"""
330 334 entry = self._map.get(path)
331 335 if entry is None:
332 336 return DirstateItem()
333 337 return entry
334 338
335 339 def __contains__(self, key):
336 340 return key in self._map
337 341
338 342 def __iter__(self):
339 343 return iter(sorted(self._map))
340 344
341 345 def items(self):
342 346 return self._map.items()
343 347
344 348 iteritems = items
345 349
346 350 def parents(self):
347 351 return [self._validate(p) for p in self._pl]
348 352
349 353 def p1(self):
350 354 return self._validate(self._pl[0])
351 355
352 356 def p2(self):
353 357 return self._validate(self._pl[1])
354 358
355 359 @property
356 360 def in_merge(self):
357 361 """True if a merge is in progress"""
358 362 return self._pl[1] != self._nodeconstants.nullid
359 363
360 364 def branch(self):
361 365 return encoding.tolocal(self._branch)
362 366
363 367 def setparents(self, p1, p2=None):
364 368 """Set dirstate parents to p1 and p2.
365 369
366 370 When moving from two parents to one, "merged" entries are
367 371 adjusted to normal and previous copy records discarded and
368 372 returned by the call.
369 373
370 374 See localrepo.setparents()
371 375 """
372 376 if p2 is None:
373 377 p2 = self._nodeconstants.nullid
374 378 if self._parentwriters == 0:
375 379 raise ValueError(
376 380 b"cannot set dirstate parent outside of "
377 381 b"dirstate.parentchange context manager"
378 382 )
379 383
380 384 self._dirty = True
381 385 oldp2 = self._pl[1]
382 386 if self._origpl is None:
383 387 self._origpl = self._pl
384 388 nullid = self._nodeconstants.nullid
385 389 # True if we need to fold p2 related state back to a linear case
386 390 fold_p2 = oldp2 != nullid and p2 == nullid
387 391 return self._map.setparents(p1, p2, fold_p2=fold_p2)
388 392
389 393 def setbranch(self, branch):
390 394 self.__class__._branch.set(self, encoding.fromlocal(branch))
391 395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
392 396 try:
393 397 f.write(self._branch + b'\n')
394 398 f.close()
395 399
396 400 # make sure filecache has the correct stat info for _branch after
397 401 # replacing the underlying file
398 402 ce = self._filecache[b'_branch']
399 403 if ce:
400 404 ce.refresh()
401 405 except: # re-raises
402 406 f.discard()
403 407 raise
404 408
405 409 def invalidate(self):
406 410 """Causes the next access to reread the dirstate.
407 411
408 412 This is different from localrepo.invalidatedirstate() because it always
409 413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
410 414 check whether the dirstate has changed before rereading it."""
411 415
412 416 for a in ("_map", "_branch", "_ignore"):
413 417 if a in self.__dict__:
414 418 delattr(self, a)
415 419 self._dirty = False
416 420 self._dirty_tracked_set = False
417 421 self._parentwriters = 0
418 422 self._origpl = None
419 423
420 424 def copy(self, source, dest):
421 425 """Mark dest as a copy of source. Unmark dest if source is None."""
422 426 if source == dest:
423 427 return
424 428 self._dirty = True
425 429 if source is not None:
426 430 self._map.copymap[dest] = source
427 431 else:
428 432 self._map.copymap.pop(dest, None)
429 433
430 434 def copied(self, file):
431 435 return self._map.copymap.get(file, None)
432 436
433 437 def copies(self):
434 438 return self._map.copymap
435 439
436 440 @requires_no_parents_change
437 441 def set_tracked(self, filename, reset_copy=False):
438 442 """a "public" method for generic code to mark a file as tracked
439 443
440 444 This function is to be called outside of "update/merge" case. For
441 445 example by a command like `hg add X`.
442 446
443 447 if reset_copy is set, any existing copy information will be dropped.
444 448
445 449 return True if the file was previously untracked, False otherwise.
446 450 """
447 451 self._dirty = True
448 452 entry = self._map.get(filename)
449 453 if entry is None or not entry.tracked:
450 454 self._check_new_tracked_filename(filename)
451 455 pre_tracked = self._map.set_tracked(filename)
452 456 if reset_copy:
453 457 self._map.copymap.pop(filename, None)
454 458 if pre_tracked:
455 459 self._dirty_tracked_set = True
456 460 return pre_tracked
457 461
458 462 @requires_no_parents_change
459 463 def set_untracked(self, filename):
460 464 """a "public" method for generic code to mark a file as untracked
461 465
462 466 This function is to be called outside of "update/merge" case. For
463 467 example by a command like `hg remove X`.
464 468
465 469 return True if the file was previously tracked, False otherwise.
466 470 """
467 471 ret = self._map.set_untracked(filename)
468 472 if ret:
469 473 self._dirty = True
470 474 self._dirty_tracked_set = True
471 475 return ret
472 476
473 477 @requires_no_parents_change
474 478 def set_clean(self, filename, parentfiledata):
475 479 """record that the current state of the file on disk is known to be clean"""
476 480 self._dirty = True
477 481 if not self._map[filename].tracked:
478 482 self._check_new_tracked_filename(filename)
479 483 (mode, size, mtime) = parentfiledata
480 484 self._map.set_clean(filename, mode, size, mtime)
481 485
482 486 @requires_no_parents_change
483 487 def set_possibly_dirty(self, filename):
484 488 """record that the current state of the file on disk is unknown"""
485 489 self._dirty = True
486 490 self._map.set_possibly_dirty(filename)
487 491
488 492 @requires_parents_change
489 493 def update_file_p1(
490 494 self,
491 495 filename,
492 496 p1_tracked,
493 497 ):
494 498 """Set a file as tracked in the parent (or not)
495 499
496 500 This is to be called when adjusting the dirstate to a new parent after a history
497 501 rewriting operation.
498 502
499 503 It should not be called during a merge (p2 != nullid) and only within
500 504 a `with dirstate.parentchange():` context.
501 505 """
502 506 if self.in_merge:
503 507 msg = b'update_file_reference should not be called when merging'
504 508 raise error.ProgrammingError(msg)
505 509 entry = self._map.get(filename)
506 510 if entry is None:
507 511 wc_tracked = False
508 512 else:
509 513 wc_tracked = entry.tracked
510 514 if not (p1_tracked or wc_tracked):
511 515 # the file is no longer relevant to anyone
512 516 if self._map.get(filename) is not None:
513 517 self._map.reset_state(filename)
514 518 self._dirty = True
515 519 elif (not p1_tracked) and wc_tracked:
516 520 if entry is not None and entry.added:
517 521 return # avoid dropping copy information (maybe?)
518 522
519 523 self._map.reset_state(
520 524 filename,
521 525 wc_tracked,
522 526 p1_tracked,
523 527 # the underlying reference might have changed, we will have to
524 528 # check it.
525 529 has_meaningful_mtime=False,
526 530 )
527 531
528 532 @requires_parents_change
529 533 def update_file(
530 534 self,
531 535 filename,
532 536 wc_tracked,
533 537 p1_tracked,
534 538 p2_info=False,
535 539 possibly_dirty=False,
536 540 parentfiledata=None,
537 541 ):
538 542 """update the information about a file in the dirstate
539 543
540 544 This is to be called when the dirstate's parent changes to keep track
541 545 of the file situation with regard to the working copy and its parent.
542 546
543 547 This function must be called within a `dirstate.parentchange` context.
544 548
545 549 note: the API is at an early stage and we might need to adjust it
546 550 depending on what information ends up being relevant and useful to
547 551 other processing.
548 552 """
549 553
550 554 # note: I do not think we need to double check name clash here since we
551 555 # are in a update/merge case that should already have taken care of
552 556 # this. The test agrees
553 557
554 558 self._dirty = True
555 559 old_entry = self._map.get(filename)
556 560 if old_entry is None:
557 561 prev_tracked = False
558 562 else:
559 563 prev_tracked = old_entry.tracked
560 564 if prev_tracked != wc_tracked:
561 565 self._dirty_tracked_set = True
562 566
563 567 self._map.reset_state(
564 568 filename,
565 569 wc_tracked,
566 570 p1_tracked,
567 571 p2_info=p2_info,
568 572 has_meaningful_mtime=not possibly_dirty,
569 573 parentfiledata=parentfiledata,
570 574 )
571 575
572 576 def _check_new_tracked_filename(self, filename):
573 577 scmutil.checkfilename(filename)
574 578 if self._map.hastrackeddir(filename):
575 579 msg = _(b'directory %r already in dirstate')
576 580 msg %= pycompat.bytestr(filename)
577 581 raise error.Abort(msg)
578 582 # shadows
579 583 for d in pathutil.finddirs(filename):
580 584 if self._map.hastrackeddir(d):
581 585 break
582 586 entry = self._map.get(d)
583 587 if entry is not None and not entry.removed:
584 588 msg = _(b'file %r in dirstate clashes with %r')
585 589 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
586 590 raise error.Abort(msg)
587 591
588 592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
589 593 if exists is None:
590 594 exists = os.path.lexists(os.path.join(self._root, path))
591 595 if not exists:
592 596 # Maybe a path component exists
593 597 if not ignoremissing and b'/' in path:
594 598 d, f = path.rsplit(b'/', 1)
595 599 d = self._normalize(d, False, ignoremissing, None)
596 600 folded = d + b"/" + f
597 601 else:
598 602 # No path components, preserve original case
599 603 folded = path
600 604 else:
601 605 # recursively normalize leading directory components
602 606 # against dirstate
603 607 if b'/' in normed:
604 608 d, f = normed.rsplit(b'/', 1)
605 609 d = self._normalize(d, False, ignoremissing, True)
606 610 r = self._root + b"/" + d
607 611 folded = d + b"/" + util.fspath(f, r)
608 612 else:
609 613 folded = util.fspath(normed, self._root)
610 614 storemap[normed] = folded
611 615
612 616 return folded
613 617
614 618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
615 619 normed = util.normcase(path)
616 620 folded = self._map.filefoldmap.get(normed, None)
617 621 if folded is None:
618 622 if isknown:
619 623 folded = path
620 624 else:
621 625 folded = self._discoverpath(
622 626 path, normed, ignoremissing, exists, self._map.filefoldmap
623 627 )
624 628 return folded
625 629
626 630 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
627 631 normed = util.normcase(path)
628 632 folded = self._map.filefoldmap.get(normed, None)
629 633 if folded is None:
630 634 folded = self._map.dirfoldmap.get(normed, None)
631 635 if folded is None:
632 636 if isknown:
633 637 folded = path
634 638 else:
635 639 # store discovered result in dirfoldmap so that future
636 640 # normalizefile calls don't start matching directories
637 641 folded = self._discoverpath(
638 642 path, normed, ignoremissing, exists, self._map.dirfoldmap
639 643 )
640 644 return folded
641 645
642 646 def normalize(self, path, isknown=False, ignoremissing=False):
643 647 """
644 648 normalize the case of a pathname when on a casefolding filesystem
645 649
646 650 isknown specifies whether the filename came from walking the
647 651 disk, to avoid extra filesystem access.
648 652
649 653 If ignoremissing is True, missing paths are returned
650 654 unchanged. Otherwise, we try harder to normalize possibly
651 655 existing path components.
652 656
653 657 The normalized case is determined based on the following precedence:
654 658
655 659 - version of name already stored in the dirstate
656 660 - version of name stored on disk
657 661 - version provided via command arguments
658 662 """
659 663
660 664 if self._checkcase:
661 665 return self._normalize(path, isknown, ignoremissing)
662 666 return path
663 667
664 668 def clear(self):
665 669 self._map.clear()
666 670 self._dirty = True
667 671
668 672 def rebuild(self, parent, allfiles, changedfiles=None):
669 673 if changedfiles is None:
670 674 # Rebuild entire dirstate
671 675 to_lookup = allfiles
672 676 to_drop = []
673 677 self.clear()
674 678 elif len(changedfiles) < 10:
675 679 # Avoid turning allfiles into a set, which can be expensive if it's
676 680 # large.
677 681 to_lookup = []
678 682 to_drop = []
679 683 for f in changedfiles:
680 684 if f in allfiles:
681 685 to_lookup.append(f)
682 686 else:
683 687 to_drop.append(f)
684 688 else:
685 689 changedfilesset = set(changedfiles)
686 690 to_lookup = changedfilesset & set(allfiles)
687 691 to_drop = changedfilesset - to_lookup
688 692
689 693 if self._origpl is None:
690 694 self._origpl = self._pl
691 695 self._map.setparents(parent, self._nodeconstants.nullid)
692 696
693 697 for f in to_lookup:
694 698
695 699 if self.in_merge:
696 700 self.set_tracked(f)
697 701 else:
698 702 self._map.reset_state(
699 703 f,
700 704 wc_tracked=True,
701 705 p1_tracked=True,
702 706 )
703 707 for f in to_drop:
704 708 self._map.reset_state(f)
705 709
706 710 self._dirty = True
707 711
708 712 def identity(self):
709 713 """Return identity of dirstate itself to detect changing in storage
710 714
711 715 If identity of previous dirstate is equal to this, writing
712 716 changes based on the former dirstate out can keep consistency.
713 717 """
714 718 return self._map.identity
715 719
716 720 def write(self, tr):
717 721 if not self._dirty:
718 722 return
719 723
720 724 write_key = self._use_tracked_hint and self._dirty_tracked_set
721 725 if tr:
722 726 # delay writing in-memory changes out
723 727 tr.addfilegenerator(
724 728 b'dirstate-1-main',
725 729 (self._filename,),
726 730 lambda f: self._writedirstate(tr, f),
727 731 location=b'plain',
728 732 post_finalize=True,
729 733 )
730 734 if write_key:
731 735 tr.addfilegenerator(
732 736 b'dirstate-2-key-post',
733 737 (self._filename_th,),
734 738 lambda f: self._write_tracked_hint(tr, f),
735 739 location=b'plain',
736 740 post_finalize=True,
737 741 )
738 742 return
739 743
740 744 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
741 745 with file(self._filename) as f:
742 746 self._writedirstate(tr, f)
743 747 if write_key:
744 748 # we update the key-file after writing to make sure readers have a
745 749 # key that matches the newly written content
746 750 with file(self._filename_th) as f:
747 751 self._write_tracked_hint(tr, f)
748 752
749 753 def delete_tracked_hint(self):
750 754 """remove the tracked_hint file
751 755
752 756 To be used by format downgrades operation"""
753 757 self._opener.unlink(self._filename_th)
754 758 self._use_tracked_hint = False
755 759
756 760 def addparentchangecallback(self, category, callback):
757 761 """add a callback to be called when the wd parents are changed
758 762
759 763 Callback will be called with the following arguments:
760 764 dirstate, (oldp1, oldp2), (newp1, newp2)
761 765
762 766 Category is a unique identifier to allow overwriting an old callback
763 767 with a newer callback.
764 768 """
765 769 self._plchangecallbacks[category] = callback
766 770
767 771 def _writedirstate(self, tr, st):
768 772 # notify callbacks about parents change
769 773 if self._origpl is not None and self._origpl != self._pl:
770 774 for c, callback in sorted(self._plchangecallbacks.items()):
771 775 callback(self, self._origpl, self._pl)
772 776 self._origpl = None
773 777 self._map.write(tr, st)
774 778 self._dirty = False
775 779 self._dirty_tracked_set = False
776 780
777 781 def _write_tracked_hint(self, tr, f):
778 782 key = node.hex(uuid.uuid4().bytes)
779 783 f.write(b"1\n%s\n" % key) # 1 is the format version
780 784
781 785 def _dirignore(self, f):
782 786 if self._ignore(f):
783 787 return True
784 788 for p in pathutil.finddirs(f):
785 789 if self._ignore(p):
786 790 return True
787 791 return False
788 792
789 793 def _ignorefiles(self):
790 794 files = []
791 795 if os.path.exists(self._join(b'.hgignore')):
792 796 files.append(self._join(b'.hgignore'))
793 797 for name, path in self._ui.configitems(b"ui"):
794 798 if name == b'ignore' or name.startswith(b'ignore.'):
795 799 # we need to use os.path.join here rather than self._join
796 800 # because path is arbitrary and user-specified
797 801 files.append(os.path.join(self._rootdir, util.expandpath(path)))
798 802 return files
799 803
800 804 def _ignorefileandline(self, f):
801 805 files = collections.deque(self._ignorefiles())
802 806 visited = set()
803 807 while files:
804 808 i = files.popleft()
805 809 patterns = matchmod.readpatternfile(
806 810 i, self._ui.warn, sourceinfo=True
807 811 )
808 812 for pattern, lineno, line in patterns:
809 813 kind, p = matchmod._patsplit(pattern, b'glob')
810 814 if kind == b"subinclude":
811 815 if p not in visited:
812 816 files.append(p)
813 817 continue
814 818 m = matchmod.match(
815 819 self._root, b'', [], [pattern], warn=self._ui.warn
816 820 )
817 821 if m(f):
818 822 return (i, lineno, line)
819 823 visited.add(i)
820 824 return (None, -1, b"")
821 825
822 826 def _walkexplicit(self, match, subrepos):
823 827 """Get stat data about the files explicitly specified by match.
824 828
825 829 Return a triple (results, dirsfound, dirsnotfound).
826 830 - results is a mapping from filename to stat result. It also contains
827 831 listings mapping subrepos and .hg to None.
828 832 - dirsfound is a list of files found to be directories.
829 833 - dirsnotfound is a list of files that the dirstate thinks are
830 834 directories and that were not found."""
831 835
832 836 def badtype(mode):
833 837 kind = _(b'unknown')
834 838 if stat.S_ISCHR(mode):
835 839 kind = _(b'character device')
836 840 elif stat.S_ISBLK(mode):
837 841 kind = _(b'block device')
838 842 elif stat.S_ISFIFO(mode):
839 843 kind = _(b'fifo')
840 844 elif stat.S_ISSOCK(mode):
841 845 kind = _(b'socket')
842 846 elif stat.S_ISDIR(mode):
843 847 kind = _(b'directory')
844 848 return _(b'unsupported file type (type is %s)') % kind
845 849
846 850 badfn = match.bad
847 851 dmap = self._map
848 852 lstat = os.lstat
849 853 getkind = stat.S_IFMT
850 854 dirkind = stat.S_IFDIR
851 855 regkind = stat.S_IFREG
852 856 lnkkind = stat.S_IFLNK
853 857 join = self._join
854 858 dirsfound = []
855 859 foundadd = dirsfound.append
856 860 dirsnotfound = []
857 861 notfoundadd = dirsnotfound.append
858 862
859 863 if not match.isexact() and self._checkcase:
860 864 normalize = self._normalize
861 865 else:
862 866 normalize = None
863 867
864 868 files = sorted(match.files())
865 869 subrepos.sort()
866 870 i, j = 0, 0
867 871 while i < len(files) and j < len(subrepos):
868 872 subpath = subrepos[j] + b"/"
869 873 if files[i] < subpath:
870 874 i += 1
871 875 continue
872 876 while i < len(files) and files[i].startswith(subpath):
873 877 del files[i]
874 878 j += 1
875 879
876 880 if not files or b'' in files:
877 881 files = [b'']
878 882 # constructing the foldmap is expensive, so don't do it for the
879 883 # common case where files is ['']
880 884 normalize = None
881 885 results = dict.fromkeys(subrepos)
882 886 results[b'.hg'] = None
883 887
884 888 for ff in files:
885 889 if normalize:
886 890 nf = normalize(ff, False, True)
887 891 else:
888 892 nf = ff
889 893 if nf in results:
890 894 continue
891 895
892 896 try:
893 897 st = lstat(join(nf))
894 898 kind = getkind(st.st_mode)
895 899 if kind == dirkind:
896 900 if nf in dmap:
897 901 # file replaced by dir on disk but still in dirstate
898 902 results[nf] = None
899 903 foundadd((nf, ff))
900 904 elif kind == regkind or kind == lnkkind:
901 905 results[nf] = st
902 906 else:
903 907 badfn(ff, badtype(kind))
904 908 if nf in dmap:
905 909 results[nf] = None
906 910 except OSError as inst: # nf not found on disk - it is dirstate only
907 911 if nf in dmap: # does it exactly match a missing file?
908 912 results[nf] = None
909 913 else: # does it match a missing directory?
910 914 if self._map.hasdir(nf):
911 915 notfoundadd(nf)
912 916 else:
913 917 badfn(ff, encoding.strtolocal(inst.strerror))
914 918
915 919 # match.files() may contain explicitly-specified paths that shouldn't
916 920 # be taken; drop them from the list of files found. dirsfound/notfound
917 921 # aren't filtered here because they will be tested later.
918 922 if match.anypats():
919 923 for f in list(results):
920 924 if f == b'.hg' or f in subrepos:
921 925 # keep sentinel to disable further out-of-repo walks
922 926 continue
923 927 if not match(f):
924 928 del results[f]
925 929
926 930 # Case insensitive filesystems cannot rely on lstat() failing to detect
927 931 # a case-only rename. Prune the stat object for any file that does not
928 932 # match the case in the filesystem, if there are multiple files that
929 933 # normalize to the same path.
930 934 if match.isexact() and self._checkcase:
931 935 normed = {}
932 936
933 937 for f, st in results.items():
934 938 if st is None:
935 939 continue
936 940
937 941 nc = util.normcase(f)
938 942 paths = normed.get(nc)
939 943
940 944 if paths is None:
941 945 paths = set()
942 946 normed[nc] = paths
943 947
944 948 paths.add(f)
945 949
946 950 for norm, paths in normed.items():
947 951 if len(paths) > 1:
948 952 for path in paths:
949 953 folded = self._discoverpath(
950 954 path, norm, True, None, self._map.dirfoldmap
951 955 )
952 956 if path != folded:
953 957 results[path] = None
954 958
955 959 return results, dirsfound, dirsnotfound
956 960
957 961 def walk(self, match, subrepos, unknown, ignored, full=True):
958 962 """
959 963 Walk recursively through the directory tree, finding all files
960 964 matched by match.
961 965
962 966 If full is False, maybe skip some known-clean files.
963 967
964 968 Return a dict mapping filename to stat-like object (either
965 969 mercurial.osutil.stat instance or return value of os.stat()).
966 970
967 971 """
968 972 # full is a flag that extensions that hook into walk can use -- this
969 973 # implementation doesn't use it at all. This satisfies the contract
970 974 # because we only guarantee a "maybe".
971 975
972 976 if ignored:
973 977 ignore = util.never
974 978 dirignore = util.never
975 979 elif unknown:
976 980 ignore = self._ignore
977 981 dirignore = self._dirignore
978 982 else:
979 983 # if not unknown and not ignored, drop dir recursion and step 2
980 984 ignore = util.always
981 985 dirignore = util.always
982 986
983 987 matchfn = match.matchfn
984 988 matchalways = match.always()
985 989 matchtdir = match.traversedir
986 990 dmap = self._map
987 991 listdir = util.listdir
988 992 lstat = os.lstat
989 993 dirkind = stat.S_IFDIR
990 994 regkind = stat.S_IFREG
991 995 lnkkind = stat.S_IFLNK
992 996 join = self._join
993 997
994 998 exact = skipstep3 = False
995 999 if match.isexact(): # match.exact
996 1000 exact = True
997 1001 dirignore = util.always # skip step 2
998 1002 elif match.prefix(): # match.match, no patterns
999 1003 skipstep3 = True
1000 1004
1001 1005 if not exact and self._checkcase:
1002 1006 normalize = self._normalize
1003 1007 normalizefile = self._normalizefile
1004 1008 skipstep3 = False
1005 1009 else:
1006 1010 normalize = self._normalize
1007 1011 normalizefile = None
1008 1012
1009 1013 # step 1: find all explicit files
1010 1014 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1011 1015 if matchtdir:
1012 1016 for d in work:
1013 1017 matchtdir(d[0])
1014 1018 for d in dirsnotfound:
1015 1019 matchtdir(d)
1016 1020
1017 1021 skipstep3 = skipstep3 and not (work or dirsnotfound)
1018 1022 work = [d for d in work if not dirignore(d[0])]
1019 1023
1020 1024 # step 2: visit subdirectories
1021 1025 def traverse(work, alreadynormed):
1022 1026 wadd = work.append
1023 1027 while work:
1024 1028 tracing.counter('dirstate.walk work', len(work))
1025 1029 nd = work.pop()
1026 1030 visitentries = match.visitchildrenset(nd)
1027 1031 if not visitentries:
1028 1032 continue
1029 1033 if visitentries == b'this' or visitentries == b'all':
1030 1034 visitentries = None
1031 1035 skip = None
1032 1036 if nd != b'':
1033 1037 skip = b'.hg'
1034 1038 try:
1035 1039 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1036 1040 entries = listdir(join(nd), stat=True, skip=skip)
1037 1041 except (PermissionError, FileNotFoundError) as inst:
1038 1042 match.bad(
1039 1043 self.pathto(nd), encoding.strtolocal(inst.strerror)
1040 1044 )
1041 1045 continue
1042 1046 for f, kind, st in entries:
1043 1047 # Some matchers may return files in the visitentries set,
1044 1048 # instead of 'this', if the matcher explicitly mentions them
1045 1049 # and is not an exactmatcher. This is acceptable; we do not
1046 1050 # make any hard assumptions about file-or-directory below
1047 1051 # based on the presence of `f` in visitentries. If
1048 1052 # visitchildrenset returned a set, we can always skip the
1049 1053 # entries *not* in the set it provided regardless of whether
1050 1054 # they're actually a file or a directory.
1051 1055 if visitentries and f not in visitentries:
1052 1056 continue
1053 1057 if normalizefile:
1054 1058 # even though f might be a directory, we're only
1055 1059 # interested in comparing it to files currently in the
1056 1060 # dmap -- therefore normalizefile is enough
1057 1061 nf = normalizefile(
1058 1062 nd and (nd + b"/" + f) or f, True, True
1059 1063 )
1060 1064 else:
1061 1065 nf = nd and (nd + b"/" + f) or f
1062 1066 if nf not in results:
1063 1067 if kind == dirkind:
1064 1068 if not ignore(nf):
1065 1069 if matchtdir:
1066 1070 matchtdir(nf)
1067 1071 wadd(nf)
1068 1072 if nf in dmap and (matchalways or matchfn(nf)):
1069 1073 results[nf] = None
1070 1074 elif kind == regkind or kind == lnkkind:
1071 1075 if nf in dmap:
1072 1076 if matchalways or matchfn(nf):
1073 1077 results[nf] = st
1074 1078 elif (matchalways or matchfn(nf)) and not ignore(
1075 1079 nf
1076 1080 ):
1077 1081 # unknown file -- normalize if necessary
1078 1082 if not alreadynormed:
1079 1083 nf = normalize(nf, False, True)
1080 1084 results[nf] = st
1081 1085 elif nf in dmap and (matchalways or matchfn(nf)):
1082 1086 results[nf] = None
1083 1087
1084 1088 for nd, d in work:
1085 1089 # alreadynormed means that processwork doesn't have to do any
1086 1090 # expensive directory normalization
1087 1091 alreadynormed = not normalize or nd == d
1088 1092 traverse([d], alreadynormed)
1089 1093
1090 1094 for s in subrepos:
1091 1095 del results[s]
1092 1096 del results[b'.hg']
1093 1097
1094 1098 # step 3: visit remaining files from dmap
1095 1099 if not skipstep3 and not exact:
1096 1100 # If a dmap file is not in results yet, it was either
1097 1101 # a) not matching matchfn b) ignored, c) missing, or d) under a
1098 1102 # symlink directory.
1099 1103 if not results and matchalways:
1100 1104 visit = [f for f in dmap]
1101 1105 else:
1102 1106 visit = [f for f in dmap if f not in results and matchfn(f)]
1103 1107 visit.sort()
1104 1108
1105 1109 if unknown:
1106 1110 # unknown == True means we walked all dirs under the roots
1107 1111 # that wasn't ignored, and everything that matched was stat'ed
1108 1112 # and is already in results.
1109 1113 # The rest must thus be ignored or under a symlink.
1110 1114 audit_path = pathutil.pathauditor(self._root, cached=True)
1111 1115
1112 1116 for nf in iter(visit):
1113 1117 # If a stat for the same file was already added with a
1114 1118 # different case, don't add one for this, since that would
1115 1119 # make it appear as if the file exists under both names
1116 1120 # on disk.
1117 1121 if (
1118 1122 normalizefile
1119 1123 and normalizefile(nf, True, True) in results
1120 1124 ):
1121 1125 results[nf] = None
1122 1126 # Report ignored items in the dmap as long as they are not
1123 1127 # under a symlink directory.
1124 1128 elif audit_path.check(nf):
1125 1129 try:
1126 1130 results[nf] = lstat(join(nf))
1127 1131 # file was just ignored, no links, and exists
1128 1132 except OSError:
1129 1133 # file doesn't exist
1130 1134 results[nf] = None
1131 1135 else:
1132 1136 # It's either missing or under a symlink directory
1133 1137 # which we in this case report as missing
1134 1138 results[nf] = None
1135 1139 else:
1136 1140 # We may not have walked the full directory tree above,
1137 1141 # so stat and check everything we missed.
1138 1142 iv = iter(visit)
1139 1143 for st in util.statfiles([join(i) for i in visit]):
1140 1144 results[next(iv)] = st
1141 1145 return results
1142 1146
1143 1147 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1144 1148 # Force Rayon (Rust parallelism library) to respect the number of
1145 1149 # workers. This is a temporary workaround until Rust code knows
1146 1150 # how to read the config file.
1147 1151 numcpus = self._ui.configint(b"worker", b"numcpus")
1148 1152 if numcpus is not None:
1149 1153 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1150 1154
1151 1155 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1152 1156 if not workers_enabled:
1153 1157 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1154 1158
1155 1159 (
1156 1160 lookup,
1157 1161 modified,
1158 1162 added,
1159 1163 removed,
1160 1164 deleted,
1161 1165 clean,
1162 1166 ignored,
1163 1167 unknown,
1164 1168 warnings,
1165 1169 bad,
1166 1170 traversed,
1167 1171 dirty,
1168 1172 ) = rustmod.status(
1169 1173 self._map._map,
1170 1174 matcher,
1171 1175 self._rootdir,
1172 1176 self._ignorefiles(),
1173 1177 self._checkexec,
1174 1178 bool(list_clean),
1175 1179 bool(list_ignored),
1176 1180 bool(list_unknown),
1177 1181 bool(matcher.traversedir),
1178 1182 )
1179 1183
1180 1184 self._dirty |= dirty
1181 1185
1182 1186 if matcher.traversedir:
1183 1187 for dir in traversed:
1184 1188 matcher.traversedir(dir)
1185 1189
1186 1190 if self._ui.warn:
1187 1191 for item in warnings:
1188 1192 if isinstance(item, tuple):
1189 1193 file_path, syntax = item
1190 1194 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1191 1195 file_path,
1192 1196 syntax,
1193 1197 )
1194 1198 self._ui.warn(msg)
1195 1199 else:
1196 1200 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1197 1201 self._ui.warn(
1198 1202 msg
1199 1203 % (
1200 1204 pathutil.canonpath(
1201 1205 self._rootdir, self._rootdir, item
1202 1206 ),
1203 1207 b"No such file or directory",
1204 1208 )
1205 1209 )
1206 1210
1207 1211 for (fn, message) in bad:
1208 1212 matcher.bad(fn, encoding.strtolocal(message))
1209 1213
1210 1214 status = scmutil.status(
1211 1215 modified=modified,
1212 1216 added=added,
1213 1217 removed=removed,
1214 1218 deleted=deleted,
1215 1219 unknown=unknown,
1216 1220 ignored=ignored,
1217 1221 clean=clean,
1218 1222 )
1219 1223 return (lookup, status)
1220 1224
1221 1225 def status(self, match, subrepos, ignored, clean, unknown):
1222 1226 """Determine the status of the working copy relative to the
1223 1227 dirstate and return a pair of (unsure, status), where status is of type
1224 1228 scmutil.status and:
1225 1229
1226 1230 unsure:
1227 1231 files that might have been modified since the dirstate was
1228 1232 written, but need to be read to be sure (size is the same
1229 1233 but mtime differs)
1230 1234 status.modified:
1231 1235 files that have definitely been modified since the dirstate
1232 1236 was written (different size or mode)
1233 1237 status.clean:
1234 1238 files that have definitely not been modified since the
1235 1239 dirstate was written
1236 1240 """
1237 1241 listignored, listclean, listunknown = ignored, clean, unknown
1238 1242 lookup, modified, added, unknown, ignored = [], [], [], [], []
1239 1243 removed, deleted, clean = [], [], []
1240 1244
1241 1245 dmap = self._map
1242 1246 dmap.preload()
1243 1247
1244 1248 use_rust = True
1245 1249
1246 1250 allowed_matchers = (
1247 1251 matchmod.alwaysmatcher,
1248 1252 matchmod.exactmatcher,
1249 1253 matchmod.includematcher,
1250 1254 matchmod.intersectionmatcher,
1251 1255 matchmod.nevermatcher,
1252 1256 matchmod.unionmatcher,
1253 1257 )
1254 1258
1255 1259 if rustmod is None:
1256 1260 use_rust = False
1257 1261 elif self._checkcase:
1258 1262 # Case-insensitive filesystems are not handled yet
1259 1263 use_rust = False
1260 1264 elif subrepos:
1261 1265 use_rust = False
1262 elif sparse.enabled:
1266 elif self._sparsematchfn is not None:
1263 1267 use_rust = False
1264 1268 elif not isinstance(match, allowed_matchers):
1265 1269 # Some matchers have yet to be implemented
1266 1270 use_rust = False
1267 1271
1268 1272 # Get the time from the filesystem so we can disambiguate files that
1269 1273 # appear modified in the present or future.
1270 1274 try:
1271 1275 mtime_boundary = timestamp.get_fs_now(self._opener)
1272 1276 except OSError:
1273 1277 # In largefiles or readonly context
1274 1278 mtime_boundary = None
1275 1279
1276 1280 if use_rust:
1277 1281 try:
1278 1282 res = self._rust_status(
1279 1283 match, listclean, listignored, listunknown
1280 1284 )
1281 1285 return res + (mtime_boundary,)
1282 1286 except rustmod.FallbackError:
1283 1287 pass
1284 1288
1285 1289 def noop(f):
1286 1290 pass
1287 1291
1288 1292 dcontains = dmap.__contains__
1289 1293 dget = dmap.__getitem__
1290 1294 ladd = lookup.append # aka "unsure"
1291 1295 madd = modified.append
1292 1296 aadd = added.append
1293 1297 uadd = unknown.append if listunknown else noop
1294 1298 iadd = ignored.append if listignored else noop
1295 1299 radd = removed.append
1296 1300 dadd = deleted.append
1297 1301 cadd = clean.append if listclean else noop
1298 1302 mexact = match.exact
1299 1303 dirignore = self._dirignore
1300 1304 checkexec = self._checkexec
1301 1305 checklink = self._checklink
1302 1306 copymap = self._map.copymap
1303 1307
1304 1308 # We need to do full walks when either
1305 1309 # - we're listing all clean files, or
1306 1310 # - match.traversedir does something, because match.traversedir should
1307 1311 # be called for every dir in the working dir
1308 1312 full = listclean or match.traversedir is not None
1309 1313 for fn, st in self.walk(
1310 1314 match, subrepos, listunknown, listignored, full=full
1311 1315 ).items():
1312 1316 if not dcontains(fn):
1313 1317 if (listignored or mexact(fn)) and dirignore(fn):
1314 1318 if listignored:
1315 1319 iadd(fn)
1316 1320 else:
1317 1321 uadd(fn)
1318 1322 continue
1319 1323
1320 1324 t = dget(fn)
1321 1325 mode = t.mode
1322 1326 size = t.size
1323 1327
1324 1328 if not st and t.tracked:
1325 1329 dadd(fn)
1326 1330 elif t.p2_info:
1327 1331 madd(fn)
1328 1332 elif t.added:
1329 1333 aadd(fn)
1330 1334 elif t.removed:
1331 1335 radd(fn)
1332 1336 elif t.tracked:
1333 1337 if not checklink and t.has_fallback_symlink:
1334 1338 # If the file system does not support symlink, the mode
1335 1339 # might not be correctly stored in the dirstate, so do not
1336 1340 # trust it.
1337 1341 ladd(fn)
1338 1342 elif not checkexec and t.has_fallback_exec:
1339 1343 # If the file system does not support exec bits, the mode
1340 1344 # might not be correctly stored in the dirstate, so do not
1341 1345 # trust it.
1342 1346 ladd(fn)
1343 1347 elif (
1344 1348 size >= 0
1345 1349 and (
1346 1350 (size != st.st_size and size != st.st_size & _rangemask)
1347 1351 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1348 1352 )
1349 1353 or fn in copymap
1350 1354 ):
1351 1355 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1352 1356 # issue6456: Size returned may be longer due to
1353 1357 # encryption on EXT-4 fscrypt, undecided.
1354 1358 ladd(fn)
1355 1359 else:
1356 1360 madd(fn)
1357 1361 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1358 1362 # There might be a change in the future if for example the
1359 1363 # internal clock is off, but this is a case where the issues
1360 1364 # the user would face would be a lot worse and there is
1361 1365 # nothing we can really do.
1362 1366 ladd(fn)
1363 1367 elif listclean:
1364 1368 cadd(fn)
1365 1369 status = scmutil.status(
1366 1370 modified, added, removed, deleted, unknown, ignored, clean
1367 1371 )
1368 1372 return (lookup, status, mtime_boundary)
1369 1373
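The status() method above returns "unsure" files separately because the dirstate alone cannot tell whether they changed; callers re-read file contents to decide. A minimal sketch of that caller-side step, assuming a repository object and using only APIs referenced elsewhere in this file (the matcher choice is illustrative):

    # sketch: classify "unsure" entries by comparing the working copy with p1
    m = matchmod.always()
    unsure, st, mtime_boundary = repo.dirstate.status(
        m, subrepos=[], ignored=False, clean=False, unknown=False
    )
    pctx = repo[b'.']   # first parent of the working directory
    wctx = repo[None]   # working directory context
    really_modified = [
        f for f in unsure if f not in pctx or pctx[f].cmp(wctx[f])
    ]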
1370 1374 def matches(self, match):
1371 1375 """
1372 1376 return files in the dirstate (in whatever state) filtered by match
1373 1377 """
1374 1378 dmap = self._map
1375 1379 if rustmod is not None:
1376 1380 dmap = self._map._map
1377 1381
1378 1382 if match.always():
1379 1383 return dmap.keys()
1380 1384 files = match.files()
1381 1385 if match.isexact():
1382 1386 # fast path -- filter the other way around, since typically files is
1383 1387 # much smaller than dmap
1384 1388 return [f for f in files if f in dmap]
1385 1389 if match.prefix() and all(fn in dmap for fn in files):
1386 1390 # fast path -- all the values are known to be files, so just return
1387 1391 # that
1388 1392 return list(files)
1389 1393 return [f for f in dmap if match(f)]
1390 1394
1391 1395 def _actualfilename(self, tr):
1392 1396 if tr:
1393 1397 return self._pendingfilename
1394 1398 else:
1395 1399 return self._filename
1396 1400
1397 1401 def savebackup(self, tr, backupname):
1398 1402 '''Save current dirstate into backup file'''
1399 1403 filename = self._actualfilename(tr)
1400 1404 assert backupname != filename
1401 1405
1402 1406 # use '_writedirstate' instead of 'write' to make sure changes are written,
1403 1407 # because the latter omits writing out if transaction is running.
1404 1408 # output file will be used to create backup of dirstate at this point.
1405 1409 if self._dirty or not self._opener.exists(filename):
1406 1410 self._writedirstate(
1407 1411 tr,
1408 1412 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1409 1413 )
1410 1414
1411 1415 if tr:
1412 1416 # ensure that subsequent tr.writepending returns True for
1413 1417 # changes written out above, even if dirstate is never
1414 1418 # changed after this
1415 1419 tr.addfilegenerator(
1416 1420 b'dirstate-1-main',
1417 1421 (self._filename,),
1418 1422 lambda f: self._writedirstate(tr, f),
1419 1423 location=b'plain',
1420 1424 post_finalize=True,
1421 1425 )
1422 1426
1423 1427 # ensure that pending file written above is unlinked at
1424 1428 # failure, even if tr.writepending isn't invoked until the
1425 1429 # end of this transaction
1426 1430 tr.registertmp(filename, location=b'plain')
1427 1431
1428 1432 self._opener.tryunlink(backupname)
1429 1433 # hardlink backup is okay because _writedirstate is always called
1430 1434 # with an "atomictemp=True" file.
1431 1435 util.copyfile(
1432 1436 self._opener.join(filename),
1433 1437 self._opener.join(backupname),
1434 1438 hardlink=True,
1435 1439 )
1436 1440
1437 1441 def restorebackup(self, tr, backupname):
1438 1442 '''Restore dirstate by backup file'''
1439 1443 # this "invalidate()" prevents "wlock.release()" from writing
1440 1444 # changes of dirstate out after restoring from backup file
1441 1445 self.invalidate()
1442 1446 filename = self._actualfilename(tr)
1443 1447 o = self._opener
1444 1448 if util.samefile(o.join(backupname), o.join(filename)):
1445 1449 o.unlink(backupname)
1446 1450 else:
1447 1451 o.rename(backupname, filename, checkambig=True)
1448 1452
1449 1453 def clearbackup(self, tr, backupname):
1450 1454 '''Clear backup file'''
1451 1455 self._opener.unlink(backupname)
1452 1456
1453 1457 def verify(self, m1, m2):
1454 1458 """check the dirstate content again the parent manifest and yield errors"""
1455 1459 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1456 1460 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1457 1461 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1458 1462 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1459 1463 for f, entry in self.items():
1460 1464 state = entry.state
1461 1465 if state in b"nr" and f not in m1:
1462 1466 yield (missing_from_p1, f, state)
1463 1467 if state in b"a" and f in m1:
1464 1468 yield (unexpected_in_p1, f, state)
1465 1469 if state in b"m" and f not in m1 and f not in m2:
1466 1470 yield (missing_from_ps, f, state)
1467 1471 for f in m1:
1468 1472 state = self.get_entry(f).state
1469 1473 if state not in b"nrm":
1470 1474 yield (missing_from_ds, f, state)
@@ -1,3944 +1,3946 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from concurrent import futures
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 sha1nodeconstants,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revlog,
63 63 revset,
64 64 revsetlang,
65 65 scmutil,
66 66 sparse,
67 67 store as storemod,
68 68 subrepoutil,
69 69 tags as tagsmod,
70 70 transaction,
71 71 txnutil,
72 72 util,
73 73 vfs as vfsmod,
74 74 wireprototypes,
75 75 )
76 76
77 77 from .interfaces import (
78 78 repository,
79 79 util as interfaceutil,
80 80 )
81 81
82 82 from .utils import (
83 83 hashutil,
84 84 procutil,
85 85 stringutil,
86 86 urlutil,
87 87 )
88 88
89 89 from .revlogutils import (
90 90 concurrency_checker as revlogchecker,
91 91 constants as revlogconst,
92 92 sidedata as sidedatamod,
93 93 )
94 94
95 95 release = lockmod.release
96 96 urlerr = util.urlerr
97 97 urlreq = util.urlreq
98 98
99 99 # set of (path, vfs-location) tuples. vfs-location is:
100 100 # - 'plain' for vfs relative paths
101 101 # - '' for svfs relative paths
102 102 _cachedfiles = set()
103 103
104 104
105 105 class _basefilecache(scmutil.filecache):
106 106 """All filecache usage on repo are done for logic that should be unfiltered"""
107 107
108 108 def __get__(self, repo, type=None):
109 109 if repo is None:
110 110 return self
111 111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 112 unfi = repo.unfiltered()
113 113 try:
114 114 return unfi.__dict__[self.sname]
115 115 except KeyError:
116 116 pass
117 117 return super(_basefilecache, self).__get__(unfi, type)
118 118
119 119 def set(self, repo, value):
120 120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 121
122 122
123 123 class repofilecache(_basefilecache):
124 124 """filecache for files in .hg but outside of .hg/store"""
125 125
126 126 def __init__(self, *paths):
127 127 super(repofilecache, self).__init__(*paths)
128 128 for path in paths:
129 129 _cachedfiles.add((path, b'plain'))
130 130
131 131 def join(self, obj, fname):
132 132 return obj.vfs.join(fname)
133 133
134 134
135 135 class storecache(_basefilecache):
136 136 """filecache for files in the store"""
137 137
138 138 def __init__(self, *paths):
139 139 super(storecache, self).__init__(*paths)
140 140 for path in paths:
141 141 _cachedfiles.add((path, b''))
142 142
143 143 def join(self, obj, fname):
144 144 return obj.sjoin(fname)
145 145
146 146
147 147 class changelogcache(storecache):
148 148 """filecache for the changelog"""
149 149
150 150 def __init__(self):
151 151 super(changelogcache, self).__init__()
152 152 _cachedfiles.add((b'00changelog.i', b''))
153 153 _cachedfiles.add((b'00changelog.n', b''))
154 154
155 155 def tracked_paths(self, obj):
156 156 paths = [self.join(obj, b'00changelog.i')]
157 157 if obj.store.opener.options.get(b'persistent-nodemap', False):
158 158 paths.append(self.join(obj, b'00changelog.n'))
159 159 return paths
160 160
161 161
162 162 class manifestlogcache(storecache):
163 163 """filecache for the manifestlog"""
164 164
165 165 def __init__(self):
166 166 super(manifestlogcache, self).__init__()
167 167 _cachedfiles.add((b'00manifest.i', b''))
168 168 _cachedfiles.add((b'00manifest.n', b''))
169 169
170 170 def tracked_paths(self, obj):
171 171 paths = [self.join(obj, b'00manifest.i')]
172 172 if obj.store.opener.options.get(b'persistent-nodemap', False):
173 173 paths.append(self.join(obj, b'00manifest.n'))
174 174 return paths
175 175
176 176
177 177 class mixedrepostorecache(_basefilecache):
178 178 """filecache for a mix files in .hg/store and outside"""
179 179
180 180 def __init__(self, *pathsandlocations):
181 181 # scmutil.filecache only uses the path for passing back into our
182 182 # join(), so we can safely pass a list of paths and locations
183 183 super(mixedrepostorecache, self).__init__(*pathsandlocations)
184 184 _cachedfiles.update(pathsandlocations)
185 185
186 186 def join(self, obj, fnameandlocation):
187 187 fname, location = fnameandlocation
188 188 if location == b'plain':
189 189 return obj.vfs.join(fname)
190 190 else:
191 191 if location != b'':
192 192 raise error.ProgrammingError(
193 193 b'unexpected location: %s' % location
194 194 )
195 195 return obj.sjoin(fname)
196 196
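The cache classes above are used as property decorators on the repository type: the decorated method is recomputed only when the named file changes on disk (scmutil.filecache handles the stat-based invalidation). A hedged sketch of the pattern with hypothetical property names:

    class examplerepo(localrepository):
        @repofilecache(b'bookmarks')
        def _example_bookmarks(self):
            # re-read only when .hg/bookmarks changes on disk
            return self.vfs.tryread(b'bookmarks')

        @storecache(b'phaseroots')
        def _example_phaseroots(self):
            # re-read only when .hg/store/phaseroots changes on disk
            return self.svfs.tryread(b'phaseroots')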
197 197
198 198 def isfilecached(repo, name):
199 199 """check if a repo has already cached "name" filecache-ed property
200 200
201 201 This returns (cachedobj-or-None, iscached) tuple.
202 202 """
203 203 cacheentry = repo.unfiltered()._filecache.get(name, None)
204 204 if not cacheentry:
205 205 return None, False
206 206 return cacheentry.obj, True
207 207
208 208
209 209 class unfilteredpropertycache(util.propertycache):
210 210 """propertycache that apply to unfiltered repo only"""
211 211
212 212 def __get__(self, repo, type=None):
213 213 unfi = repo.unfiltered()
214 214 if unfi is repo:
215 215 return super(unfilteredpropertycache, self).__get__(unfi)
216 216 return getattr(unfi, self.name)
217 217
218 218
219 219 class filteredpropertycache(util.propertycache):
220 220 """propertycache that must take filtering in account"""
221 221
222 222 def cachevalue(self, obj, value):
223 223 object.__setattr__(obj, self.name, value)
224 224
225 225
226 226 def hasunfilteredcache(repo, name):
227 227 """check if a repo has an unfilteredpropertycache value for <name>"""
228 228 return name in vars(repo.unfiltered())
229 229
230 230
231 231 def unfilteredmethod(orig):
232 232 """decorate method that always need to be run on unfiltered version"""
233 233
234 234 @functools.wraps(orig)
235 235 def wrapper(repo, *args, **kwargs):
236 236 return orig(repo.unfiltered(), *args, **kwargs)
237 237
238 238 return wrapper
239 239
240 240
241 241 moderncaps = {
242 242 b'lookup',
243 243 b'branchmap',
244 244 b'pushkey',
245 245 b'known',
246 246 b'getbundle',
247 247 b'unbundle',
248 248 }
249 249 legacycaps = moderncaps.union({b'changegroupsubset'})
250 250
251 251
252 252 @interfaceutil.implementer(repository.ipeercommandexecutor)
253 253 class localcommandexecutor:
254 254 def __init__(self, peer):
255 255 self._peer = peer
256 256 self._sent = False
257 257 self._closed = False
258 258
259 259 def __enter__(self):
260 260 return self
261 261
262 262 def __exit__(self, exctype, excvalue, exctb):
263 263 self.close()
264 264
265 265 def callcommand(self, command, args):
266 266 if self._sent:
267 267 raise error.ProgrammingError(
268 268 b'callcommand() cannot be used after sendcommands()'
269 269 )
270 270
271 271 if self._closed:
272 272 raise error.ProgrammingError(
273 273 b'callcommand() cannot be used after close()'
274 274 )
275 275
276 276 # We don't need to support anything fancy. Just call the named
277 277 # method on the peer and return a resolved future.
278 278 fn = getattr(self._peer, pycompat.sysstr(command))
279 279
280 280 f = futures.Future()
281 281
282 282 try:
283 283 result = fn(**pycompat.strkwargs(args))
284 284 except Exception:
285 285 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
286 286 else:
287 287 f.set_result(result)
288 288
289 289 return f
290 290
291 291 def sendcommands(self):
292 292 self._sent = True
293 293
294 294 def close(self):
295 295 self._closed = True
296 296
297 297
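localcommandexecutor implements the command-executor protocol for in-process peers: callcommand() queues a call and hands back a future, which this local implementation resolves immediately. A hedged usage sketch, assuming a peer object (b'heads' is one of the standard commands exposed by localpeer below):

    # sketch: run a wire-protocol command through the executor interface
    with peer.commandexecutor() as e:
        heads = e.callcommand(b'heads', {}).result()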
298 298 @interfaceutil.implementer(repository.ipeercommands)
299 299 class localpeer(repository.peer):
300 300 '''peer for a local repo; reflects only the most recent API'''
301 301
302 302 def __init__(self, repo, caps=None):
303 303 super(localpeer, self).__init__()
304 304
305 305 if caps is None:
306 306 caps = moderncaps.copy()
307 307 self._repo = repo.filtered(b'served')
308 308 self.ui = repo.ui
309 309
310 310 if repo._wanted_sidedata:
311 311 formatted = bundle2.format_remote_wanted_sidedata(repo)
312 312 caps.add(b'exp-wanted-sidedata=' + formatted)
313 313
314 314 self._caps = repo._restrictcapabilities(caps)
315 315
316 316 # Begin of _basepeer interface.
317 317
318 318 def url(self):
319 319 return self._repo.url()
320 320
321 321 def local(self):
322 322 return self._repo
323 323
324 324 def peer(self):
325 325 return self
326 326
327 327 def canpush(self):
328 328 return True
329 329
330 330 def close(self):
331 331 self._repo.close()
332 332
333 333 # End of _basepeer interface.
334 334
335 335 # Begin of _basewirecommands interface.
336 336
337 337 def branchmap(self):
338 338 return self._repo.branchmap()
339 339
340 340 def capabilities(self):
341 341 return self._caps
342 342
343 343 def clonebundles(self):
344 344 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
345 345
346 346 def debugwireargs(self, one, two, three=None, four=None, five=None):
347 347 """Used to test argument passing over the wire"""
348 348 return b"%s %s %s %s %s" % (
349 349 one,
350 350 two,
351 351 pycompat.bytestr(three),
352 352 pycompat.bytestr(four),
353 353 pycompat.bytestr(five),
354 354 )
355 355
356 356 def getbundle(
357 357 self,
358 358 source,
359 359 heads=None,
360 360 common=None,
361 361 bundlecaps=None,
362 362 remote_sidedata=None,
363 363 **kwargs
364 364 ):
365 365 chunks = exchange.getbundlechunks(
366 366 self._repo,
367 367 source,
368 368 heads=heads,
369 369 common=common,
370 370 bundlecaps=bundlecaps,
371 371 remote_sidedata=remote_sidedata,
372 372 **kwargs
373 373 )[1]
374 374 cb = util.chunkbuffer(chunks)
375 375
376 376 if exchange.bundle2requested(bundlecaps):
377 377 # When requesting a bundle2, getbundle returns a stream to make the
378 378 # wire level function happier. We need to build a proper object
379 379 # from it in local peer.
380 380 return bundle2.getunbundler(self.ui, cb)
381 381 else:
382 382 return changegroup.getunbundler(b'01', cb, None)
383 383
384 384 def heads(self):
385 385 return self._repo.heads()
386 386
387 387 def known(self, nodes):
388 388 return self._repo.known(nodes)
389 389
390 390 def listkeys(self, namespace):
391 391 return self._repo.listkeys(namespace)
392 392
393 393 def lookup(self, key):
394 394 return self._repo.lookup(key)
395 395
396 396 def pushkey(self, namespace, key, old, new):
397 397 return self._repo.pushkey(namespace, key, old, new)
398 398
399 399 def stream_out(self):
400 400 raise error.Abort(_(b'cannot perform stream clone against local peer'))
401 401
402 402 def unbundle(self, bundle, heads, url):
403 403 """apply a bundle on a repo
404 404
405 405 This function handles the repo locking itself."""
406 406 try:
407 407 try:
408 408 bundle = exchange.readbundle(self.ui, bundle, None)
409 409 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
410 410 if util.safehasattr(ret, b'getchunks'):
411 411 # This is a bundle20 object, turn it into an unbundler.
412 412 # This little dance should be dropped eventually when the
413 413 # API is finally improved.
414 414 stream = util.chunkbuffer(ret.getchunks())
415 415 ret = bundle2.getunbundler(self.ui, stream)
416 416 return ret
417 417 except Exception as exc:
418 418 # If the exception contains output salvaged from a bundle2
419 419 # reply, we need to make sure it is printed before continuing
420 420 # to fail. So we build a bundle2 with such output and consume
421 421 # it directly.
422 422 #
423 423 # This is not very elegant but allows a "simple" solution for
424 424 # issue4594
425 425 output = getattr(exc, '_bundle2salvagedoutput', ())
426 426 if output:
427 427 bundler = bundle2.bundle20(self._repo.ui)
428 428 for out in output:
429 429 bundler.addpart(out)
430 430 stream = util.chunkbuffer(bundler.getchunks())
431 431 b = bundle2.getunbundler(self.ui, stream)
432 432 bundle2.processbundle(self._repo, b)
433 433 raise
434 434 except error.PushRaced as exc:
435 435 raise error.ResponseError(
436 436 _(b'push failed:'), stringutil.forcebytestr(exc)
437 437 )
438 438
439 439 # End of _basewirecommands interface.
440 440
441 441 # Begin of peer interface.
442 442
443 443 def commandexecutor(self):
444 444 return localcommandexecutor(self)
445 445
446 446 # End of peer interface.
447 447
448 448
449 449 @interfaceutil.implementer(repository.ipeerlegacycommands)
450 450 class locallegacypeer(localpeer):
451 451 """peer extension which implements legacy methods too; used for tests with
452 452 restricted capabilities"""
453 453
454 454 def __init__(self, repo):
455 455 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
456 456
457 457 # Begin of baselegacywirecommands interface.
458 458
459 459 def between(self, pairs):
460 460 return self._repo.between(pairs)
461 461
462 462 def branches(self, nodes):
463 463 return self._repo.branches(nodes)
464 464
465 465 def changegroup(self, nodes, source):
466 466 outgoing = discovery.outgoing(
467 467 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
468 468 )
469 469 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
470 470
471 471 def changegroupsubset(self, bases, heads, source):
472 472 outgoing = discovery.outgoing(
473 473 self._repo, missingroots=bases, ancestorsof=heads
474 474 )
475 475 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
476 476
477 477 # End of baselegacywirecommands interface.
478 478
479 479
480 480 # Functions receiving (ui, features) that extensions can register to impact
481 481 # the ability to load repositories with custom requirements. Only
482 482 # functions defined in loaded extensions are called.
483 483 #
484 484 # The function receives a set of requirement strings that the repository
485 485 # is capable of opening. Functions will typically add elements to the
486 486 # set to reflect that the extension knows how to handle those requirements.
487 487 featuresetupfuncs = set()
488 488
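Extensions typically add their callback from uisetup(); the callback then mutates the set of supported requirements. A hedged sketch from a hypothetical extension (the requirement string is made up):

    # in a hypothetical extension module
    def featuresetup(ui, features):
        features.add(b'exp-myfeature')  # hypothetical requirement

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)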
489 489
490 490 def _getsharedvfs(hgvfs, requirements):
491 491 """returns the vfs object pointing to root of shared source
492 492 repo for a shared repository
493 493
494 494 hgvfs is vfs pointing at .hg/ of current repo (shared one)
495 495 requirements is a set of requirements of current repo (shared one)
496 496 """
497 497 # The ``shared`` or ``relshared`` requirements indicate the
498 498 # store lives in the path contained in the ``.hg/sharedpath`` file.
499 499 # This is an absolute path for ``shared`` and relative to
500 500 # ``.hg/`` for ``relshared``.
501 501 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
502 502 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
503 503 sharedpath = util.normpath(hgvfs.join(sharedpath))
504 504
505 505 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
506 506
507 507 if not sharedvfs.exists():
508 508 raise error.RepoError(
509 509 _(b'.hg/sharedpath points to nonexistent directory %s')
510 510 % sharedvfs.base
511 511 )
512 512 return sharedvfs
513 513
514 514
515 515 def _readrequires(vfs, allowmissing):
516 516 """reads the require file present at root of this vfs
517 517 and return a set of requirements
518 518
519 519 If allowmissing is True, we suppress FileNotFoundError if raised"""
520 520 # requires file contains a newline-delimited list of
521 521 # features/capabilities the opener (us) must have in order to use
522 522 # the repository. This file was introduced in Mercurial 0.9.2,
523 523 # which means very old repositories may not have one. We assume
524 524 # a missing file translates to no requirements.
525 525 try:
526 526 return set(vfs.read(b'requires').splitlines())
527 527 except FileNotFoundError:
528 528 if not allowmissing:
529 529 raise
530 530 return set()
531 531
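For reference, the requires file read here is plain text with one requirement per line. A typical modern repository might contain something like the following (the exact set depends on the format options used at creation time):

    dotencode
    fncache
    generaldelta
    revlogv1
    sparserevlog
    store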
532 532
533 533 def makelocalrepository(baseui, path, intents=None):
534 534 """Create a local repository object.
535 535
536 536 Given arguments needed to construct a local repository, this function
537 537 performs various early repository loading functionality (such as
538 538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 539 the repository can be opened, derives a type suitable for representing
540 540 that repository, and returns an instance of it.
541 541
542 542 The returned object conforms to the ``repository.completelocalrepository``
543 543 interface.
544 544
545 545 The repository type is derived by calling a series of factory functions
546 546 for each aspect/interface of the final repository. These are defined by
547 547 ``REPO_INTERFACES``.
548 548
549 549 Each factory function is called to produce a type implementing a specific
550 550 interface. The cumulative list of returned types will be combined into a
551 551 new type and that type will be instantiated to represent the local
552 552 repository.
553 553
554 554 The factory functions each receive various state that may be consulted
555 555 as part of deriving a type.
556 556
557 557 Extensions should wrap these factory functions to customize repository type
558 558 creation. Note that an extension's wrapped function may be called even if
559 559 that extension is not loaded for the repo being constructed. Extensions
560 560 should check if their ``__name__`` appears in the
561 561 ``extensionmodulenames`` set passed to the factory function and no-op if
562 562 not.
563 563 """
564 564 ui = baseui.copy()
565 565 # Prevent copying repo configuration.
566 566 ui.copy = baseui.copy
567 567
568 568 # Working directory VFS rooted at repository root.
569 569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570 570
571 571 # Main VFS for .hg/ directory.
572 572 hgpath = wdirvfs.join(b'.hg')
573 573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 574 # Whether this repository is a shared one or not
575 575 shared = False
576 576 # If this repository is shared, vfs pointing to shared repo
577 577 sharedvfs = None
578 578
579 579 # The .hg/ path should exist and should be a directory. All other
580 580 # cases are errors.
581 581 if not hgvfs.isdir():
582 582 try:
583 583 hgvfs.stat()
584 584 except FileNotFoundError:
585 585 pass
586 586 except ValueError as e:
587 587 # Can be raised on Python 3.8 when path is invalid.
588 588 raise error.Abort(
589 589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
590 590 )
591 591
592 592 raise error.RepoError(_(b'repository %s not found') % path)
593 593
594 594 requirements = _readrequires(hgvfs, True)
595 595 shared = (
596 596 requirementsmod.SHARED_REQUIREMENT in requirements
597 597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
598 598 )
599 599 storevfs = None
600 600 if shared:
601 601 # This is a shared repo
602 602 sharedvfs = _getsharedvfs(hgvfs, requirements)
603 603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
604 604 else:
605 605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
606 606
607 607 # if .hg/requires contains the sharesafe requirement, it means
608 608 # there exists a `.hg/store/requires` too and we should read it
609 609 # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
610 610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
611 611 # is not present; refer to checkrequirementscompat() for that
612 612 #
613 613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
614 614 # repository was shared the old way. We check the share source .hg/requires
615 615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
616 616 # to be reshared
617 617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
618 618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619 619
620 620 if (
621 621 shared
622 622 and requirementsmod.SHARESAFE_REQUIREMENT
623 623 not in _readrequires(sharedvfs, True)
624 624 ):
625 625 mismatch_warn = ui.configbool(
626 626 b'share', b'safe-mismatch.source-not-safe.warn'
627 627 )
628 628 mismatch_config = ui.config(
629 629 b'share', b'safe-mismatch.source-not-safe'
630 630 )
631 631 mismatch_verbose_upgrade = ui.configbool(
632 632 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
633 633 )
634 634 if mismatch_config in (
635 635 b'downgrade-allow',
636 636 b'allow',
637 637 b'downgrade-abort',
638 638 ):
639 639 # prevent cyclic import localrepo -> upgrade -> localrepo
640 640 from . import upgrade
641 641
642 642 upgrade.downgrade_share_to_non_safe(
643 643 ui,
644 644 hgvfs,
645 645 sharedvfs,
646 646 requirements,
647 647 mismatch_config,
648 648 mismatch_warn,
649 649 mismatch_verbose_upgrade,
650 650 )
651 651 elif mismatch_config == b'abort':
652 652 raise error.Abort(
653 653 _(b"share source does not support share-safe requirement"),
654 654 hint=hint,
655 655 )
656 656 else:
657 657 raise error.Abort(
658 658 _(
659 659 b"share-safe mismatch with source.\nUnrecognized"
660 660 b" value '%s' of `share.safe-mismatch.source-not-safe`"
661 661 b" set."
662 662 )
663 663 % mismatch_config,
664 664 hint=hint,
665 665 )
666 666 else:
667 667 requirements |= _readrequires(storevfs, False)
668 668 elif shared:
669 669 sourcerequires = _readrequires(sharedvfs, False)
670 670 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
671 671 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
672 672 mismatch_warn = ui.configbool(
673 673 b'share', b'safe-mismatch.source-safe.warn'
674 674 )
675 675 mismatch_verbose_upgrade = ui.configbool(
676 676 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
677 677 )
678 678 if mismatch_config in (
679 679 b'upgrade-allow',
680 680 b'allow',
681 681 b'upgrade-abort',
682 682 ):
683 683 # prevent cyclic import localrepo -> upgrade -> localrepo
684 684 from . import upgrade
685 685
686 686 upgrade.upgrade_share_to_safe(
687 687 ui,
688 688 hgvfs,
689 689 storevfs,
690 690 requirements,
691 691 mismatch_config,
692 692 mismatch_warn,
693 693 mismatch_verbose_upgrade,
694 694 )
695 695 elif mismatch_config == b'abort':
696 696 raise error.Abort(
697 697 _(
698 698 b'version mismatch: source uses share-safe'
699 699 b' functionality while the current share does not'
700 700 ),
701 701 hint=hint,
702 702 )
703 703 else:
704 704 raise error.Abort(
705 705 _(
706 706 b"share-safe mismatch with source.\nUnrecognized"
707 707 b" value '%s' of `share.safe-mismatch.source-safe` set."
708 708 )
709 709 % mismatch_config,
710 710 hint=hint,
711 711 )
712 712
713 713 # The .hg/hgrc file may load extensions or contain config options
714 714 # that influence repository construction. Attempt to load it and
715 715 # process any new extensions that it may have pulled in.
716 716 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
717 717 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
718 718 extensions.loadall(ui)
719 719 extensions.populateui(ui)
720 720
721 721 # Set of module names of extensions loaded for this repository.
722 722 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
723 723
724 724 supportedrequirements = gathersupportedrequirements(ui)
725 725
726 726 # We first validate the requirements are known.
727 727 ensurerequirementsrecognized(requirements, supportedrequirements)
728 728
729 729 # Then we validate that the known set is reasonable to use together.
730 730 ensurerequirementscompatible(ui, requirements)
731 731
732 732 # TODO there are unhandled edge cases related to opening repositories with
733 733 # shared storage. If storage is shared, we should also test for requirements
734 734 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
735 735 # that repo, as that repo may load extensions needed to open it. This is a
736 736 # bit complicated because we don't want the other hgrc to overwrite settings
737 737 # in this hgrc.
738 738 #
739 739 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
740 740 # file when sharing repos. But if a requirement is added after the share is
741 741 # performed, thereby introducing a new requirement for the opener, we may
742 742 # not see that and could encounter a run-time error interacting with
743 743 # that shared store since it has an unknown-to-us requirement.
744 744
745 745 # At this point, we know we should be capable of opening the repository.
746 746 # Now get on with doing that.
747 747
748 748 features = set()
749 749
750 750 # The "store" part of the repository holds versioned data. How it is
751 751 # accessed is determined by various requirements. If `shared` or
752 752 # `relshared` requirements are present, this indicates current repository
753 753 # is a share and store exists in path mentioned in `.hg/sharedpath`
754 754 if shared:
755 755 storebasepath = sharedvfs.base
756 756 cachepath = sharedvfs.join(b'cache')
757 757 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
758 758 else:
759 759 storebasepath = hgvfs.base
760 760 cachepath = hgvfs.join(b'cache')
761 761 wcachepath = hgvfs.join(b'wcache')
762 762
763 763 # The store has changed over time and the exact layout is dictated by
764 764 # requirements. The store interface abstracts differences across all
765 765 # of them.
766 766 store = makestore(
767 767 requirements,
768 768 storebasepath,
769 769 lambda base: vfsmod.vfs(base, cacheaudited=True),
770 770 )
771 771 hgvfs.createmode = store.createmode
772 772
773 773 storevfs = store.vfs
774 774 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
775 775
776 776 if (
777 777 requirementsmod.REVLOGV2_REQUIREMENT in requirements
778 778 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
779 779 ):
780 780 features.add(repository.REPO_FEATURE_SIDE_DATA)
781 781 # the revlogv2 docket introduced a race condition that we need to fix
782 782 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
783 783
784 784 # The cache vfs is used to manage cache files.
785 785 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
786 786 cachevfs.createmode = store.createmode
787 787 # The cache vfs is used to manage cache files related to the working copy
788 788 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
789 789 wcachevfs.createmode = store.createmode
790 790
791 791 # Now resolve the type for the repository object. We do this by repeatedly
792 792 # calling a factory function to produce types for specific aspects of the
793 793 # repo's operation. The aggregate returned types are used as base classes
794 794 # for a dynamically-derived type, which will represent our new repository.
795 795
796 796 bases = []
797 797 extrastate = {}
798 798
799 799 for iface, fn in REPO_INTERFACES:
800 800 # We pass all potentially useful state to give extensions tons of
801 801 # flexibility.
802 802 typ = fn()(
803 803 ui=ui,
804 804 intents=intents,
805 805 requirements=requirements,
806 806 features=features,
807 807 wdirvfs=wdirvfs,
808 808 hgvfs=hgvfs,
809 809 store=store,
810 810 storevfs=storevfs,
811 811 storeoptions=storevfs.options,
812 812 cachevfs=cachevfs,
813 813 wcachevfs=wcachevfs,
814 814 extensionmodulenames=extensionmodulenames,
815 815 extrastate=extrastate,
816 816 baseclasses=bases,
817 817 )
818 818
819 819 if not isinstance(typ, type):
820 820 raise error.ProgrammingError(
821 821 b'unable to construct type for %s' % iface
822 822 )
823 823
824 824 bases.append(typ)
825 825
826 826 # type() allows you to use characters in type names that wouldn't be
827 827 # recognized as Python symbols in source code. We abuse that to add
828 828 # rich information about our constructed repo.
829 829 name = pycompat.sysstr(
830 830 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
831 831 )
832 832
833 833 cls = type(name, tuple(bases), {})
834 834
835 835 return cls(
836 836 baseui=baseui,
837 837 ui=ui,
838 838 origroot=path,
839 839 wdirvfs=wdirvfs,
840 840 hgvfs=hgvfs,
841 841 requirements=requirements,
842 842 supportedrequirements=supportedrequirements,
843 843 sharedpath=storebasepath,
844 844 store=store,
845 845 cachevfs=cachevfs,
846 846 wcachevfs=wcachevfs,
847 847 features=features,
848 848 intents=intents,
849 849 )
850 850
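Most callers do not invoke makelocalrepository() directly; they go through hg.repository(), which ends up here after ui and extension setup. A minimal, hedged sketch of opening a repository from Python (the path is illustrative):

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    repo = hg.repository(u, b'/path/to/repo')  # illustrative path
    print(len(repo))  # number of revisions in the changelog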
851 851
852 852 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
853 853 """Load hgrc files/content into a ui instance.
854 854
855 855 This is called during repository opening to load any additional
856 856 config files or settings relevant to the current repository.
857 857
858 858 Returns a bool indicating whether any additional configs were loaded.
859 859
860 860 Extensions should monkeypatch this function to modify how per-repo
861 861 configs are loaded. For example, an extension may wish to pull in
862 862 configs from alternate files or sources.
863 863
864 864 sharedvfs is vfs object pointing to source repo if the current one is a
865 865 shared one
866 866 """
867 867 if not rcutil.use_repo_hgrc():
868 868 return False
869 869
870 870 ret = False
871 871 # first load config from shared source if we have to
872 872 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
873 873 try:
874 874 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
875 875 ret = True
876 876 except IOError:
877 877 pass
878 878
879 879 try:
880 880 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
881 881 ret = True
882 882 except IOError:
883 883 pass
884 884
885 885 try:
886 886 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
887 887 ret = True
888 888 except IOError:
889 889 pass
890 890
891 891 return ret
892 892
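As the docstring notes, extensions may wrap loadhgrc() to pull configuration from additional sources. A hedged sketch of such a wrapper (the extra file name is hypothetical):

    # in a hypothetical extension module
    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
        ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
        try:
            # hypothetical per-repo config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)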
893 893
894 894 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
895 895 """Perform additional actions after .hg/hgrc is loaded.
896 896
897 897 This function is called during repository loading immediately after
898 898 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
899 899
900 900 The function can be used to validate configs, automatically add
901 901 options (including extensions) based on requirements, etc.
902 902 """
903 903
904 904 # Map of requirements to list of extensions to load automatically when
905 905 # requirement is present.
906 906 autoextensions = {
907 907 b'git': [b'git'],
908 908 b'largefiles': [b'largefiles'],
909 909 b'lfs': [b'lfs'],
910 910 }
911 911
912 912 for requirement, names in sorted(autoextensions.items()):
913 913 if requirement not in requirements:
914 914 continue
915 915
916 916 for name in names:
917 917 if not ui.hasconfig(b'extensions', name):
918 918 ui.setconfig(b'extensions', name, b'', source=b'autoload')
919 919
920 920
921 921 def gathersupportedrequirements(ui):
922 922 """Determine the complete set of recognized requirements."""
923 923 # Start with all requirements supported by this file.
924 924 supported = set(localrepository._basesupported)
925 925
926 926 # Execute ``featuresetupfuncs`` entries if they belong to an extension
927 927 # relevant to this ui instance.
928 928 modules = {m.__name__ for n, m in extensions.extensions(ui)}
929 929
930 930 for fn in featuresetupfuncs:
931 931 if fn.__module__ in modules:
932 932 fn(ui, supported)
933 933
934 934 # Add derived requirements from registered compression engines.
935 935 for name in util.compengines:
936 936 engine = util.compengines[name]
937 937 if engine.available() and engine.revlogheader():
938 938 supported.add(b'exp-compression-%s' % name)
939 939 if engine.name() == b'zstd':
940 940 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
941 941
942 942 return supported
943 943
944 944
945 945 def ensurerequirementsrecognized(requirements, supported):
946 946 """Validate that a set of local requirements is recognized.
947 947
948 948 Receives a set of requirements. Raises an ``error.RepoError`` if there
949 949 exists any requirement in that set that currently loaded code doesn't
950 950 recognize.
951 951
952 952 Returns a set of supported requirements.
953 953 """
954 954 missing = set()
955 955
956 956 for requirement in requirements:
957 957 if requirement in supported:
958 958 continue
959 959
960 960 if not requirement or not requirement[0:1].isalnum():
961 961 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
962 962
963 963 missing.add(requirement)
964 964
965 965 if missing:
966 966 raise error.RequirementError(
967 967 _(b'repository requires features unknown to this Mercurial: %s')
968 968 % b' '.join(sorted(missing)),
969 969 hint=_(
970 970 b'see https://mercurial-scm.org/wiki/MissingRequirement '
971 971 b'for more information'
972 972 ),
973 973 )
974 974
975 975
976 976 def ensurerequirementscompatible(ui, requirements):
977 977 """Validates that a set of recognized requirements is mutually compatible.
978 978
979 979 Some requirements may not be compatible with others or require
980 980 config options that aren't enabled. This function is called during
981 981 repository opening to ensure that the set of requirements needed
982 982 to open a repository is sane and compatible with config options.
983 983
984 984 Extensions can monkeypatch this function to perform additional
985 985 checking.
986 986
987 987 ``error.RepoError`` should be raised on failure.
988 988 """
989 989 if (
990 990 requirementsmod.SPARSE_REQUIREMENT in requirements
991 991 and not sparse.enabled
992 992 ):
993 993 raise error.RepoError(
994 994 _(
995 995 b'repository is using sparse feature but '
996 996 b'sparse is not enabled; enable the '
997 997 b'"sparse" extensions to access'
998 998 )
999 999 )
1000 1000
1001 1001
1002 1002 def makestore(requirements, path, vfstype):
1003 1003 """Construct a storage object for a repository."""
1004 1004 if requirementsmod.STORE_REQUIREMENT in requirements:
1005 1005 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1006 1006 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1007 1007 return storemod.fncachestore(path, vfstype, dotencode)
1008 1008
1009 1009 return storemod.encodedstore(path, vfstype)
1010 1010
1011 1011 return storemod.basicstore(path, vfstype)
1012 1012
1013 1013
1014 1014 def resolvestorevfsoptions(ui, requirements, features):
1015 1015 """Resolve the options to pass to the store vfs opener.
1016 1016
1017 1017 The returned dict is used to influence behavior of the storage layer.
1018 1018 """
1019 1019 options = {}
1020 1020
1021 1021 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1022 1022 options[b'treemanifest'] = True
1023 1023
1024 1024 # experimental config: format.manifestcachesize
1025 1025 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1026 1026 if manifestcachesize is not None:
1027 1027 options[b'manifestcachesize'] = manifestcachesize
1028 1028
1029 1029 # In the absence of another requirement superseding a revlog-related
1030 1030 # requirement, we have to assume the repo is using revlog version 0.
1031 1031 # This revlog format is super old and we don't bother trying to parse
1032 1032 # opener options for it because those options wouldn't do anything
1033 1033 # meaningful on such old repos.
1034 1034 if (
1035 1035 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1036 1036 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1037 1037 ):
1038 1038 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1039 1039 else: # explicitly mark repo as using revlogv0
1040 1040 options[b'revlogv0'] = True
1041 1041
1042 1042 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1043 1043 options[b'copies-storage'] = b'changeset-sidedata'
1044 1044 else:
1045 1045 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1046 1046 copiesextramode = (b'changeset-only', b'compatibility')
1047 1047 if writecopiesto in copiesextramode:
1048 1048 options[b'copies-storage'] = b'extra'
1049 1049
1050 1050 return options
1051 1051
1052 1052
1053 1053 def resolverevlogstorevfsoptions(ui, requirements, features):
1054 1054 """Resolve opener options specific to revlogs."""
1055 1055
1056 1056 options = {}
1057 1057 options[b'flagprocessors'] = {}
1058 1058
1059 1059 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1060 1060 options[b'revlogv1'] = True
1061 1061 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1062 1062 options[b'revlogv2'] = True
1063 1063 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1064 1064 options[b'changelogv2'] = True
1065 1065
1066 1066 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1067 1067 options[b'generaldelta'] = True
1068 1068
1069 1069 # experimental config: format.chunkcachesize
1070 1070 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1071 1071 if chunkcachesize is not None:
1072 1072 options[b'chunkcachesize'] = chunkcachesize
1073 1073
1074 1074 deltabothparents = ui.configbool(
1075 1075 b'storage', b'revlog.optimize-delta-parent-choice'
1076 1076 )
1077 1077 options[b'deltabothparents'] = deltabothparents
1078 1078 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1079 1079
1080 1080 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1081 1081 options[b'issue6528.fix-incoming'] = issue6528
1082 1082
1083 1083 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1084 1084 lazydeltabase = False
1085 1085 if lazydelta:
1086 1086 lazydeltabase = ui.configbool(
1087 1087 b'storage', b'revlog.reuse-external-delta-parent'
1088 1088 )
1089 1089 if lazydeltabase is None:
1090 1090 lazydeltabase = not scmutil.gddeltaconfig(ui)
1091 1091 options[b'lazydelta'] = lazydelta
1092 1092 options[b'lazydeltabase'] = lazydeltabase
1093 1093
1094 1094 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1095 1095 if 0 <= chainspan:
1096 1096 options[b'maxdeltachainspan'] = chainspan
1097 1097
1098 1098 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1099 1099 if mmapindexthreshold is not None:
1100 1100 options[b'mmapindexthreshold'] = mmapindexthreshold
1101 1101
1102 1102 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1103 1103 srdensitythres = float(
1104 1104 ui.config(b'experimental', b'sparse-read.density-threshold')
1105 1105 )
1106 1106 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1107 1107 options[b'with-sparse-read'] = withsparseread
1108 1108 options[b'sparse-read-density-threshold'] = srdensitythres
1109 1109 options[b'sparse-read-min-gap-size'] = srmingapsize
1110 1110
1111 1111 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1112 1112 options[b'sparse-revlog'] = sparserevlog
1113 1113 if sparserevlog:
1114 1114 options[b'generaldelta'] = True
1115 1115
1116 1116 maxchainlen = None
1117 1117 if sparserevlog:
1118 1118 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1119 1119 # experimental config: format.maxchainlen
1120 1120 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1121 1121 if maxchainlen is not None:
1122 1122 options[b'maxchainlen'] = maxchainlen
1123 1123
1124 1124 for r in requirements:
1125 1125 # we allow multiple compression engine requirements to co-exist because
1126 1126 # strictly speaking, revlog seems to support mixed compression style.
1127 1127 #
1128 1128 # The compression used for new entries will be "the last one"
1129 1129 prefix = r.startswith
1130 1130 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1131 1131 options[b'compengine'] = r.split(b'-', 2)[2]
1132 1132
1133 1133 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1134 1134 if options[b'zlib.level'] is not None:
1135 1135 if not (0 <= options[b'zlib.level'] <= 9):
1136 1136 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1137 1137 raise error.Abort(msg % options[b'zlib.level'])
1138 1138 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1139 1139 if options[b'zstd.level'] is not None:
1140 1140 if not (0 <= options[b'zstd.level'] <= 22):
1141 1141 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1142 1142 raise error.Abort(msg % options[b'zstd.level'])
1143 1143
1144 1144 if requirementsmod.NARROW_REQUIREMENT in requirements:
1145 1145 options[b'enableellipsis'] = True
1146 1146
1147 1147 if ui.configbool(b'experimental', b'rust.index'):
1148 1148 options[b'rust.index'] = True
1149 1149 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1150 1150 slow_path = ui.config(
1151 1151 b'storage', b'revlog.persistent-nodemap.slow-path'
1152 1152 )
1153 1153 if slow_path not in (b'allow', b'warn', b'abort'):
1154 1154 default = ui.config_default(
1155 1155 b'storage', b'revlog.persistent-nodemap.slow-path'
1156 1156 )
1157 1157 msg = _(
1158 1158 b'unknown value for config '
1159 1159 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1160 1160 )
1161 1161 ui.warn(msg % slow_path)
1162 1162 if not ui.quiet:
1163 1163 ui.warn(_(b'falling back to default value: %s\n') % default)
1164 1164 slow_path = default
1165 1165
1166 1166 msg = _(
1167 1167 b"accessing `persistent-nodemap` repository without associated "
1168 1168 b"fast implementation."
1169 1169 )
1170 1170 hint = _(
1171 1171 b"check `hg help config.format.use-persistent-nodemap` "
1172 1172 b"for details"
1173 1173 )
1174 1174 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1175 1175 if slow_path == b'warn':
1176 1176 msg = b"warning: " + msg + b'\n'
1177 1177 ui.warn(msg)
1178 1178 if not ui.quiet:
1179 1179 hint = b'(' + hint + b')\n'
1180 1180 ui.warn(hint)
1181 1181 if slow_path == b'abort':
1182 1182 raise error.Abort(msg, hint=hint)
1183 1183 options[b'persistent-nodemap'] = True
1184 1184 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1185 1185 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1186 1186 if slow_path not in (b'allow', b'warn', b'abort'):
1187 1187 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1188 1188 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1189 1189 ui.warn(msg % slow_path)
1190 1190 if not ui.quiet:
1191 1191 ui.warn(_(b'falling back to default value: %s\n') % default)
1192 1192 slow_path = default
1193 1193
1194 1194 msg = _(
1195 1195 b"accessing `dirstate-v2` repository without associated "
1196 1196 b"fast implementation."
1197 1197 )
1198 1198 hint = _(
1199 1199 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1200 1200 )
1201 1201 if not dirstate.HAS_FAST_DIRSTATE_V2:
1202 1202 if slow_path == b'warn':
1203 1203 msg = b"warning: " + msg + b'\n'
1204 1204 ui.warn(msg)
1205 1205 if not ui.quiet:
1206 1206 hint = b'(' + hint + b')\n'
1207 1207 ui.warn(hint)
1208 1208 if slow_path == b'abort':
1209 1209 raise error.Abort(msg, hint=hint)
1210 1210 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1211 1211 options[b'persistent-nodemap.mmap'] = True
1212 1212 if ui.configbool(b'devel', b'persistent-nodemap'):
1213 1213 options[b'devel-force-nodemap'] = True
1214 1214
1215 1215 return options
1216 1216
1217 1217
1218 1218 def makemain(**kwargs):
1219 1219 """Produce a type conforming to ``ilocalrepositorymain``."""
1220 1220 return localrepository
1221 1221
1222 1222
1223 1223 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1224 1224 class revlogfilestorage:
1225 1225 """File storage when using revlogs."""
1226 1226
1227 1227 def file(self, path):
1228 1228 if path.startswith(b'/'):
1229 1229 path = path[1:]
1230 1230
1231 1231 return filelog.filelog(self.svfs, path)
1232 1232
1233 1233
1234 1234 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1235 1235 class revlognarrowfilestorage:
1236 1236 """File storage when using revlogs and narrow files."""
1237 1237
1238 1238 def file(self, path):
1239 1239 if path.startswith(b'/'):
1240 1240 path = path[1:]
1241 1241
1242 1242 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1243 1243
1244 1244
1245 1245 def makefilestorage(requirements, features, **kwargs):
1246 1246 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1247 1247 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1248 1248 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1249 1249
1250 1250 if requirementsmod.NARROW_REQUIREMENT in requirements:
1251 1251 return revlognarrowfilestorage
1252 1252 else:
1253 1253 return revlogfilestorage
1254 1254
1255 1255
1256 1256 # List of repository interfaces and factory functions for them. Each
1257 1257 # will be called in order during ``makelocalrepository()`` to iteratively
1258 1258 # derive the final type for a local repository instance. We capture the
1259 1259 # function as a lambda so we don't hold a reference and the module-level
1260 1260 # functions can be wrapped.
1261 1261 REPO_INTERFACES = [
1262 1262 (repository.ilocalrepositorymain, lambda: makemain),
1263 1263 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1264 1264 ]
1265 1265
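Because the factory functions are captured behind lambdas, an extension can wrap the module-level names and have its replacement picked up the next time a repository is constructed. A hedged sketch swapping in a custom file-storage class (the subclass and requirement string are hypothetical):

    class myfilestorage(revlogfilestorage):  # hypothetical subclass
        def file(self, path):
            # illustrative: delegate to the stock revlog implementation
            return super(myfilestorage, self).file(path)

    def _makefilestorage(orig, requirements, features, **kwargs):
        cls = orig(requirements, features, **kwargs)
        if b'exp-myfeature' in requirements:  # hypothetical requirement
            return myfilestorage
        return cls

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)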
1266 1266
1267 1267 @interfaceutil.implementer(repository.ilocalrepositorymain)
1268 1268 class localrepository:
1269 1269 """Main class for representing local repositories.
1270 1270
1271 1271 All local repositories are instances of this class.
1272 1272
1273 1273 Constructed on its own, instances of this class are not usable as
1274 1274 repository objects. To obtain a usable repository object, call
1275 1275 ``hg.repository()``, ``localrepo.instance()``, or
1276 1276 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1277 1277 ``instance()`` adds support for creating new repositories.
1278 1278 ``hg.repository()`` adds more extension integration, including calling
1279 1279 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1280 1280 used.
1281 1281 """
1282 1282
1283 1283 _basesupported = {
1284 1284 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1285 1285 requirementsmod.CHANGELOGV2_REQUIREMENT,
1286 1286 requirementsmod.COPIESSDC_REQUIREMENT,
1287 1287 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1288 1288 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1289 1289 requirementsmod.DOTENCODE_REQUIREMENT,
1290 1290 requirementsmod.FNCACHE_REQUIREMENT,
1291 1291 requirementsmod.GENERALDELTA_REQUIREMENT,
1292 1292 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1293 1293 requirementsmod.NODEMAP_REQUIREMENT,
1294 1294 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1295 1295 requirementsmod.REVLOGV1_REQUIREMENT,
1296 1296 requirementsmod.REVLOGV2_REQUIREMENT,
1297 1297 requirementsmod.SHARED_REQUIREMENT,
1298 1298 requirementsmod.SHARESAFE_REQUIREMENT,
1299 1299 requirementsmod.SPARSE_REQUIREMENT,
1300 1300 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1301 1301 requirementsmod.STORE_REQUIREMENT,
1302 1302 requirementsmod.TREEMANIFEST_REQUIREMENT,
1303 1303 }
1304 1304
1305 1305 # list of prefixes for files which can be written without 'wlock'
1306 1306 # Extensions should extend this list when needed
1307 1307 _wlockfreeprefix = {
1308 1308 # We might consider requiring 'wlock' for the next
1309 1309 # two, but pretty much all the existing code assume
1310 1310 # wlock is not needed so we keep them excluded for
1311 1311 # now.
1312 1312 b'hgrc',
1313 1313 b'requires',
1314 1314 # XXX cache is a complicated business; someone
1315 1315 # should investigate this in depth at some point
1316 1316 b'cache/',
1317 1317 # XXX shouldn't dirstate be covered by the wlock?
1318 1318 b'dirstate',
1319 1319 # XXX bisect was still a bit too messy at the time
1320 1320 # this changeset was introduced. Someone should fix
1321 1321 # the remaining bit and drop this line
1322 1322 b'bisect.state',
1323 1323 }
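# Illustrative only (assumed usage, not upstream text): an extension keeping
# its own lock-free state file could extend this set during setup, e.g.
#   localrepo.localrepository._wlockfreeprefix.add(b'myext-state')
# where b'myext-state' is a hypothetical file name.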
1324 1324
1325 1325 def __init__(
1326 1326 self,
1327 1327 baseui,
1328 1328 ui,
1329 1329 origroot,
1330 1330 wdirvfs,
1331 1331 hgvfs,
1332 1332 requirements,
1333 1333 supportedrequirements,
1334 1334 sharedpath,
1335 1335 store,
1336 1336 cachevfs,
1337 1337 wcachevfs,
1338 1338 features,
1339 1339 intents=None,
1340 1340 ):
1341 1341 """Create a new local repository instance.
1342 1342
1343 1343 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1344 1344 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1345 1345 object.
1346 1346
1347 1347 Arguments:
1348 1348
1349 1349 baseui
1350 1350 ``ui.ui`` instance that ``ui`` argument was based off of.
1351 1351
1352 1352 ui
1353 1353 ``ui.ui`` instance for use by the repository.
1354 1354
1355 1355 origroot
1356 1356 ``bytes`` path to working directory root of this repository.
1357 1357
1358 1358 wdirvfs
1359 1359 ``vfs.vfs`` rooted at the working directory.
1360 1360
1361 1361 hgvfs
1362 1362 ``vfs.vfs`` rooted at .hg/
1363 1363
1364 1364 requirements
1365 1365 ``set`` of bytestrings representing repository opening requirements.
1366 1366
1367 1367 supportedrequirements
1368 1368 ``set`` of bytestrings representing repository requirements that we
1369 1369 know how to open. May be a superset of ``requirements``.
1370 1370
1371 1371 sharedpath
1372 1372 ``bytes`` defining the path to the storage base directory. Points to a
1373 1373 ``.hg/`` directory somewhere.
1374 1374
1375 1375 store
1376 1376 ``store.basicstore`` (or derived) instance providing access to
1377 1377 versioned storage.
1378 1378
1379 1379 cachevfs
1380 1380 ``vfs.vfs`` used for cache files.
1381 1381
1382 1382 wcachevfs
1383 1383 ``vfs.vfs`` used for cache files related to the working copy.
1384 1384
1385 1385 features
1386 1386 ``set`` of bytestrings defining features/capabilities of this
1387 1387 instance.
1388 1388
1389 1389 intents
1390 1390 ``set`` of system strings indicating what this repo will be used
1391 1391 for.
1392 1392 """
1393 1393 self.baseui = baseui
1394 1394 self.ui = ui
1395 1395 self.origroot = origroot
1396 1396 # vfs rooted at working directory.
1397 1397 self.wvfs = wdirvfs
1398 1398 self.root = wdirvfs.base
1399 1399 # vfs rooted at .hg/. Used to access most non-store paths.
1400 1400 self.vfs = hgvfs
1401 1401 self.path = hgvfs.base
1402 1402 self.requirements = requirements
1403 1403 self.nodeconstants = sha1nodeconstants
1404 1404 self.nullid = self.nodeconstants.nullid
1405 1405 self.supported = supportedrequirements
1406 1406 self.sharedpath = sharedpath
1407 1407 self.store = store
1408 1408 self.cachevfs = cachevfs
1409 1409 self.wcachevfs = wcachevfs
1410 1410 self.features = features
1411 1411
1412 1412 self.filtername = None
1413 1413
1414 1414 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1415 1415 b'devel', b'check-locks'
1416 1416 ):
1417 1417 self.vfs.audit = self._getvfsward(self.vfs.audit)
1418 1418 # A list of callbacks to shape the phase if no data were found.
1419 1419 # Callbacks are in the form: func(repo, roots) --> processed root.
1420 1420 # This list is to be filled by extensions during repo setup
1421 1421 self._phasedefaults = []
1422 1422
1423 1423 color.setup(self.ui)
1424 1424
1425 1425 self.spath = self.store.path
1426 1426 self.svfs = self.store.vfs
1427 1427 self.sjoin = self.store.join
1428 1428 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1429 1429 b'devel', b'check-locks'
1430 1430 ):
1431 1431 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1432 1432 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1433 1433 else: # standard vfs
1434 1434 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1435 1435
1436 1436 self._dirstatevalidatewarned = False
1437 1437
1438 1438 self._branchcaches = branchmap.BranchMapCache()
1439 1439 self._revbranchcache = None
1440 1440 self._filterpats = {}
1441 1441 self._datafilters = {}
1442 1442 self._transref = self._lockref = self._wlockref = None
1443 1443
1444 1444 # A cache for various files under .hg/ that tracks file changes,
1445 1445 # (used by the filecache decorator)
1446 1446 #
1447 1447 # Maps a property name to its util.filecacheentry
1448 1448 self._filecache = {}
1449 1449
1450 1450 # hold sets of revisions to be filtered
1451 1451 # should be cleared when something might have changed the filter value:
1452 1452 # - new changesets,
1453 1453 # - phase change,
1454 1454 # - new obsolescence marker,
1455 1455 # - working directory parent change,
1456 1456 # - bookmark changes
1457 1457 self.filteredrevcache = {}
1458 1458
1459 1459 # post-dirstate-status hooks
1460 1460 self._postdsstatus = []
1461 1461
1462 1462 # generic mapping between names and nodes
1463 1463 self.names = namespaces.namespaces()
1464 1464
1465 1465 # Key to signature value.
1466 1466 self._sparsesignaturecache = {}
1467 1467 # Signature to cached matcher instance.
1468 1468 self._sparsematchercache = {}
1469 1469
1470 1470 self._extrafilterid = repoview.extrafilter(ui)
1471 1471
1472 1472 self.filecopiesmode = None
1473 1473 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1474 1474 self.filecopiesmode = b'changeset-sidedata'
1475 1475
1476 1476 self._wanted_sidedata = set()
1477 1477 self._sidedata_computers = {}
1478 1478 sidedatamod.set_sidedata_spec_for_repo(self)
1479 1479
1480 1480 def _getvfsward(self, origfunc):
1481 1481 """build a ward for self.vfs"""
1482 1482 rref = weakref.ref(self)
1483 1483
1484 1484 def checkvfs(path, mode=None):
1485 1485 ret = origfunc(path, mode=mode)
1486 1486 repo = rref()
1487 1487 if (
1488 1488 repo is None
1489 1489 or not util.safehasattr(repo, b'_wlockref')
1490 1490 or not util.safehasattr(repo, b'_lockref')
1491 1491 ):
1492 1492 return
1493 1493 if mode in (None, b'r', b'rb'):
1494 1494 return
1495 1495 if path.startswith(repo.path):
1496 1496 # truncate name relative to the repository (.hg)
1497 1497 path = path[len(repo.path) + 1 :]
1498 1498 if path.startswith(b'cache/'):
1499 1499 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1500 1500 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1501 1501 # path prefixes covered by 'lock'
1502 1502 vfs_path_prefixes = (
1503 1503 b'journal.',
1504 1504 b'undo.',
1505 1505 b'strip-backup/',
1506 1506 b'cache/',
1507 1507 )
1508 1508 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1509 1509 if repo._currentlock(repo._lockref) is None:
1510 1510 repo.ui.develwarn(
1511 1511 b'write with no lock: "%s"' % path,
1512 1512 stacklevel=3,
1513 1513 config=b'check-locks',
1514 1514 )
1515 1515 elif repo._currentlock(repo._wlockref) is None:
1516 1516 # rest of vfs files are covered by 'wlock'
1517 1517 #
1518 1518 # exclude special files
1519 1519 for prefix in self._wlockfreeprefix:
1520 1520 if path.startswith(prefix):
1521 1521 return
1522 1522 repo.ui.develwarn(
1523 1523 b'write with no wlock: "%s"' % path,
1524 1524 stacklevel=3,
1525 1525 config=b'check-locks',
1526 1526 )
1527 1527 return ret
1528 1528
1529 1529 return checkvfs
1530 1530
1531 1531 def _getsvfsward(self, origfunc):
1532 1532 """build a ward for self.svfs"""
1533 1533 rref = weakref.ref(self)
1534 1534
1535 1535 def checksvfs(path, mode=None):
1536 1536 ret = origfunc(path, mode=mode)
1537 1537 repo = rref()
1538 1538 if repo is None or not util.safehasattr(repo, b'_lockref'):
1539 1539 return
1540 1540 if mode in (None, b'r', b'rb'):
1541 1541 return
1542 1542 if path.startswith(repo.sharedpath):
1543 1543 # truncate name relative to the repository (.hg)
1544 1544 path = path[len(repo.sharedpath) + 1 :]
1545 1545 if repo._currentlock(repo._lockref) is None:
1546 1546 repo.ui.develwarn(
1547 1547 b'write with no lock: "%s"' % path, stacklevel=4
1548 1548 )
1549 1549 return ret
1550 1550
1551 1551 return checksvfs
1552 1552
1553 1553 def close(self):
1554 1554 self._writecaches()
1555 1555
1556 1556 def _writecaches(self):
1557 1557 if self._revbranchcache:
1558 1558 self._revbranchcache.write()
1559 1559
1560 1560 def _restrictcapabilities(self, caps):
1561 1561 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1562 1562 caps = set(caps)
1563 1563 capsblob = bundle2.encodecaps(
1564 1564 bundle2.getrepocaps(self, role=b'client')
1565 1565 )
1566 1566 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1567 1567 if self.ui.configbool(b'experimental', b'narrow'):
1568 1568 caps.add(wireprototypes.NARROWCAP)
1569 1569 return caps
1570 1570
1571 1571 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1572 1572 # self -> auditor -> self._checknested -> self
1573 1573
1574 1574 @property
1575 1575 def auditor(self):
1576 1576 # This is only used by context.workingctx.match in order to
1577 1577 # detect files in subrepos.
1578 1578 return pathutil.pathauditor(self.root, callback=self._checknested)
1579 1579
1580 1580 @property
1581 1581 def nofsauditor(self):
1582 1582 # This is only used by context.basectx.match in order to detect
1583 1583 # files in subrepos.
1584 1584 return pathutil.pathauditor(
1585 1585 self.root, callback=self._checknested, realfs=False, cached=True
1586 1586 )
1587 1587
1588 1588 def _checknested(self, path):
1589 1589 """Determine if path is a legal nested repository."""
1590 1590 if not path.startswith(self.root):
1591 1591 return False
1592 1592 subpath = path[len(self.root) + 1 :]
1593 1593 normsubpath = util.pconvert(subpath)
1594 1594
1595 1595 # XXX: Checking against the current working copy is wrong in
1596 1596 # the sense that it can reject things like
1597 1597 #
1598 1598 # $ hg cat -r 10 sub/x.txt
1599 1599 #
1600 1600 # if sub/ is no longer a subrepository in the working copy
1601 1601 # parent revision.
1602 1602 #
1603 1603 # However, it can of course also allow things that would have
1604 1604 # been rejected before, such as the above cat command if sub/
1605 1605 # is a subrepository now, but was a normal directory before.
1606 1606 # The old path auditor would have rejected by mistake since it
1607 1607 # panics when it sees sub/.hg/.
1608 1608 #
1609 1609 # All in all, checking against the working copy seems sensible
1610 1610 # since we want to prevent access to nested repositories on
1611 1611 # the filesystem *now*.
1612 1612 ctx = self[None]
1613 1613 parts = util.splitpath(subpath)
1614 1614 while parts:
1615 1615 prefix = b'/'.join(parts)
1616 1616 if prefix in ctx.substate:
1617 1617 if prefix == normsubpath:
1618 1618 return True
1619 1619 else:
1620 1620 sub = ctx.sub(prefix)
1621 1621 return sub.checknested(subpath[len(prefix) + 1 :])
1622 1622 else:
1623 1623 parts.pop()
1624 1624 return False
1625 1625
1626 1626 def peer(self):
1627 1627 return localpeer(self) # not cached to avoid reference cycle
1628 1628
1629 1629 def unfiltered(self):
1630 1630 """Return unfiltered version of the repository
1631 1631
1632 1632 Intended to be overwritten by filtered repo."""
1633 1633 return self
1634 1634
1635 1635 def filtered(self, name, visibilityexceptions=None):
1636 1636 """Return a filtered version of a repository
1637 1637
1638 1638 The `name` parameter is the identifier of the requested view. This
1639 1639 will return a repoview object set "exactly" to the specified view.
1640 1640
1641 1641 This function does not apply recursive filtering to a repository. For
1642 1642 example calling `repo.filtered("served")` will return a repoview using
1643 1643 the "served" view, regardless of the initial view used by `repo`.
1644 1644
1645 1645 In other words, there is always only one level of `repoview` "filtering".
1646 1646 """
1647 1647 if self._extrafilterid is not None and b'%' not in name:
1648 1648 name = name + b'%' + self._extrafilterid
1649 1649
1650 1650 cls = repoview.newtype(self.unfiltered().__class__)
1651 1651 return cls(self, name, visibilityexceptions)
1652 1652
1653 1653 @mixedrepostorecache(
1654 1654 (b'bookmarks', b'plain'),
1655 1655 (b'bookmarks.current', b'plain'),
1656 1656 (b'bookmarks', b''),
1657 1657 (b'00changelog.i', b''),
1658 1658 )
1659 1659 def _bookmarks(self):
1660 1660 # Since the multiple files involved in the transaction cannot be
1661 1661 # written atomically (with current repository format), there is a race
1662 1662 # condition here.
1663 1663 #
1664 1664 # 1) changelog content A is read
1665 1665 # 2) outside transaction update changelog to content B
1666 1666 # 3) outside transaction update bookmark file referring to content B
1667 1667 # 4) bookmarks file content is read and filtered against changelog-A
1668 1668 #
1669 1669 # When this happens, bookmarks against nodes missing from A are dropped.
1670 1670 #
1671 1671 # Having this happen during read is not great, but it becomes worse
1672 1672 # when this happens during write because the bookmarks to the "unknown"
1673 1673 # nodes will be dropped for good. However, writes happen within locks.
1674 1674 # This locking makes it possible to have a race free consistent read.
1675 1675 # For this purpose, data read from disk before locking is
1676 1676 # "invalidated" right after the locks are taken. These invalidations are
1677 1677 # "light": the `filecache` mechanism keeps the data in memory and will
1678 1678 # reuse it if the underlying files did not change. Not parsing the
1679 1679 # same data multiple times helps performance.
1680 1680 #
1681 1681 # Unfortunately, in the case described above, the files tracked by the
1682 1682 # bookmarks file cache might not have changed, but the in-memory
1683 1683 # content is still "wrong" because we used an older changelog content
1684 1684 # to process the on-disk data. So after locking, the changelog would be
1685 1685 # refreshed but `_bookmarks` would be preserved.
1686 1686 # Adding `00changelog.i` to the list of tracked files is not
1687 1687 # enough, because at the time we build the content for `_bookmarks` in
1688 1688 # (4), the changelog file has already diverged from the content used
1689 1689 # for loading `changelog` in (1)
1690 1690 #
1691 1691 # To prevent the issue, we force the changelog to be explicitly
1692 1692 # reloaded while computing `_bookmarks`. The data race can still happen
1693 1693 # without the lock (with a narrower window), but it would no longer go
1694 1694 # undetected during the lock time refresh.
1695 1695 #
1696 1696 # The new schedule is as follows:
1697 1697 #
1698 1698 # 1) filecache logic detect that `_bookmarks` needs to be computed
1699 1699 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1700 1700 # 3) We force `changelog` filecache to be tested
1701 1701 # 4) cachestat for `changelog` are captured (for changelog)
1702 1702 # 5) `_bookmarks` is computed and cached
1703 1703 #
1704 1704 # The step in (3) ensures we have a changelog at least as recent as the
1705 1705 # cache stat computed in (1). As a result, at locking time:
1706 1706 # * if the changelog did not change since (1) -> we can reuse the data
1707 1707 # * otherwise -> the bookmarks get refreshed.
1708 1708 self._refreshchangelog()
1709 1709 return bookmarks.bmstore(self)
1710 1710
1711 1711 def _refreshchangelog(self):
1712 1712 """make sure the in memory changelog match the on-disk one"""
1713 1713 if 'changelog' in vars(self) and self.currenttransaction() is None:
1714 1714 del self.changelog
1715 1715
1716 1716 @property
1717 1717 def _activebookmark(self):
1718 1718 return self._bookmarks.active
1719 1719
1720 1720 # _phasesets depend on changelog. What we need is to call
1721 1721 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1722 1722 # can't be easily expressed in the filecache mechanism.
1723 1723 @storecache(b'phaseroots', b'00changelog.i')
1724 1724 def _phasecache(self):
1725 1725 return phases.phasecache(self, self._phasedefaults)
1726 1726
1727 1727 @storecache(b'obsstore')
1728 1728 def obsstore(self):
1729 1729 return obsolete.makestore(self.ui, self)
1730 1730
1731 1731 @changelogcache()
1732 1732 def changelog(repo):
1733 1733 # load dirstate before changelog to avoid a race; see issue6303
1734 1734 repo.dirstate.prefetch_parents()
1735 1735 return repo.store.changelog(
1736 1736 txnutil.mayhavepending(repo.root),
1737 1737 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1738 1738 )
1739 1739
1740 1740 @manifestlogcache()
1741 1741 def manifestlog(self):
1742 1742 return self.store.manifestlog(self, self._storenarrowmatch)
1743 1743
1744 1744 @repofilecache(b'dirstate')
1745 1745 def dirstate(self):
1746 1746 return self._makedirstate()
1747 1747
1748 1748 def _makedirstate(self):
1749 1749 """Extension point for wrapping the dirstate per-repo."""
1750 sparsematchfn = lambda: sparse.matcher(self)
1750 sparsematchfn = None
1751 if sparse.use_sparse(self):
1752 sparsematchfn = lambda: sparse.matcher(self)
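# Descriptive note (not upstream text): with the hunk above, ``sparsematchfn``
# stays None unless sparse.use_sparse(self) is true, so dirstate receives None
# as the sparse-matcher factory when sparse is not in use. A minimal sketch of
# the assumed consumer side would be:
#   matcher = sparsematchfn() if sparsematchfn is not None else None
# i.e. None signals "sparse disabled" instead of an always-built matcher.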
1751 1753 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1752 1754 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1753 1755 use_dirstate_v2 = v2_req in self.requirements
1754 1756 use_tracked_hint = th in self.requirements
1755 1757
1756 1758 return dirstate.dirstate(
1757 1759 self.vfs,
1758 1760 self.ui,
1759 1761 self.root,
1760 1762 self._dirstatevalidate,
1761 1763 sparsematchfn,
1762 1764 self.nodeconstants,
1763 1765 use_dirstate_v2,
1764 1766 use_tracked_hint=use_tracked_hint,
1765 1767 )
1766 1768
1767 1769 def _dirstatevalidate(self, node):
1768 1770 try:
1769 1771 self.changelog.rev(node)
1770 1772 return node
1771 1773 except error.LookupError:
1772 1774 if not self._dirstatevalidatewarned:
1773 1775 self._dirstatevalidatewarned = True
1774 1776 self.ui.warn(
1775 1777 _(b"warning: ignoring unknown working parent %s!\n")
1776 1778 % short(node)
1777 1779 )
1778 1780 return self.nullid
1779 1781
1780 1782 @storecache(narrowspec.FILENAME)
1781 1783 def narrowpats(self):
1782 1784 """matcher patterns for this repository's narrowspec
1783 1785
1784 1786 A tuple of (includes, excludes).
1785 1787 """
1786 1788 return narrowspec.load(self)
1787 1789
1788 1790 @storecache(narrowspec.FILENAME)
1789 1791 def _storenarrowmatch(self):
1790 1792 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1791 1793 return matchmod.always()
1792 1794 include, exclude = self.narrowpats
1793 1795 return narrowspec.match(self.root, include=include, exclude=exclude)
1794 1796
1795 1797 @storecache(narrowspec.FILENAME)
1796 1798 def _narrowmatch(self):
1797 1799 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1798 1800 return matchmod.always()
1799 1801 narrowspec.checkworkingcopynarrowspec(self)
1800 1802 include, exclude = self.narrowpats
1801 1803 return narrowspec.match(self.root, include=include, exclude=exclude)
1802 1804
1803 1805 def narrowmatch(self, match=None, includeexact=False):
1804 1806 """matcher corresponding the the repo's narrowspec
1805 1807
1806 1808 If `match` is given, then that will be intersected with the narrow
1807 1809 matcher.
1808 1810
1809 1811 If `includeexact` is True, then any exact matches from `match` will
1810 1812 be included even if they're outside the narrowspec.
1811 1813 """
1812 1814 if match:
1813 1815 if includeexact and not self._narrowmatch.always():
1814 1816 # do not exclude explicitly-specified paths so that they can
1815 1817 # be warned later on
1816 1818 em = matchmod.exact(match.files())
1817 1819 nm = matchmod.unionmatcher([self._narrowmatch, em])
1818 1820 return matchmod.intersectmatchers(match, nm)
1819 1821 return matchmod.intersectmatchers(match, self._narrowmatch)
1820 1822 return self._narrowmatch
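# Usage sketch (illustrative, not upstream text): callers typically narrow an
# arbitrary matcher before walking the store, e.g.
#   m = repo.narrowmatch(matchmod.match(repo.root, b'', [b'glob:src/**']))
# which yields the intersection of the user pattern and the narrowspec.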
1821 1823
1822 1824 def setnarrowpats(self, newincludes, newexcludes):
1823 1825 narrowspec.save(self, newincludes, newexcludes)
1824 1826 self.invalidate(clearfilecache=True)
1825 1827
1826 1828 @unfilteredpropertycache
1827 1829 def _quick_access_changeid_null(self):
1828 1830 return {
1829 1831 b'null': (nullrev, self.nodeconstants.nullid),
1830 1832 nullrev: (nullrev, self.nodeconstants.nullid),
1831 1833 self.nullid: (nullrev, self.nullid),
1832 1834 }
1833 1835
1834 1836 @unfilteredpropertycache
1835 1837 def _quick_access_changeid_wc(self):
1836 1838 # also fast path access to the working copy parents
1837 1839 # however, only do it for filters that ensure the wc is visible.
1838 1840 quick = self._quick_access_changeid_null.copy()
1839 1841 cl = self.unfiltered().changelog
1840 1842 for node in self.dirstate.parents():
1841 1843 if node == self.nullid:
1842 1844 continue
1843 1845 rev = cl.index.get_rev(node)
1844 1846 if rev is None:
1845 1847 # unknown working copy parent case:
1846 1848 #
1847 1849 # skip the fast path and let higher code deal with it
1848 1850 continue
1849 1851 pair = (rev, node)
1850 1852 quick[rev] = pair
1851 1853 quick[node] = pair
1852 1854 # also add the parents of the parents
1853 1855 for r in cl.parentrevs(rev):
1854 1856 if r == nullrev:
1855 1857 continue
1856 1858 n = cl.node(r)
1857 1859 pair = (r, n)
1858 1860 quick[r] = pair
1859 1861 quick[n] = pair
1860 1862 p1node = self.dirstate.p1()
1861 1863 if p1node != self.nullid:
1862 1864 quick[b'.'] = quick[p1node]
1863 1865 return quick
1864 1866
1865 1867 @unfilteredmethod
1866 1868 def _quick_access_changeid_invalidate(self):
1867 1869 if '_quick_access_changeid_wc' in vars(self):
1868 1870 del self.__dict__['_quick_access_changeid_wc']
1869 1871
1870 1872 @property
1871 1873 def _quick_access_changeid(self):
1872 1874 """an helper dictionnary for __getitem__ calls
1873 1875
1874 1876 This contains a list of symbols we can recognise right away without
1875 1877 further processing.
1876 1878 """
1877 1879 if self.filtername in repoview.filter_has_wc:
1878 1880 return self._quick_access_changeid_wc
1879 1881 return self._quick_access_changeid_null
1880 1882
1881 1883 def __getitem__(self, changeid):
1882 1884 # dealing with special cases
1883 1885 if changeid is None:
1884 1886 return context.workingctx(self)
1885 1887 if isinstance(changeid, context.basectx):
1886 1888 return changeid
1887 1889
1888 1890 # dealing with multiple revisions
1889 1891 if isinstance(changeid, slice):
1890 1892 # wdirrev isn't contiguous so the slice shouldn't include it
1891 1893 return [
1892 1894 self[i]
1893 1895 for i in range(*changeid.indices(len(self)))
1894 1896 if i not in self.changelog.filteredrevs
1895 1897 ]
1896 1898
1897 1899 # dealing with some special values
1898 1900 quick_access = self._quick_access_changeid.get(changeid)
1899 1901 if quick_access is not None:
1900 1902 rev, node = quick_access
1901 1903 return context.changectx(self, rev, node, maybe_filtered=False)
1902 1904 if changeid == b'tip':
1903 1905 node = self.changelog.tip()
1904 1906 rev = self.changelog.rev(node)
1905 1907 return context.changectx(self, rev, node)
1906 1908
1907 1909 # dealing with arbitrary values
1908 1910 try:
1909 1911 if isinstance(changeid, int):
1910 1912 node = self.changelog.node(changeid)
1911 1913 rev = changeid
1912 1914 elif changeid == b'.':
1913 1915 # this is a hack to delay/avoid loading obsmarkers
1914 1916 # when we know that '.' won't be hidden
1915 1917 node = self.dirstate.p1()
1916 1918 rev = self.unfiltered().changelog.rev(node)
1917 1919 elif len(changeid) == self.nodeconstants.nodelen:
1918 1920 try:
1919 1921 node = changeid
1920 1922 rev = self.changelog.rev(changeid)
1921 1923 except error.FilteredLookupError:
1922 1924 changeid = hex(changeid) # for the error message
1923 1925 raise
1924 1926 except LookupError:
1925 1927 # check if it might have come from damaged dirstate
1926 1928 #
1927 1929 # XXX we could avoid the unfiltered if we had a recognizable
1928 1930 # exception for filtered changeset access
1929 1931 if (
1930 1932 self.local()
1931 1933 and changeid in self.unfiltered().dirstate.parents()
1932 1934 ):
1933 1935 msg = _(b"working directory has unknown parent '%s'!")
1934 1936 raise error.Abort(msg % short(changeid))
1935 1937 changeid = hex(changeid) # for the error message
1936 1938 raise
1937 1939
1938 1940 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1939 1941 node = bin(changeid)
1940 1942 rev = self.changelog.rev(node)
1941 1943 else:
1942 1944 raise error.ProgrammingError(
1943 1945 b"unsupported changeid '%s' of type %s"
1944 1946 % (changeid, pycompat.bytestr(type(changeid)))
1945 1947 )
1946 1948
1947 1949 return context.changectx(self, rev, node)
1948 1950
1949 1951 except (error.FilteredIndexError, error.FilteredLookupError):
1950 1952 raise error.FilteredRepoLookupError(
1951 1953 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1952 1954 )
1953 1955 except (IndexError, LookupError):
1954 1956 raise error.RepoLookupError(
1955 1957 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1956 1958 )
1957 1959 except error.WdirUnsupported:
1958 1960 return context.workingctx(self)
1959 1961
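# Summary (descriptive, not upstream text) of the ``changeid`` forms accepted
# by __getitem__ above: None (working context), an existing basectx, a slice
# of revisions, an integer revision, b'.', b'tip', a binary node of nodelen
# bytes, or its hex form of 2 * nodelen characters; anything else raises
# ProgrammingError.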
1960 1962 def __contains__(self, changeid):
1961 1963 """True if the given changeid exists"""
1962 1964 try:
1963 1965 self[changeid]
1964 1966 return True
1965 1967 except error.RepoLookupError:
1966 1968 return False
1967 1969
1968 1970 def __nonzero__(self):
1969 1971 return True
1970 1972
1971 1973 __bool__ = __nonzero__
1972 1974
1973 1975 def __len__(self):
1974 1976 # no need to pay the cost of repoview.changelog
1975 1977 unfi = self.unfiltered()
1976 1978 return len(unfi.changelog)
1977 1979
1978 1980 def __iter__(self):
1979 1981 return iter(self.changelog)
1980 1982
1981 1983 def revs(self, expr, *args):
1982 1984 """Find revisions matching a revset.
1983 1985
1984 1986 The revset is specified as a string ``expr`` that may contain
1985 1987 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1986 1988
1987 1989 Revset aliases from the configuration are not expanded. To expand
1988 1990 user aliases, consider calling ``scmutil.revrange()`` or
1989 1991 ``repo.anyrevs([expr], user=True)``.
1990 1992
1991 1993 Returns a smartset.abstractsmartset, which is a list-like interface
1992 1994 that contains integer revisions.
1993 1995 """
1994 1996 tree = revsetlang.spectree(expr, *args)
1995 1997 return revset.makematcher(tree)(self)
1996 1998
1997 1999 def set(self, expr, *args):
1998 2000 """Find revisions matching a revset and emit changectx instances.
1999 2001
2000 2002 This is a convenience wrapper around ``revs()`` that iterates the
2001 2003 result and is a generator of changectx instances.
2002 2004
2003 2005 Revset aliases from the configuration are not expanded. To expand
2004 2006 user aliases, consider calling ``scmutil.revrange()``.
2005 2007 """
2006 2008 for r in self.revs(expr, *args):
2007 2009 yield self[r]
2008 2010
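# Usage sketch (illustrative, not upstream text): the %-escapes mentioned in
# the docstrings above are expanded by revsetlang.formatspec, e.g.
#   repo.revs(b'%ld and merge()', revs)
#   for ctx in repo.set(b'parents(%d)', rev): ...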
2009 2011 def anyrevs(self, specs, user=False, localalias=None):
2010 2012 """Find revisions matching one of the given revsets.
2011 2013
2012 2014 Revset aliases from the configuration are not expanded by default. To
2013 2015 expand user aliases, specify ``user=True``. To provide some local
2014 2016 definitions overriding user aliases, set ``localalias`` to
2015 2017 ``{name: definitionstring}``.
2016 2018 """
2017 2019 if specs == [b'null']:
2018 2020 return revset.baseset([nullrev])
2019 2021 if specs == [b'.']:
2020 2022 quick_data = self._quick_access_changeid.get(b'.')
2021 2023 if quick_data is not None:
2022 2024 return revset.baseset([quick_data[0]])
2023 2025 if user:
2024 2026 m = revset.matchany(
2025 2027 self.ui,
2026 2028 specs,
2027 2029 lookup=revset.lookupfn(self),
2028 2030 localalias=localalias,
2029 2031 )
2030 2032 else:
2031 2033 m = revset.matchany(None, specs, localalias=localalias)
2032 2034 return m(self)
2033 2035
2034 2036 def url(self):
2035 2037 return b'file:' + self.root
2036 2038
2037 2039 def hook(self, name, throw=False, **args):
2038 2040 """Call a hook, passing this repo instance.
2039 2041
2040 2042 This is a convenience method to aid invoking hooks. Extensions likely
2041 2043 won't call this unless they have registered a custom hook or are
2042 2044 replacing code that is expected to call a hook.
2043 2045 """
2044 2046 return hook.hook(self.ui, self, name, throw, **args)
2045 2047
2046 2048 @filteredpropertycache
2047 2049 def _tagscache(self):
2048 2050 """Returns a tagscache object that contains various tags related
2049 2051 caches."""
2050 2052
2051 2053 # This simplifies its cache management by having one decorated
2052 2054 # function (this one) and the rest simply fetch things from it.
2053 2055 class tagscache:
2054 2056 def __init__(self):
2055 2057 # These two define the set of tags for this repository. tags
2056 2058 # maps tag name to node; tagtypes maps tag name to 'global' or
2057 2059 # 'local'. (Global tags are defined by .hgtags across all
2058 2060 # heads, and local tags are defined in .hg/localtags.)
2059 2061 # They constitute the in-memory cache of tags.
2060 2062 self.tags = self.tagtypes = None
2061 2063
2062 2064 self.nodetagscache = self.tagslist = None
2063 2065
2064 2066 cache = tagscache()
2065 2067 cache.tags, cache.tagtypes = self._findtags()
2066 2068
2067 2069 return cache
2068 2070
2069 2071 def tags(self):
2070 2072 '''return a mapping of tag to node'''
2071 2073 t = {}
2072 2074 if self.changelog.filteredrevs:
2073 2075 tags, tt = self._findtags()
2074 2076 else:
2075 2077 tags = self._tagscache.tags
2076 2078 rev = self.changelog.rev
2077 2079 for k, v in tags.items():
2078 2080 try:
2079 2081 # ignore tags to unknown nodes
2080 2082 rev(v)
2081 2083 t[k] = v
2082 2084 except (error.LookupError, ValueError):
2083 2085 pass
2084 2086 return t
2085 2087
2086 2088 def _findtags(self):
2087 2089 """Do the hard work of finding tags. Return a pair of dicts
2088 2090 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2089 2091 maps tag name to a string like \'global\' or \'local\'.
2090 2092 Subclasses or extensions are free to add their own tags, but
2091 2093 should be aware that the returned dicts will be retained for the
2092 2094 duration of the localrepo object."""
2093 2095
2094 2096 # XXX what tagtype should subclasses/extensions use? Currently
2095 2097 # mq and bookmarks add tags, but do not set the tagtype at all.
2096 2098 # Should each extension invent its own tag type? Should there
2097 2099 # be one tagtype for all such "virtual" tags? Or is the status
2098 2100 # quo fine?
2099 2101
2100 2102 # map tag name to (node, hist)
2101 2103 alltags = tagsmod.findglobaltags(self.ui, self)
2102 2104 # map tag name to tag type
2103 2105 tagtypes = {tag: b'global' for tag in alltags}
2104 2106
2105 2107 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2106 2108
2107 2109 # Build the return dicts. Have to re-encode tag names because
2108 2110 # the tags module always uses UTF-8 (in order not to lose info
2109 2111 # writing to the cache), but the rest of Mercurial wants them in
2110 2112 # local encoding.
2111 2113 tags = {}
2112 2114 for (name, (node, hist)) in alltags.items():
2113 2115 if node != self.nullid:
2114 2116 tags[encoding.tolocal(name)] = node
2115 2117 tags[b'tip'] = self.changelog.tip()
2116 2118 tagtypes = {
2117 2119 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2118 2120 }
2119 2121 return (tags, tagtypes)
2120 2122
2121 2123 def tagtype(self, tagname):
2122 2124 """
2123 2125 return the type of the given tag. result can be:
2124 2126
2125 2127 'local' : a local tag
2126 2128 'global' : a global tag
2127 2129 None : tag does not exist
2128 2130 """
2129 2131
2130 2132 return self._tagscache.tagtypes.get(tagname)
2131 2133
2132 2134 def tagslist(self):
2133 2135 '''return a list of tags ordered by revision'''
2134 2136 if not self._tagscache.tagslist:
2135 2137 l = []
2136 2138 for t, n in self.tags().items():
2137 2139 l.append((self.changelog.rev(n), t, n))
2138 2140 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2139 2141
2140 2142 return self._tagscache.tagslist
2141 2143
2142 2144 def nodetags(self, node):
2143 2145 '''return the tags associated with a node'''
2144 2146 if not self._tagscache.nodetagscache:
2145 2147 nodetagscache = {}
2146 2148 for t, n in self._tagscache.tags.items():
2147 2149 nodetagscache.setdefault(n, []).append(t)
2148 2150 for tags in nodetagscache.values():
2149 2151 tags.sort()
2150 2152 self._tagscache.nodetagscache = nodetagscache
2151 2153 return self._tagscache.nodetagscache.get(node, [])
2152 2154
2153 2155 def nodebookmarks(self, node):
2154 2156 """return the list of bookmarks pointing to the specified node"""
2155 2157 return self._bookmarks.names(node)
2156 2158
2157 2159 def branchmap(self):
2158 2160 """returns a dictionary {branch: [branchheads]} with branchheads
2159 2161 ordered by increasing revision number"""
2160 2162 return self._branchcaches[self]
2161 2163
2162 2164 @unfilteredmethod
2163 2165 def revbranchcache(self):
2164 2166 if not self._revbranchcache:
2165 2167 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2166 2168 return self._revbranchcache
2167 2169
2168 2170 def register_changeset(self, rev, changelogrevision):
2169 2171 self.revbranchcache().setdata(rev, changelogrevision)
2170 2172
2171 2173 def branchtip(self, branch, ignoremissing=False):
2172 2174 """return the tip node for a given branch
2173 2175
2174 2176 If ignoremissing is True, then this method will not raise an error.
2175 2177 This is helpful for callers that only expect None for a missing branch
2176 2178 (e.g. namespace).
2177 2179
2178 2180 """
2179 2181 try:
2180 2182 return self.branchmap().branchtip(branch)
2181 2183 except KeyError:
2182 2184 if not ignoremissing:
2183 2185 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2184 2186 else:
2185 2187 pass
2186 2188
2187 2189 def lookup(self, key):
2188 2190 node = scmutil.revsymbol(self, key).node()
2189 2191 if node is None:
2190 2192 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2191 2193 return node
2192 2194
2193 2195 def lookupbranch(self, key):
2194 2196 if self.branchmap().hasbranch(key):
2195 2197 return key
2196 2198
2197 2199 return scmutil.revsymbol(self, key).branch()
2198 2200
2199 2201 def known(self, nodes):
2200 2202 cl = self.changelog
2201 2203 get_rev = cl.index.get_rev
2202 2204 filtered = cl.filteredrevs
2203 2205 result = []
2204 2206 for n in nodes:
2205 2207 r = get_rev(n)
2206 2208 resp = not (r is None or r in filtered)
2207 2209 result.append(resp)
2208 2210 return result
2209 2211
2210 2212 def local(self):
2211 2213 return self
2212 2214
2213 2215 def publishing(self):
2214 2216 # it's safe (and desirable) to trust the publish flag unconditionally
2215 2217 # so that we don't finalize changes shared between users via ssh or nfs
2216 2218 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2217 2219
2218 2220 def cancopy(self):
2219 2221 # so statichttprepo's override of local() works
2220 2222 if not self.local():
2221 2223 return False
2222 2224 if not self.publishing():
2223 2225 return True
2224 2226 # if publishing we can't copy if there is filtered content
2225 2227 return not self.filtered(b'visible').changelog.filteredrevs
2226 2228
2227 2229 def shared(self):
2228 2230 '''the type of shared repository (None if not shared)'''
2229 2231 if self.sharedpath != self.path:
2230 2232 return b'store'
2231 2233 return None
2232 2234
2233 2235 def wjoin(self, f, *insidef):
2234 2236 return self.vfs.reljoin(self.root, f, *insidef)
2235 2237
2236 2238 def setparents(self, p1, p2=None):
2237 2239 if p2 is None:
2238 2240 p2 = self.nullid
2239 2241 self[None].setparents(p1, p2)
2240 2242 self._quick_access_changeid_invalidate()
2241 2243
2242 2244 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2243 2245 """changeid must be a changeset revision, if specified.
2244 2246 fileid can be a file revision or node."""
2245 2247 return context.filectx(
2246 2248 self, path, changeid, fileid, changectx=changectx
2247 2249 )
2248 2250
2249 2251 def getcwd(self):
2250 2252 return self.dirstate.getcwd()
2251 2253
2252 2254 def pathto(self, f, cwd=None):
2253 2255 return self.dirstate.pathto(f, cwd)
2254 2256
2255 2257 def _loadfilter(self, filter):
2256 2258 if filter not in self._filterpats:
2257 2259 l = []
2258 2260 for pat, cmd in self.ui.configitems(filter):
2259 2261 if cmd == b'!':
2260 2262 continue
2261 2263 mf = matchmod.match(self.root, b'', [pat])
2262 2264 fn = None
2263 2265 params = cmd
2264 2266 for name, filterfn in self._datafilters.items():
2265 2267 if cmd.startswith(name):
2266 2268 fn = filterfn
2267 2269 params = cmd[len(name) :].lstrip()
2268 2270 break
2269 2271 if not fn:
2270 2272 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2271 2273 fn.__name__ = 'commandfilter'
2272 2274 # Wrap old filters not supporting keyword arguments
2273 2275 if not pycompat.getargspec(fn)[2]:
2274 2276 oldfn = fn
2275 2277 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2276 2278 fn.__name__ = 'compat-' + oldfn.__name__
2277 2279 l.append((mf, fn, params))
2278 2280 self._filterpats[filter] = l
2279 2281 return self._filterpats[filter]
2280 2282
2281 2283 def _filter(self, filterpats, filename, data):
2282 2284 for mf, fn, cmd in filterpats:
2283 2285 if mf(filename):
2284 2286 self.ui.debug(
2285 2287 b"filtering %s through %s\n"
2286 2288 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2287 2289 )
2288 2290 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2289 2291 break
2290 2292
2291 2293 return data
2292 2294
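# Descriptive note (not upstream text): _loadfilter reads pattern/command
# pairs from the ``[encode]`` and ``[decode]`` config sections; a value of
# ``!`` disables a pattern, and a command starting with a name registered via
# adddatafilter() dispatches to that Python filter instead of an external
# command (see the b'!' check and _datafilters lookup above).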
2293 2295 @unfilteredpropertycache
2294 2296 def _encodefilterpats(self):
2295 2297 return self._loadfilter(b'encode')
2296 2298
2297 2299 @unfilteredpropertycache
2298 2300 def _decodefilterpats(self):
2299 2301 return self._loadfilter(b'decode')
2300 2302
2301 2303 def adddatafilter(self, name, filter):
2302 2304 self._datafilters[name] = filter
2303 2305
2304 2306 def wread(self, filename):
2305 2307 if self.wvfs.islink(filename):
2306 2308 data = self.wvfs.readlink(filename)
2307 2309 else:
2308 2310 data = self.wvfs.read(filename)
2309 2311 return self._filter(self._encodefilterpats, filename, data)
2310 2312
2311 2313 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2312 2314 """write ``data`` into ``filename`` in the working directory
2313 2315
2314 2316 This returns length of written (maybe decoded) data.
2315 2317 """
2316 2318 data = self._filter(self._decodefilterpats, filename, data)
2317 2319 if b'l' in flags:
2318 2320 self.wvfs.symlink(data, filename)
2319 2321 else:
2320 2322 self.wvfs.write(
2321 2323 filename, data, backgroundclose=backgroundclose, **kwargs
2322 2324 )
2323 2325 if b'x' in flags:
2324 2326 self.wvfs.setflags(filename, False, True)
2325 2327 else:
2326 2328 self.wvfs.setflags(filename, False, False)
2327 2329 return len(data)
2328 2330
2329 2331 def wwritedata(self, filename, data):
2330 2332 return self._filter(self._decodefilterpats, filename, data)
2331 2333
2332 2334 def currenttransaction(self):
2333 2335 """return the current transaction or None if non exists"""
2334 2336 if self._transref:
2335 2337 tr = self._transref()
2336 2338 else:
2337 2339 tr = None
2338 2340
2339 2341 if tr and tr.running():
2340 2342 return tr
2341 2343 return None
2342 2344
2343 2345 def transaction(self, desc, report=None):
2344 2346 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2345 2347 b'devel', b'check-locks'
2346 2348 ):
2347 2349 if self._currentlock(self._lockref) is None:
2348 2350 raise error.ProgrammingError(b'transaction requires locking')
2349 2351 tr = self.currenttransaction()
2350 2352 if tr is not None:
2351 2353 return tr.nest(name=desc)
2352 2354
2353 2355 # abort here if the journal already exists
2354 2356 if self.svfs.exists(b"journal"):
2355 2357 raise error.RepoError(
2356 2358 _(b"abandoned transaction found"),
2357 2359 hint=_(b"run 'hg recover' to clean up transaction"),
2358 2360 )
2359 2361
2360 2362 idbase = b"%.40f#%f" % (random.random(), time.time())
2361 2363 ha = hex(hashutil.sha1(idbase).digest())
2362 2364 txnid = b'TXN:' + ha
2363 2365 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2364 2366
2365 2367 self._writejournal(desc)
2366 2368 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2367 2369 if report:
2368 2370 rp = report
2369 2371 else:
2370 2372 rp = self.ui.warn
2371 2373 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2372 2374 # we must avoid cyclic reference between repo and transaction.
2373 2375 reporef = weakref.ref(self)
2374 2376 # Code to track tag movement
2375 2377 #
2376 2378 # Since tags are all handled as file content, it is actually quite hard
2377 2379 # to track these movements from a code perspective. So we fall back to
2378 2380 # tracking at the repository level. One could envision tracking changes
2379 2381 # to the '.hgtags' file through changegroup apply, but that fails to
2380 2382 # cope with cases where a transaction exposes new heads without a
2381 2383 # changegroup being involved (eg: phase movement).
2382 2384 #
2383 2385 # For now, we gate the feature behind a flag since this likely comes
2384 2386 # with performance impacts. The current code runs more often than needed
2385 2387 # and does not use caches as much as it could. The current focus is on
2386 2388 # the behavior of the feature, so we disable it by default. The flag
2387 2389 # will be removed when we are happy with the performance impact.
2388 2390 #
2389 2391 # Once this feature is no longer experimental move the following
2390 2392 # documentation to the appropriate help section:
2391 2393 #
2392 2394 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2393 2395 # tags (new or changed or deleted tags). In addition the details of
2394 2396 # these changes are made available in a file at:
2395 2397 # ``REPOROOT/.hg/changes/tags.changes``.
2396 2398 # Make sure you check for HG_TAG_MOVED before reading that file as it
2397 2399 # might exist from a previous transaction even if no tags were touched
2398 2400 # in this one. Changes are recorded in a line-based format::
2399 2401 #
2400 2402 # <action> <hex-node> <tag-name>\n
2401 2403 #
2402 2404 # Actions are defined as follows:
2403 2405 # "-R": tag is removed,
2404 2406 # "+A": tag is added,
2405 2407 # "-M": tag is moved (old value),
2406 2408 # "+M": tag is moved (new value),
2407 2409 tracktags = lambda x: None
2408 2410 # experimental config: experimental.hook-track-tags
2409 2411 shouldtracktags = self.ui.configbool(
2410 2412 b'experimental', b'hook-track-tags'
2411 2413 )
2412 2414 if desc != b'strip' and shouldtracktags:
2413 2415 oldheads = self.changelog.headrevs()
2414 2416
2415 2417 def tracktags(tr2):
2416 2418 repo = reporef()
2417 2419 assert repo is not None # help pytype
2418 2420 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2419 2421 newheads = repo.changelog.headrevs()
2420 2422 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2421 2423 # notes: we compare lists here.
2422 2424 # As we do it only once, building a set would not be cheaper
2423 2425 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2424 2426 if changes:
2425 2427 tr2.hookargs[b'tag_moved'] = b'1'
2426 2428 with repo.vfs(
2427 2429 b'changes/tags.changes', b'w', atomictemp=True
2428 2430 ) as changesfile:
2429 2431 # note: we do not register the file to the transaction
2430 2432 # because we need it to still exist when the transaction
2431 2433 # is closed (for txnclose hooks)
2432 2434 tagsmod.writediff(changesfile, changes)
2433 2435
2434 2436 def validate(tr2):
2435 2437 """will run pre-closing hooks"""
2436 2438 # XXX the transaction API is a bit lacking here so we take a hacky
2437 2439 # path for now
2438 2440 #
2439 2441 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2440 2442 # dict is copied before these run. In addition, we need the data
2441 2443 # available to in-memory hooks too.
2442 2444 #
2443 2445 # Moreover, we also need to make sure this runs before txnclose
2444 2446 # hooks and there is no "pending" mechanism that would execute
2445 2447 # logic only if hooks are about to run.
2446 2448 #
2447 2449 # Fixing this limitation of the transaction is also needed to track
2448 2450 # other families of changes (bookmarks, phases, obsolescence).
2449 2451 #
2450 2452 # This will have to be fixed before we remove the experimental
2451 2453 # gating.
2452 2454 tracktags(tr2)
2453 2455 repo = reporef()
2454 2456 assert repo is not None # help pytype
2455 2457
2456 2458 singleheadopt = (b'experimental', b'single-head-per-branch')
2457 2459 singlehead = repo.ui.configbool(*singleheadopt)
2458 2460 if singlehead:
2459 2461 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2460 2462 accountclosed = singleheadsub.get(
2461 2463 b"account-closed-heads", False
2462 2464 )
2463 2465 if singleheadsub.get(b"public-changes-only", False):
2464 2466 filtername = b"immutable"
2465 2467 else:
2466 2468 filtername = b"visible"
2467 2469 scmutil.enforcesinglehead(
2468 2470 repo, tr2, desc, accountclosed, filtername
2469 2471 )
2470 2472 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2471 2473 for name, (old, new) in sorted(
2472 2474 tr.changes[b'bookmarks'].items()
2473 2475 ):
2474 2476 args = tr.hookargs.copy()
2475 2477 args.update(bookmarks.preparehookargs(name, old, new))
2476 2478 repo.hook(
2477 2479 b'pretxnclose-bookmark',
2478 2480 throw=True,
2479 2481 **pycompat.strkwargs(args)
2480 2482 )
2481 2483 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2482 2484 cl = repo.unfiltered().changelog
2483 2485 for revs, (old, new) in tr.changes[b'phases']:
2484 2486 for rev in revs:
2485 2487 args = tr.hookargs.copy()
2486 2488 node = hex(cl.node(rev))
2487 2489 args.update(phases.preparehookargs(node, old, new))
2488 2490 repo.hook(
2489 2491 b'pretxnclose-phase',
2490 2492 throw=True,
2491 2493 **pycompat.strkwargs(args)
2492 2494 )
2493 2495
2494 2496 repo.hook(
2495 2497 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2496 2498 )
2497 2499
2498 2500 def releasefn(tr, success):
2499 2501 repo = reporef()
2500 2502 if repo is None:
2501 2503 # If the repo has been GC'd (and this release function is being
2502 2504 # called from transaction.__del__), there's not much we can do,
2503 2505 # so just leave the unfinished transaction there and let the
2504 2506 # user run `hg recover`.
2505 2507 return
2506 2508 if success:
2507 2509 # this should be explicitly invoked here, because
2508 2510 # in-memory changes aren't written out when closing the
2509 2511 # transaction if tr.addfilegenerator (via
2510 2512 # dirstate.write or so) isn't invoked while the
2511 2513 # transaction is running
2512 2514 repo.dirstate.write(None)
2513 2515 else:
2514 2516 # discard all changes (including ones already written
2515 2517 # out) in this transaction
2516 2518 narrowspec.restorebackup(self, b'journal.narrowspec')
2517 2519 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2518 2520 repo.dirstate.restorebackup(None, b'journal.dirstate')
2519 2521
2520 2522 repo.invalidate(clearfilecache=True)
2521 2523
2522 2524 tr = transaction.transaction(
2523 2525 rp,
2524 2526 self.svfs,
2525 2527 vfsmap,
2526 2528 b"journal",
2527 2529 b"undo",
2528 2530 aftertrans(renames),
2529 2531 self.store.createmode,
2530 2532 validator=validate,
2531 2533 releasefn=releasefn,
2532 2534 checkambigfiles=_cachedfiles,
2533 2535 name=desc,
2534 2536 )
2535 2537 tr.changes[b'origrepolen'] = len(self)
2536 2538 tr.changes[b'obsmarkers'] = set()
2537 2539 tr.changes[b'phases'] = []
2538 2540 tr.changes[b'bookmarks'] = {}
2539 2541
2540 2542 tr.hookargs[b'txnid'] = txnid
2541 2543 tr.hookargs[b'txnname'] = desc
2542 2544 tr.hookargs[b'changes'] = tr.changes
2543 2545 # note: writing the fncache only during finalize means that the file is
2544 2546 # outdated when running hooks. As fncache is used for streaming clone,
2545 2547 # this is not expected to break anything that happens during the hooks.
2546 2548 tr.addfinalize(b'flush-fncache', self.store.write)
2547 2549
2548 2550 def txnclosehook(tr2):
2549 2551 """To be run if transaction is successful, will schedule a hook run"""
2550 2552 # Don't reference tr2 in hook() so we don't hold a reference.
2551 2553 # This reduces memory consumption when there are multiple
2552 2554 # transactions per lock. This can likely go away if issue5045
2553 2555 # fixes the function accumulation.
2554 2556 hookargs = tr2.hookargs
2555 2557
2556 2558 def hookfunc(unused_success):
2557 2559 repo = reporef()
2558 2560 assert repo is not None # help pytype
2559 2561
2560 2562 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2561 2563 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2562 2564 for name, (old, new) in bmchanges:
2563 2565 args = tr.hookargs.copy()
2564 2566 args.update(bookmarks.preparehookargs(name, old, new))
2565 2567 repo.hook(
2566 2568 b'txnclose-bookmark',
2567 2569 throw=False,
2568 2570 **pycompat.strkwargs(args)
2569 2571 )
2570 2572
2571 2573 if hook.hashook(repo.ui, b'txnclose-phase'):
2572 2574 cl = repo.unfiltered().changelog
2573 2575 phasemv = sorted(
2574 2576 tr.changes[b'phases'], key=lambda r: r[0][0]
2575 2577 )
2576 2578 for revs, (old, new) in phasemv:
2577 2579 for rev in revs:
2578 2580 args = tr.hookargs.copy()
2579 2581 node = hex(cl.node(rev))
2580 2582 args.update(phases.preparehookargs(node, old, new))
2581 2583 repo.hook(
2582 2584 b'txnclose-phase',
2583 2585 throw=False,
2584 2586 **pycompat.strkwargs(args)
2585 2587 )
2586 2588
2587 2589 repo.hook(
2588 2590 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2589 2591 )
2590 2592
2591 2593 repo = reporef()
2592 2594 assert repo is not None # help pytype
2593 2595 repo._afterlock(hookfunc)
2594 2596
2595 2597 tr.addfinalize(b'txnclose-hook', txnclosehook)
2596 2598 # Include a leading "-" to make it happen before the transaction summary
2597 2599 # reports registered via scmutil.registersummarycallback() whose names
2598 2600 # are 00-txnreport etc. That way, the caches will be warm when the
2599 2601 # callbacks run.
2600 2602 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2601 2603
2602 2604 def txnaborthook(tr2):
2603 2605 """To be run if transaction is aborted"""
2604 2606 repo = reporef()
2605 2607 assert repo is not None # help pytype
2606 2608 repo.hook(
2607 2609 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2608 2610 )
2609 2611
2610 2612 tr.addabort(b'txnabort-hook', txnaborthook)
2611 2613 # avoid eager cache invalidation. in-memory data should be identical
2612 2614 # to stored data if transaction has no error.
2613 2615 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2614 2616 self._transref = weakref.ref(tr)
2615 2617 scmutil.registersummarycallback(self, tr, desc)
2616 2618 return tr
2617 2619
2618 2620 def _journalfiles(self):
2619 2621 return (
2620 2622 (self.svfs, b'journal'),
2621 2623 (self.svfs, b'journal.narrowspec'),
2622 2624 (self.vfs, b'journal.narrowspec.dirstate'),
2623 2625 (self.vfs, b'journal.dirstate'),
2624 2626 (self.vfs, b'journal.branch'),
2625 2627 (self.vfs, b'journal.desc'),
2626 2628 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2627 2629 (self.svfs, b'journal.phaseroots'),
2628 2630 )
2629 2631
2630 2632 def undofiles(self):
2631 2633 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2632 2634
2633 2635 @unfilteredmethod
2634 2636 def _writejournal(self, desc):
2635 2637 self.dirstate.savebackup(None, b'journal.dirstate')
2636 2638 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2637 2639 narrowspec.savebackup(self, b'journal.narrowspec')
2638 2640 self.vfs.write(
2639 2641 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2640 2642 )
2641 2643 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2642 2644 bookmarksvfs = bookmarks.bookmarksvfs(self)
2643 2645 bookmarksvfs.write(
2644 2646 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2645 2647 )
2646 2648 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2647 2649
2648 2650 def recover(self):
2649 2651 with self.lock():
2650 2652 if self.svfs.exists(b"journal"):
2651 2653 self.ui.status(_(b"rolling back interrupted transaction\n"))
2652 2654 vfsmap = {
2653 2655 b'': self.svfs,
2654 2656 b'plain': self.vfs,
2655 2657 }
2656 2658 transaction.rollback(
2657 2659 self.svfs,
2658 2660 vfsmap,
2659 2661 b"journal",
2660 2662 self.ui.warn,
2661 2663 checkambigfiles=_cachedfiles,
2662 2664 )
2663 2665 self.invalidate()
2664 2666 return True
2665 2667 else:
2666 2668 self.ui.warn(_(b"no interrupted transaction available\n"))
2667 2669 return False
2668 2670
2669 2671 def rollback(self, dryrun=False, force=False):
2670 2672 wlock = lock = dsguard = None
2671 2673 try:
2672 2674 wlock = self.wlock()
2673 2675 lock = self.lock()
2674 2676 if self.svfs.exists(b"undo"):
2675 2677 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2676 2678
2677 2679 return self._rollback(dryrun, force, dsguard)
2678 2680 else:
2679 2681 self.ui.warn(_(b"no rollback information available\n"))
2680 2682 return 1
2681 2683 finally:
2682 2684 release(dsguard, lock, wlock)
2683 2685
2684 2686 @unfilteredmethod # Until we get smarter cache management
2685 2687 def _rollback(self, dryrun, force, dsguard):
2686 2688 ui = self.ui
2687 2689 try:
2688 2690 args = self.vfs.read(b'undo.desc').splitlines()
2689 2691 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2690 2692 if len(args) >= 3:
2691 2693 detail = args[2]
2692 2694 oldtip = oldlen - 1
2693 2695
2694 2696 if detail and ui.verbose:
2695 2697 msg = _(
2696 2698 b'repository tip rolled back to revision %d'
2697 2699 b' (undo %s: %s)\n'
2698 2700 ) % (oldtip, desc, detail)
2699 2701 else:
2700 2702 msg = _(
2701 2703 b'repository tip rolled back to revision %d (undo %s)\n'
2702 2704 ) % (oldtip, desc)
2703 2705 except IOError:
2704 2706 msg = _(b'rolling back unknown transaction\n')
2705 2707 desc = None
2706 2708
2707 2709 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2708 2710 raise error.Abort(
2709 2711 _(
2710 2712 b'rollback of last commit while not checked out '
2711 2713 b'may lose data'
2712 2714 ),
2713 2715 hint=_(b'use -f to force'),
2714 2716 )
2715 2717
2716 2718 ui.status(msg)
2717 2719 if dryrun:
2718 2720 return 0
2719 2721
2720 2722 parents = self.dirstate.parents()
2721 2723 self.destroying()
2722 2724 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2723 2725 transaction.rollback(
2724 2726 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2725 2727 )
2726 2728 bookmarksvfs = bookmarks.bookmarksvfs(self)
2727 2729 if bookmarksvfs.exists(b'undo.bookmarks'):
2728 2730 bookmarksvfs.rename(
2729 2731 b'undo.bookmarks', b'bookmarks', checkambig=True
2730 2732 )
2731 2733 if self.svfs.exists(b'undo.phaseroots'):
2732 2734 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2733 2735 self.invalidate()
2734 2736
2735 2737 has_node = self.changelog.index.has_node
2736 2738 parentgone = any(not has_node(p) for p in parents)
2737 2739 if parentgone:
2738 2740 # prevent dirstateguard from overwriting already restored one
2739 2741 dsguard.close()
2740 2742
2741 2743 narrowspec.restorebackup(self, b'undo.narrowspec')
2742 2744 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2743 2745 self.dirstate.restorebackup(None, b'undo.dirstate')
2744 2746 try:
2745 2747 branch = self.vfs.read(b'undo.branch')
2746 2748 self.dirstate.setbranch(encoding.tolocal(branch))
2747 2749 except IOError:
2748 2750 ui.warn(
2749 2751 _(
2750 2752 b'named branch could not be reset: '
2751 2753 b'current branch is still \'%s\'\n'
2752 2754 )
2753 2755 % self.dirstate.branch()
2754 2756 )
2755 2757
2756 2758 parents = tuple([p.rev() for p in self[None].parents()])
2757 2759 if len(parents) > 1:
2758 2760 ui.status(
2759 2761 _(
2760 2762 b'working directory now based on '
2761 2763 b'revisions %d and %d\n'
2762 2764 )
2763 2765 % parents
2764 2766 )
2765 2767 else:
2766 2768 ui.status(
2767 2769 _(b'working directory now based on revision %d\n') % parents
2768 2770 )
2769 2771 mergestatemod.mergestate.clean(self)
2770 2772
2771 2773 # TODO: if we know which new heads may result from this rollback, pass
2772 2774 # them to destroy(), which will prevent the branchhead cache from being
2773 2775 # invalidated.
2774 2776 self.destroyed()
2775 2777 return 0
2776 2778
2777 2779 def _buildcacheupdater(self, newtransaction):
2778 2780 """called during transaction to build the callback updating cache
2779 2781
2780 2782 Lives on the repository to help extensions that might want to augment
2781 2783 this logic. For this purpose, the created transaction is passed to the
2782 2784 method.
2783 2785 """
2784 2786 # we must avoid cyclic reference between repo and transaction.
2785 2787 reporef = weakref.ref(self)
2786 2788
2787 2789 def updater(tr):
2788 2790 repo = reporef()
2789 2791 assert repo is not None # help pytype
2790 2792 repo.updatecaches(tr)
2791 2793
2792 2794 return updater
2793 2795
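# Illustrative sketch (not part of the changeset above): the docstring on
# _buildcacheupdater says it lives on the repository so extensions can augment
# the cache-updating logic.  A hedged example using the standard reposetup
# subclassing pattern; the class name and the extra debug message are
# hypothetical.
def reposetup(ui, repo):
    if not repo.local():
        return

    class cachetracingrepo(repo.__class__):
        def _buildcacheupdater(self, newtransaction):
            updater = super()._buildcacheupdater(newtransaction)

            def traced_updater(tr):
                updater(tr)  # run the stock cache update first
                self.ui.debug(b'example: caches updated for transaction\n')

            return traced_updater

    repo.__class__ = cachetracingrepo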
2794 2796 @unfilteredmethod
2795 2797 def updatecaches(self, tr=None, full=False, caches=None):
2796 2798 """warm appropriate caches
2797 2799
2798 2800 If this function is called after a transaction closed, the transaction
2799 2801 will be available in the 'tr' argument. This can be used to selectively
2800 2802 update caches relevant to the changes in that transaction.
2801 2803
2802 2804 If 'full' is set, make sure all caches the function knows about have
2803 2805 up-to-date data. Even the ones usually loaded more lazily.
2804 2806
2805 2807 The `full` argument can take a special "post-clone" value. In this case
2806 2808 the cache warming is made after a clone, and some of the slower caches might
2807 2809 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2808 2810 as we plan for a cleaner way to deal with this for 5.9.
2809 2811 """
2810 2812 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2811 2813 # During strip, many caches are invalid but
2812 2814 # later call to `destroyed` will refresh them.
2813 2815 return
2814 2816
2815 2817 unfi = self.unfiltered()
2816 2818
2817 2819 if full:
2818 2820 msg = (
2819 2821 "`full` argument for `repo.updatecaches` is deprecated\n"
2820 2822 "(use `caches=repository.CACHE_ALL` instead)"
2821 2823 )
2822 2824 self.ui.deprecwarn(msg, b"5.9")
2823 2825 caches = repository.CACHES_ALL
2824 2826 if full == b"post-clone":
2825 2827 caches = repository.CACHES_POST_CLONE
2826 2828 caches = repository.CACHES_ALL
2827 2829 elif caches is None:
2828 2830 caches = repository.CACHES_DEFAULT
2829 2831
2830 2832 if repository.CACHE_BRANCHMAP_SERVED in caches:
2831 2833 if tr is None or tr.changes[b'origrepolen'] < len(self):
2832 2834 # accessing the 'served' branchmap should refresh all the others,
2833 2835 self.ui.debug(b'updating the branch cache\n')
2834 2836 self.filtered(b'served').branchmap()
2835 2837 self.filtered(b'served.hidden').branchmap()
2836 2838 # flush all possibly delayed write.
2837 2839 self._branchcaches.write_delayed(self)
2838 2840
2839 2841 if repository.CACHE_CHANGELOG_CACHE in caches:
2840 2842 self.changelog.update_caches(transaction=tr)
2841 2843
2842 2844 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2843 2845 self.manifestlog.update_caches(transaction=tr)
2844 2846
2845 2847 if repository.CACHE_REV_BRANCH in caches:
2846 2848 rbc = unfi.revbranchcache()
2847 2849 for r in unfi.changelog:
2848 2850 rbc.branchinfo(r)
2849 2851 rbc.write()
2850 2852
2851 2853 if repository.CACHE_FULL_MANIFEST in caches:
2852 2854 # ensure the working copy parents are in the manifestfulltextcache
2853 2855 for ctx in self[b'.'].parents():
2854 2856 ctx.manifest() # accessing the manifest is enough
2855 2857
2856 2858 if repository.CACHE_FILE_NODE_TAGS in caches:
2857 2859 # accessing fnode cache warms the cache
2858 2860 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2859 2861
2860 2862 if repository.CACHE_TAGS_DEFAULT in caches:
2861 2863 # accessing tags warms the cache
2862 2864 self.tags()
2863 2865 if repository.CACHE_TAGS_SERVED in caches:
2864 2866 self.filtered(b'served').tags()
2865 2867
2866 2868 if repository.CACHE_BRANCHMAP_ALL in caches:
2867 2869 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2868 2870 # so we're forcing a write to cause these caches to be warmed up
2869 2871 # even if they haven't explicitly been requested yet (if they've
2870 2872 # never been used by hg, they won't ever have been written, even if
2871 2873 # they're a subset of another kind of cache that *has* been used).
2872 2874 for filt in repoview.filtertable.keys():
2873 2875 filtered = self.filtered(filt)
2874 2876 filtered.branchmap().write(filtered)
2875 2877
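# Illustrative sketch: warming caches explicitly with the cache constants used
# above.  A minimal hedged example; `repo` is assumed to be an already-open
# local repository object.
from mercurial.interfaces import repository as repositorymod

def warm_caches(repo, tr=None):
    # warm everything, including the lazily-loaded caches
    repo.updatecaches(caches=repositorymod.CACHES_ALL)
    # or only the default set, scoped to a transaction when one is at hand
    repo.updatecaches(tr=tr, caches=repositorymod.CACHES_DEFAULT)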
2876 2878 def invalidatecaches(self):
2877 2879
2878 2880 if '_tagscache' in vars(self):
2879 2881 # can't use delattr on proxy
2880 2882 del self.__dict__['_tagscache']
2881 2883
2882 2884 self._branchcaches.clear()
2883 2885 self.invalidatevolatilesets()
2884 2886 self._sparsesignaturecache.clear()
2885 2887
2886 2888 def invalidatevolatilesets(self):
2887 2889 self.filteredrevcache.clear()
2888 2890 obsolete.clearobscaches(self)
2889 2891 self._quick_access_changeid_invalidate()
2890 2892
2891 2893 def invalidatedirstate(self):
2892 2894 """Invalidates the dirstate, causing the next call to dirstate
2893 2895 to check if it was modified since the last time it was read,
2894 2896 rereading it if it has.
2895 2897
2896 2898 This is different from dirstate.invalidate() in that it doesn't always
2897 2899 reread the dirstate. Use dirstate.invalidate() if you want to
2898 2900 explicitly read the dirstate again (i.e. restoring it to a previous
2899 2901 known good state)."""
2900 2902 if hasunfilteredcache(self, 'dirstate'):
2901 2903 for k in self.dirstate._filecache:
2902 2904 try:
2903 2905 delattr(self.dirstate, k)
2904 2906 except AttributeError:
2905 2907 pass
2906 2908 delattr(self.unfiltered(), 'dirstate')
2907 2909
2908 2910 def invalidate(self, clearfilecache=False):
2909 2911 """Invalidates both store and non-store parts other than dirstate
2910 2912
2911 2913 If a transaction is running, invalidation of store is omitted,
2912 2914 because discarding in-memory changes might cause inconsistency
2913 2915 (e.g. incomplete fncache causes unintentional failure, but
2914 2916 redundant one doesn't).
2915 2917 """
2916 2918 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2917 2919 for k in list(self._filecache.keys()):
2918 2920 # dirstate is invalidated separately in invalidatedirstate()
2919 2921 if k == b'dirstate':
2920 2922 continue
2921 2923 if (
2922 2924 k == b'changelog'
2923 2925 and self.currenttransaction()
2924 2926 and self.changelog._delayed
2925 2927 ):
2926 2928 # The changelog object may store unwritten revisions. We don't
2927 2929 # want to lose them.
2928 2930 # TODO: Solve the problem instead of working around it.
2929 2931 continue
2930 2932
2931 2933 if clearfilecache:
2932 2934 del self._filecache[k]
2933 2935 try:
2934 2936 delattr(unfiltered, k)
2935 2937 except AttributeError:
2936 2938 pass
2937 2939 self.invalidatecaches()
2938 2940 if not self.currenttransaction():
2939 2941 # TODO: Changing contents of store outside transaction
2940 2942 # causes inconsistency. We should make in-memory store
2941 2943 # changes detectable, and abort if changed.
2942 2944 self.store.invalidatecaches()
2943 2945
2944 2946 def invalidateall(self):
2945 2947 """Fully invalidates both store and non-store parts, causing the
2946 2948 subsequent operation to reread any outside changes."""
2947 2949 # extension should hook this to invalidate its caches
2948 2950 self.invalidate()
2949 2951 self.invalidatedirstate()
2950 2952
2951 2953 @unfilteredmethod
2952 2954 def _refreshfilecachestats(self, tr):
2953 2955 """Reload stats of cached files so that they are flagged as valid"""
2954 2956 for k, ce in self._filecache.items():
2955 2957 k = pycompat.sysstr(k)
2956 2958 if k == 'dirstate' or k not in self.__dict__:
2957 2959 continue
2958 2960 ce.refresh()
2959 2961
2960 2962 def _lock(
2961 2963 self,
2962 2964 vfs,
2963 2965 lockname,
2964 2966 wait,
2965 2967 releasefn,
2966 2968 acquirefn,
2967 2969 desc,
2968 2970 ):
2969 2971 timeout = 0
2970 2972 warntimeout = 0
2971 2973 if wait:
2972 2974 timeout = self.ui.configint(b"ui", b"timeout")
2973 2975 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2974 2976 # internal config: ui.signal-safe-lock
2975 2977 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2976 2978
2977 2979 l = lockmod.trylock(
2978 2980 self.ui,
2979 2981 vfs,
2980 2982 lockname,
2981 2983 timeout,
2982 2984 warntimeout,
2983 2985 releasefn=releasefn,
2984 2986 acquirefn=acquirefn,
2985 2987 desc=desc,
2986 2988 signalsafe=signalsafe,
2987 2989 )
2988 2990 return l
2989 2991
2990 2992 def _afterlock(self, callback):
2991 2993 """add a callback to be run when the repository is fully unlocked
2992 2994
2993 2995 The callback will be executed when the outermost lock is released
2994 2996 (with wlock being higher level than 'lock')."""
2995 2997 for ref in (self._wlockref, self._lockref):
2996 2998 l = ref and ref()
2997 2999 if l and l.held:
2998 3000 l.postrelease.append(callback)
2999 3001 break
3000 3002 else: # no lock has been found.
3001 3003 callback(True)
3002 3004
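# Illustrative sketch: registering a callback with _afterlock(), mirroring the
# commithook/runhook helpers later in this file.  The callback fires once the
# outermost lock is released (or immediately if no lock is held); the debug
# message is hypothetical.
def schedule_after_unlock(repo):
    def notify(success):
        if success:
            repo.ui.debug(b'example: repository locks fully released\n')

    repo._afterlock(notify)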
3003 3005 def lock(self, wait=True):
3004 3006 """Lock the repository store (.hg/store) and return a weak reference
3005 3007 to the lock. Use this before modifying the store (e.g. committing or
3006 3008 stripping). If you are opening a transaction, get a lock as well.
3007 3009
3008 3010 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3009 3011 'wlock' first to avoid a dead-lock hazard."""
3010 3012 l = self._currentlock(self._lockref)
3011 3013 if l is not None:
3012 3014 l.lock()
3013 3015 return l
3014 3016
3015 3017 l = self._lock(
3016 3018 vfs=self.svfs,
3017 3019 lockname=b"lock",
3018 3020 wait=wait,
3019 3021 releasefn=None,
3020 3022 acquirefn=self.invalidate,
3021 3023 desc=_(b'repository %s') % self.origroot,
3022 3024 )
3023 3025 self._lockref = weakref.ref(l)
3024 3026 return l
3025 3027
3026 3028 def wlock(self, wait=True):
3027 3029 """Lock the non-store parts of the repository (everything under
3028 3030 .hg except .hg/store) and return a weak reference to the lock.
3029 3031
3030 3032 Use this before modifying files in .hg.
3031 3033
3032 3034 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3033 3035 'wlock' first to avoid a dead-lock hazard."""
3034 3036 l = self._wlockref() if self._wlockref else None
3035 3037 if l is not None and l.held:
3036 3038 l.lock()
3037 3039 return l
3038 3040
3039 3041 # We do not need to check for non-waiting lock acquisition. Such
3040 3042 # acquisition would not cause dead-lock as they would just fail.
3041 3043 if wait and (
3042 3044 self.ui.configbool(b'devel', b'all-warnings')
3043 3045 or self.ui.configbool(b'devel', b'check-locks')
3044 3046 ):
3045 3047 if self._currentlock(self._lockref) is not None:
3046 3048 self.ui.develwarn(b'"wlock" acquired after "lock"')
3047 3049
3048 3050 def unlock():
3049 3051 if self.dirstate.pendingparentchange():
3050 3052 self.dirstate.invalidate()
3051 3053 else:
3052 3054 self.dirstate.write(None)
3053 3055
3054 3056 self._filecache[b'dirstate'].refresh()
3055 3057
3056 3058 l = self._lock(
3057 3059 self.vfs,
3058 3060 b"wlock",
3059 3061 wait,
3060 3062 unlock,
3061 3063 self.invalidatedirstate,
3062 3064 _(b'working directory of %s') % self.origroot,
3063 3065 )
3064 3066 self._wlockref = weakref.ref(l)
3065 3067 return l
3066 3068
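# Illustrative sketch: the docstrings above require taking 'wlock' before
# 'lock' to avoid the dead-lock hazard; this mirrors the ordering commit()
# uses below.  The body of the with-block is a placeholder.
def locked_operation(repo):
    with repo.wlock(), repo.lock():
        # both the working copy (.hg) and the store may be modified here
        pass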
3067 3069 def _currentlock(self, lockref):
3068 3070 """Returns the lock if it's held, or None if it's not."""
3069 3071 if lockref is None:
3070 3072 return None
3071 3073 l = lockref()
3072 3074 if l is None or not l.held:
3073 3075 return None
3074 3076 return l
3075 3077
3076 3078 def currentwlock(self):
3077 3079 """Returns the wlock if it's held, or None if it's not."""
3078 3080 return self._currentlock(self._wlockref)
3079 3081
3080 3082 def checkcommitpatterns(self, wctx, match, status, fail):
3081 3083 """check for commit arguments that aren't committable"""
3082 3084 if match.isexact() or match.prefix():
3083 3085 matched = set(status.modified + status.added + status.removed)
3084 3086
3085 3087 for f in match.files():
3086 3088 f = self.dirstate.normalize(f)
3087 3089 if f == b'.' or f in matched or f in wctx.substate:
3088 3090 continue
3089 3091 if f in status.deleted:
3090 3092 fail(f, _(b'file not found!'))
3091 3093 # Is it a directory that exists or used to exist?
3092 3094 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3093 3095 d = f + b'/'
3094 3096 for mf in matched:
3095 3097 if mf.startswith(d):
3096 3098 break
3097 3099 else:
3098 3100 fail(f, _(b"no match under directory!"))
3099 3101 elif f not in self.dirstate:
3100 3102 fail(f, _(b"file not tracked!"))
3101 3103
3102 3104 @unfilteredmethod
3103 3105 def commit(
3104 3106 self,
3105 3107 text=b"",
3106 3108 user=None,
3107 3109 date=None,
3108 3110 match=None,
3109 3111 force=False,
3110 3112 editor=None,
3111 3113 extra=None,
3112 3114 ):
3113 3115 """Add a new revision to current repository.
3114 3116
3115 3117 Revision information is gathered from the working directory,
3116 3118 match can be used to filter the committed files. If editor is
3117 3119 supplied, it is called to get a commit message.
3118 3120 """
3119 3121 if extra is None:
3120 3122 extra = {}
3121 3123
3122 3124 def fail(f, msg):
3123 3125 raise error.InputError(b'%s: %s' % (f, msg))
3124 3126
3125 3127 if not match:
3126 3128 match = matchmod.always()
3127 3129
3128 3130 if not force:
3129 3131 match.bad = fail
3130 3132
3131 3133 # lock() for recent changelog (see issue4368)
3132 3134 with self.wlock(), self.lock():
3133 3135 wctx = self[None]
3134 3136 merge = len(wctx.parents()) > 1
3135 3137
3136 3138 if not force and merge and not match.always():
3137 3139 raise error.Abort(
3138 3140 _(
3139 3141 b'cannot partially commit a merge '
3140 3142 b'(do not specify files or patterns)'
3141 3143 )
3142 3144 )
3143 3145
3144 3146 status = self.status(match=match, clean=force)
3145 3147 if force:
3146 3148 status.modified.extend(
3147 3149 status.clean
3148 3150 ) # mq may commit clean files
3149 3151
3150 3152 # check subrepos
3151 3153 subs, commitsubs, newstate = subrepoutil.precommit(
3152 3154 self.ui, wctx, status, match, force=force
3153 3155 )
3154 3156
3155 3157 # make sure all explicit patterns are matched
3156 3158 if not force:
3157 3159 self.checkcommitpatterns(wctx, match, status, fail)
3158 3160
3159 3161 cctx = context.workingcommitctx(
3160 3162 self, status, text, user, date, extra
3161 3163 )
3162 3164
3163 3165 ms = mergestatemod.mergestate.read(self)
3164 3166 mergeutil.checkunresolved(ms)
3165 3167
3166 3168 # internal config: ui.allowemptycommit
3167 3169 if cctx.isempty() and not self.ui.configbool(
3168 3170 b'ui', b'allowemptycommit'
3169 3171 ):
3170 3172 self.ui.debug(b'nothing to commit, clearing merge state\n')
3171 3173 ms.reset()
3172 3174 return None
3173 3175
3174 3176 if merge and cctx.deleted():
3175 3177 raise error.Abort(_(b"cannot commit merge with missing files"))
3176 3178
3177 3179 if editor:
3178 3180 cctx._text = editor(self, cctx, subs)
3179 3181 edited = text != cctx._text
3180 3182
3181 3183 # Save commit message in case this transaction gets rolled back
3182 3184 # (e.g. by a pretxncommit hook). Leave the content alone on
3183 3185 # the assumption that the user will use the same editor again.
3184 3186 msg_path = self.savecommitmessage(cctx._text)
3185 3187
3186 3188 # commit subs and write new state
3187 3189 if subs:
3188 3190 uipathfn = scmutil.getuipathfn(self)
3189 3191 for s in sorted(commitsubs):
3190 3192 sub = wctx.sub(s)
3191 3193 self.ui.status(
3192 3194 _(b'committing subrepository %s\n')
3193 3195 % uipathfn(subrepoutil.subrelpath(sub))
3194 3196 )
3195 3197 sr = sub.commit(cctx._text, user, date)
3196 3198 newstate[s] = (newstate[s][0], sr)
3197 3199 subrepoutil.writestate(self, newstate)
3198 3200
3199 3201 p1, p2 = self.dirstate.parents()
3200 3202 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3201 3203 try:
3202 3204 self.hook(
3203 3205 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3204 3206 )
3205 3207 with self.transaction(b'commit'):
3206 3208 ret = self.commitctx(cctx, True)
3207 3209 # update bookmarks, dirstate and mergestate
3208 3210 bookmarks.update(self, [p1, p2], ret)
3209 3211 cctx.markcommitted(ret)
3210 3212 ms.reset()
3211 3213 except: # re-raises
3212 3214 if edited:
3213 3215 self.ui.write(
3214 3216 _(b'note: commit message saved in %s\n') % msg_path
3215 3217 )
3216 3218 self.ui.write(
3217 3219 _(
3218 3220 b"note: use 'hg commit --logfile "
3219 3221 b"%s --edit' to reuse it\n"
3220 3222 )
3221 3223 % msg_path
3222 3224 )
3223 3225 raise
3224 3226
3225 3227 def commithook(unused_success):
3226 3228 # hack for commands that use a temporary commit (eg: histedit)
3227 3229 # temporary commit got stripped before hook release
3228 3230 if self.changelog.hasnode(ret):
3229 3231 self.hook(
3230 3232 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3231 3233 )
3232 3234
3233 3235 self._afterlock(commithook)
3234 3236 return ret
3235 3237
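# Illustrative sketch: a minimal programmatic commit through the method above.
# The path and message arguments are hypothetical; paths passed to the matcher
# are repository-root-relative bytes.
from mercurial import match as matchmod

def commit_one_file(repo, path, message):
    m = matchmod.exact([path])
    return repo.commit(text=message, match=m)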
3236 3238 @unfilteredmethod
3237 3239 def commitctx(self, ctx, error=False, origctx=None):
3238 3240 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3239 3241
3240 3242 @unfilteredmethod
3241 3243 def destroying(self):
3242 3244 """Inform the repository that nodes are about to be destroyed.
3243 3245 Intended for use by strip and rollback, so there's a common
3244 3246 place for anything that has to be done before destroying history.
3245 3247
3246 3248 This is mostly useful for saving state that is in memory and waiting
3247 3249 to be flushed when the current lock is released. Because a call to
3248 3250 destroyed is imminent, the repo will be invalidated causing those
3249 3251 changes to stay in memory (waiting for the next unlock), or vanish
3250 3252 completely.
3251 3253 """
3252 3254 # When using the same lock to commit and strip, the phasecache is left
3253 3255 # dirty after committing. Then when we strip, the repo is invalidated,
3254 3256 # causing those changes to disappear.
3255 3257 if '_phasecache' in vars(self):
3256 3258 self._phasecache.write()
3257 3259
3258 3260 @unfilteredmethod
3259 3261 def destroyed(self):
3260 3262 """Inform the repository that nodes have been destroyed.
3261 3263 Intended for use by strip and rollback, so there's a common
3262 3264 place for anything that has to be done after destroying history.
3263 3265 """
3264 3266 # When one tries to:
3265 3267 # 1) destroy nodes thus calling this method (e.g. strip)
3266 3268 # 2) use phasecache somewhere (e.g. commit)
3267 3269 #
3268 3270 # then 2) will fail because the phasecache contains nodes that were
3269 3271 # removed. We can either remove phasecache from the filecache,
3270 3272 # causing it to reload next time it is accessed, or simply filter
3271 3273 # the removed nodes now and write the updated cache.
3272 3274 self._phasecache.filterunknown(self)
3273 3275 self._phasecache.write()
3274 3276
3275 3277 # refresh all repository caches
3276 3278 self.updatecaches()
3277 3279
3278 3280 # Ensure the persistent tag cache is updated. Doing it now
3279 3281 # means that the tag cache only has to worry about destroyed
3280 3282 # heads immediately after a strip/rollback. That in turn
3281 3283 # guarantees that "cachetip == currenttip" (comparing both rev
3282 3284 # and node) always means no nodes have been added or destroyed.
3283 3285
3284 3286 # XXX this is suboptimal when qrefresh'ing: we strip the current
3285 3287 # head, refresh the tag cache, then immediately add a new head.
3286 3288 # But I think doing it this way is necessary for the "instant
3287 3289 # tag cache retrieval" case to work.
3288 3290 self.invalidate()
3289 3291
3290 3292 def status(
3291 3293 self,
3292 3294 node1=b'.',
3293 3295 node2=None,
3294 3296 match=None,
3295 3297 ignored=False,
3296 3298 clean=False,
3297 3299 unknown=False,
3298 3300 listsubrepos=False,
3299 3301 ):
3300 3302 '''a convenience method that calls node1.status(node2)'''
3301 3303 return self[node1].status(
3302 3304 node2, match, ignored, clean, unknown, listsubrepos
3303 3305 )
3304 3306
3305 3307 def addpostdsstatus(self, ps):
3306 3308 """Add a callback to run within the wlock, at the point at which status
3307 3309 fixups happen.
3308 3310
3309 3311 On status completion, callback(wctx, status) will be called with the
3310 3312 wlock held, unless the dirstate has changed from underneath or the wlock
3311 3313 couldn't be grabbed.
3312 3314
3313 3315 Callbacks should not capture and use a cached copy of the dirstate --
3314 3316 it might change in the meanwhile. Instead, they should access the
3315 3317 dirstate via wctx.repo().dirstate.
3316 3318
3317 3319 This list is emptied out after each status run -- extensions should
3318 3320 make sure to add to this list each time dirstate.status is called.
3319 3321 Extensions should also make sure they don't call this for statuses
3320 3322 that don't involve the dirstate.
3321 3323 """
3322 3324
3323 3325 # The list is located here for uniqueness reasons -- it is actually
3324 3326 # managed by the workingctx, but that isn't unique per-repo.
3325 3327 self._postdsstatus.append(ps)
3326 3328
3327 3329 def postdsstatus(self):
3328 3330 """Used by workingctx to get the list of post-dirstate-status hooks."""
3329 3331 return self._postdsstatus
3330 3332
3331 3333 def clearpostdsstatus(self):
3332 3334 """Used by workingctx to clear post-dirstate-status hooks."""
3333 3335 del self._postdsstatus[:]
3334 3336
3335 3337 def heads(self, start=None):
3336 3338 if start is None:
3337 3339 cl = self.changelog
3338 3340 headrevs = reversed(cl.headrevs())
3339 3341 return [cl.node(rev) for rev in headrevs]
3340 3342
3341 3343 heads = self.changelog.heads(start)
3342 3344 # sort the output in rev descending order
3343 3345 return sorted(heads, key=self.changelog.rev, reverse=True)
3344 3346
3345 3347 def branchheads(self, branch=None, start=None, closed=False):
3346 3348 """return a (possibly filtered) list of heads for the given branch
3347 3349
3348 3350 Heads are returned in topological order, from newest to oldest.
3349 3351 If branch is None, use the dirstate branch.
3350 3352 If start is not None, return only heads reachable from start.
3351 3353 If closed is True, return heads that are marked as closed as well.
3352 3354 """
3353 3355 if branch is None:
3354 3356 branch = self[None].branch()
3355 3357 branches = self.branchmap()
3356 3358 if not branches.hasbranch(branch):
3357 3359 return []
3358 3360 # the cache returns heads ordered lowest to highest
3359 3361 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3360 3362 if start is not None:
3361 3363 # filter out the heads that cannot be reached from startrev
3362 3364 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3363 3365 bheads = [h for h in bheads if h in fbheads]
3364 3366 return bheads
3365 3367
3366 3368 def branches(self, nodes):
3367 3369 if not nodes:
3368 3370 nodes = [self.changelog.tip()]
3369 3371 b = []
3370 3372 for n in nodes:
3371 3373 t = n
3372 3374 while True:
3373 3375 p = self.changelog.parents(n)
3374 3376 if p[1] != self.nullid or p[0] == self.nullid:
3375 3377 b.append((t, n, p[0], p[1]))
3376 3378 break
3377 3379 n = p[0]
3378 3380 return b
3379 3381
3380 3382 def between(self, pairs):
3381 3383 r = []
3382 3384
3383 3385 for top, bottom in pairs:
3384 3386 n, l, i = top, [], 0
3385 3387 f = 1
3386 3388
3387 3389 while n != bottom and n != self.nullid:
3388 3390 p = self.changelog.parents(n)[0]
3389 3391 if i == f:
3390 3392 l.append(n)
3391 3393 f = f * 2
3392 3394 n = p
3393 3395 i += 1
3394 3396
3395 3397 r.append(l)
3396 3398
3397 3399 return r
3398 3400
3399 3401 def checkpush(self, pushop):
3400 3402 """Extensions can override this function if additional checks have
3401 3403 to be performed before pushing, or call it if they override push
3402 3404 command.
3403 3405 """
3404 3406
3405 3407 @unfilteredpropertycache
3406 3408 def prepushoutgoinghooks(self):
3407 3409 """Return util.hooks consists of a pushop with repo, remote, outgoing
3408 3410 methods, which are called before pushing changesets.
3409 3411 """
3410 3412 return util.hooks()
3411 3413
3412 3414 def pushkey(self, namespace, key, old, new):
3413 3415 try:
3414 3416 tr = self.currenttransaction()
3415 3417 hookargs = {}
3416 3418 if tr is not None:
3417 3419 hookargs.update(tr.hookargs)
3418 3420 hookargs = pycompat.strkwargs(hookargs)
3419 3421 hookargs['namespace'] = namespace
3420 3422 hookargs['key'] = key
3421 3423 hookargs['old'] = old
3422 3424 hookargs['new'] = new
3423 3425 self.hook(b'prepushkey', throw=True, **hookargs)
3424 3426 except error.HookAbort as exc:
3425 3427 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3426 3428 if exc.hint:
3427 3429 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3428 3430 return False
3429 3431 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3430 3432 ret = pushkey.push(self, namespace, key, old, new)
3431 3433
3432 3434 def runhook(unused_success):
3433 3435 self.hook(
3434 3436 b'pushkey',
3435 3437 namespace=namespace,
3436 3438 key=key,
3437 3439 old=old,
3438 3440 new=new,
3439 3441 ret=ret,
3440 3442 )
3441 3443
3442 3444 self._afterlock(runhook)
3443 3445 return ret
3444 3446
3445 3447 def listkeys(self, namespace):
3446 3448 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3447 3449 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3448 3450 values = pushkey.list(self, namespace)
3449 3451 self.hook(b'listkeys', namespace=namespace, values=values)
3450 3452 return values
3451 3453
3452 3454 def debugwireargs(self, one, two, three=None, four=None, five=None):
3453 3455 '''used to test argument passing over the wire'''
3454 3456 return b"%s %s %s %s %s" % (
3455 3457 one,
3456 3458 two,
3457 3459 pycompat.bytestr(three),
3458 3460 pycompat.bytestr(four),
3459 3461 pycompat.bytestr(five),
3460 3462 )
3461 3463
3462 3464 def savecommitmessage(self, text):
3463 3465 fp = self.vfs(b'last-message.txt', b'wb')
3464 3466 try:
3465 3467 fp.write(text)
3466 3468 finally:
3467 3469 fp.close()
3468 3470 return self.pathto(fp.name[len(self.root) + 1 :])
3469 3471
3470 3472 def register_wanted_sidedata(self, category):
3471 3473 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3472 3474 # Only revlogv2 repos can want sidedata.
3473 3475 return
3474 3476 self._wanted_sidedata.add(pycompat.bytestr(category))
3475 3477
3476 3478 def register_sidedata_computer(
3477 3479 self, kind, category, keys, computer, flags, replace=False
3478 3480 ):
3479 3481 if kind not in revlogconst.ALL_KINDS:
3480 3482 msg = _(b"unexpected revlog kind '%s'.")
3481 3483 raise error.ProgrammingError(msg % kind)
3482 3484 category = pycompat.bytestr(category)
3483 3485 already_registered = category in self._sidedata_computers.get(kind, [])
3484 3486 if already_registered and not replace:
3485 3487 msg = _(
3486 3488 b"cannot register a sidedata computer twice for category '%s'."
3487 3489 )
3488 3490 raise error.ProgrammingError(msg % category)
3489 3491 if replace and not already_registered:
3490 3492 msg = _(
3491 3493 b"cannot replace a sidedata computer that isn't registered "
3492 3494 b"for category '%s'."
3493 3495 )
3494 3496 raise error.ProgrammingError(msg % category)
3495 3497 self._sidedata_computers.setdefault(kind, {})
3496 3498 self._sidedata_computers[kind][category] = (keys, computer, flags)
3497 3499
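# Illustrative sketch: registering a sidedata computer through the method
# above.  The category, key, flag value, and the assumed computer signature
# (repo, revlog, rev, existing sidedata) -> (sidedata, (flags to add, flags to
# drop)) are hypothetical; only the call shape follows the checks above.
from mercurial.revlogutils import constants as revlogconst

def setup_example_sidedata(repo):
    def compute_example(repo, revlog, rev, sidedata):
        return {}, (0, 0)

    repo.register_sidedata_computer(
        revlogconst.KIND_CHANGELOG,  # must be one of revlogconst.ALL_KINDS
        b'exp-example',              # hypothetical category
        (b'exp-example',),           # keys produced by this computer
        compute_example,
        0,                           # no extra revlog flag in this sketch
    )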
3498 3500
3499 3501 # used to avoid circular references so destructors work
3500 3502 def aftertrans(files):
3501 3503 renamefiles = [tuple(t) for t in files]
3502 3504
3503 3505 def a():
3504 3506 for vfs, src, dest in renamefiles:
3505 3507 # if src and dest refer to the same file, vfs.rename is a no-op,
3506 3508 # leaving both src and dest on disk. delete dest to make sure
3507 3509 # the rename couldn't be such a no-op.
3508 3510 vfs.tryunlink(dest)
3509 3511 try:
3510 3512 vfs.rename(src, dest)
3511 3513 except FileNotFoundError: # journal file does not yet exist
3512 3514 pass
3513 3515
3514 3516 return a
3515 3517
3516 3518
3517 3519 def undoname(fn):
3518 3520 base, name = os.path.split(fn)
3519 3521 assert name.startswith(b'journal')
3520 3522 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3521 3523
3522 3524
3523 3525 def instance(ui, path, create, intents=None, createopts=None):
3524 3526
3525 3527 # prevent cyclic import localrepo -> upgrade -> localrepo
3526 3528 from . import upgrade
3527 3529
3528 3530 localpath = urlutil.urllocalpath(path)
3529 3531 if create:
3530 3532 createrepository(ui, localpath, createopts=createopts)
3531 3533
3532 3534 def repo_maker():
3533 3535 return makelocalrepository(ui, localpath, intents=intents)
3534 3536
3535 3537 repo = repo_maker()
3536 3538 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3537 3539 return repo
3538 3540
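# Illustrative sketch: opening a repository through instance() above.  The ui
# construction and bytes path are assumptions; higher-level callers normally
# go through mercurial.hg.repository(), which resolves to this for local paths.
from mercurial import ui as uimod

def open_local_repo(path):
    ui = uimod.ui.load()
    return instance(ui, path, create=False)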
3539 3541
3540 3542 def islocal(path):
3541 3543 return True
3542 3544
3543 3545
3544 3546 def defaultcreateopts(ui, createopts=None):
3545 3547 """Populate the default creation options for a repository.
3546 3548
3547 3549 A dictionary of explicitly requested creation options can be passed
3548 3550 in. Missing keys will be populated.
3549 3551 """
3550 3552 createopts = dict(createopts or {})
3551 3553
3552 3554 if b'backend' not in createopts:
3553 3555 # experimental config: storage.new-repo-backend
3554 3556 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3555 3557
3556 3558 return createopts
3557 3559
3558 3560
3559 3561 def clone_requirements(ui, createopts, srcrepo):
3560 3562 """clone the requirements of a local repo for a local clone
3561 3563
3562 3564 The store requirements are unchanged while the working copy requirements
3563 3565 depend on the configuration
3564 3566 """
3565 3567 target_requirements = set()
3566 3568 if not srcrepo.requirements:
3567 3569 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3568 3570 # with it.
3569 3571 return target_requirements
3570 3572 createopts = defaultcreateopts(ui, createopts=createopts)
3571 3573 for r in newreporequirements(ui, createopts):
3572 3574 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3573 3575 target_requirements.add(r)
3574 3576
3575 3577 for r in srcrepo.requirements:
3576 3578 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3577 3579 target_requirements.add(r)
3578 3580 return target_requirements
3579 3581
3580 3582
3581 3583 def newreporequirements(ui, createopts):
3582 3584 """Determine the set of requirements for a new local repository.
3583 3585
3584 3586 Extensions can wrap this function to specify custom requirements for
3585 3587 new repositories.
3586 3588 """
3587 3589
3588 3590 if b'backend' not in createopts:
3589 3591 raise error.ProgrammingError(
3590 3592 b'backend key not present in createopts; '
3591 3593 b'was defaultcreateopts() called?'
3592 3594 )
3593 3595
3594 3596 if createopts[b'backend'] != b'revlogv1':
3595 3597 raise error.Abort(
3596 3598 _(
3597 3599 b'unable to determine repository requirements for '
3598 3600 b'storage backend: %s'
3599 3601 )
3600 3602 % createopts[b'backend']
3601 3603 )
3602 3604
3603 3605 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3604 3606 if ui.configbool(b'format', b'usestore'):
3605 3607 requirements.add(requirementsmod.STORE_REQUIREMENT)
3606 3608 if ui.configbool(b'format', b'usefncache'):
3607 3609 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3608 3610 if ui.configbool(b'format', b'dotencode'):
3609 3611 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3610 3612
3611 3613 compengines = ui.configlist(b'format', b'revlog-compression')
3612 3614 for compengine in compengines:
3613 3615 if compengine in util.compengines:
3614 3616 engine = util.compengines[compengine]
3615 3617 if engine.available() and engine.revlogheader():
3616 3618 break
3617 3619 else:
3618 3620 raise error.Abort(
3619 3621 _(
3620 3622 b'compression engines %s defined by '
3621 3623 b'format.revlog-compression not available'
3622 3624 )
3623 3625 % b', '.join(b'"%s"' % e for e in compengines),
3624 3626 hint=_(
3625 3627 b'run "hg debuginstall" to list available '
3626 3628 b'compression engines'
3627 3629 ),
3628 3630 )
3629 3631
3630 3632 # zlib is the historical default and doesn't need an explicit requirement.
3631 3633 if compengine == b'zstd':
3632 3634 requirements.add(b'revlog-compression-zstd')
3633 3635 elif compengine != b'zlib':
3634 3636 requirements.add(b'exp-compression-%s' % compengine)
3635 3637
3636 3638 if scmutil.gdinitconfig(ui):
3637 3639 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3638 3640 if ui.configbool(b'format', b'sparse-revlog'):
3639 3641 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3640 3642
3641 3643 # experimental config: format.use-dirstate-v2
3642 3644 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3643 3645 if ui.configbool(b'format', b'use-dirstate-v2'):
3644 3646 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3645 3647
3646 3648 # experimental config: format.exp-use-copies-side-data-changeset
3647 3649 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3648 3650 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3649 3651 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3650 3652 if ui.configbool(b'experimental', b'treemanifest'):
3651 3653 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3652 3654
3653 3655 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3654 3656 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3655 3657 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3656 3658
3657 3659 revlogv2 = ui.config(b'experimental', b'revlogv2')
3658 3660 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3659 3661 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3660 3662 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3661 3663 # experimental config: format.internal-phase
3662 3664 if ui.configbool(b'format', b'internal-phase'):
3663 3665 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3664 3666
3665 3667 if createopts.get(b'narrowfiles'):
3666 3668 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3667 3669
3668 3670 if createopts.get(b'lfs'):
3669 3671 requirements.add(b'lfs')
3670 3672
3671 3673 if ui.configbool(b'format', b'bookmarks-in-store'):
3672 3674 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3673 3675
3674 3676 if ui.configbool(b'format', b'use-persistent-nodemap'):
3675 3677 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3676 3678
3677 3679 # if share-safe is enabled, let's create the new repository with the new
3678 3680 # requirement
3679 3681 if ui.configbool(b'format', b'use-share-safe'):
3680 3682 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3681 3683
3682 3684 # if we are creating a share-repo¹ we have to handle requirement
3683 3685 # differently.
3684 3686 #
3685 3687 # [1] (i.e. reusing the store from another repository, just having a
3686 3688 # working copy)
3687 3689 if b'sharedrepo' in createopts:
3688 3690 source_requirements = set(createopts[b'sharedrepo'].requirements)
3689 3691
3690 3692 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3691 3693 # share to an old school repository, we have to copy the
3692 3694 # requirements and hope for the best.
3693 3695 requirements = source_requirements
3694 3696 else:
3695 3697 # We have control on the working copy only, so "copy" the non
3696 3698 # working copy part over, ignoring previous logic.
3697 3699 to_drop = set()
3698 3700 for req in requirements:
3699 3701 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3700 3702 continue
3701 3703 if req in source_requirements:
3702 3704 continue
3703 3705 to_drop.add(req)
3704 3706 requirements -= to_drop
3705 3707 requirements |= source_requirements
3706 3708
3707 3709 if createopts.get(b'sharedrelative'):
3708 3710 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3709 3711 else:
3710 3712 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3711 3713
3712 3714 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3713 3715 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3714 3716 msg = _("ignoring unknown tracked key version: %d\n")
3715 3717 hint = _("see `hg help config.format.use-dirstate-tracked-hint.version`")
3716 3718 if version != 1:
3717 3719 ui.warn(msg % version, hint=hint)
3718 3720 else:
3719 3721 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3720 3722
3721 3723 return requirements
3722 3724
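# Illustrative sketch: how configuration feeds the requirement computation
# above.  The config values set here are assumptions used only to exercise
# newreporequirements() with the defaults filled in by defaultcreateopts().
from mercurial import ui as uimod

def example_new_requirements():
    ui = uimod.ui.load()
    ui.setconfig(b'format', b'use-share-safe', b'yes', b'example')
    ui.setconfig(b'format', b'use-persistent-nodemap', b'yes', b'example')
    createopts = defaultcreateopts(ui)
    return newreporequirements(ui, createopts)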
3723 3725
3724 3726 def checkrequirementscompat(ui, requirements):
3725 3727 """Checks compatibility of repository requirements enabled and disabled.
3726 3728
3727 3729 Returns a set of requirements which need to be dropped because dependent
3728 3730 requirements are not enabled. Also warns users about it.
3729 3731
3730 3732 dropped = set()
3731 3733
3732 3734 if requirementsmod.STORE_REQUIREMENT not in requirements:
3733 3735 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3734 3736 ui.warn(
3735 3737 _(
3736 3738 b'ignoring enabled \'format.bookmarks-in-store\' config '
3737 3739 b'because it is incompatible with disabled '
3738 3740 b'\'format.usestore\' config\n'
3739 3741 )
3740 3742 )
3741 3743 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3742 3744
3743 3745 if (
3744 3746 requirementsmod.SHARED_REQUIREMENT in requirements
3745 3747 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3746 3748 ):
3747 3749 raise error.Abort(
3748 3750 _(
3749 3751 b"cannot create shared repository as source was created"
3750 3752 b" with 'format.usestore' config disabled"
3751 3753 )
3752 3754 )
3753 3755
3754 3756 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3755 3757 if ui.hasconfig(b'format', b'use-share-safe'):
3756 3758 msg = _(
3757 3759 b"ignoring enabled 'format.use-share-safe' config because "
3758 3760 b"it is incompatible with disabled 'format.usestore'"
3759 3761 b" config\n"
3760 3762 )
3761 3763 ui.warn(msg)
3762 3764 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3763 3765
3764 3766 return dropped
3765 3767
3766 3768
3767 3769 def filterknowncreateopts(ui, createopts):
3768 3770 """Filters a dict of repo creation options against options that are known.
3769 3771
3770 3772 Receives a dict of repo creation options and returns a dict of those
3771 3773 options that we don't know how to handle.
3772 3774
3773 3775 This function is called as part of repository creation. If the
3774 3776 returned dict contains any items, repository creation will not
3775 3777 be allowed, as it means there was a request to create a repository
3776 3778 with options not recognized by loaded code.
3777 3779
3778 3780 Extensions can wrap this function to filter out creation options
3779 3781 they know how to handle.
3780 3782 """
3781 3783 known = {
3782 3784 b'backend',
3783 3785 b'lfs',
3784 3786 b'narrowfiles',
3785 3787 b'sharedrepo',
3786 3788 b'sharedrelative',
3787 3789 b'shareditems',
3788 3790 b'shallowfilestore',
3789 3791 }
3790 3792
3791 3793 return {k: v for k, v in createopts.items() if k not in known}
3792 3794
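# Illustrative sketch: the docstring above invites extensions to wrap this
# function so their own creation options pass the filter.  The option name
# b'exp-myfeature' is hypothetical; this would live in an extension module.
from mercurial import extensions, localrepo

def extsetup(ui):
    def filtered(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'exp-myfeature', None)  # this extension handles it
        return unknown

    extensions.wrapfunction(localrepo, 'filterknowncreateopts', filtered)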
3793 3795
3794 3796 def createrepository(ui, path, createopts=None, requirements=None):
3795 3797 """Create a new repository in a vfs.
3796 3798
3797 3799 ``path`` path to the new repo's working directory.
3798 3800 ``createopts`` options for the new repository.
3799 3801 ``requirements`` predefined set of requirements.
3800 3802 (incompatible with ``createopts``)
3801 3803
3802 3804 The following keys for ``createopts`` are recognized:
3803 3805
3804 3806 backend
3805 3807 The storage backend to use.
3806 3808 lfs
3807 3809 Repository will be created with ``lfs`` requirement. The lfs extension
3808 3810 will automatically be loaded when the repository is accessed.
3809 3811 narrowfiles
3810 3812 Set up repository to support narrow file storage.
3811 3813 sharedrepo
3812 3814 Repository object from which storage should be shared.
3813 3815 sharedrelative
3814 3816 Boolean indicating if the path to the shared repo should be
3815 3817 stored as relative. By default, the pointer to the "parent" repo
3816 3818 is stored as an absolute path.
3817 3819 shareditems
3818 3820 Set of items to share to the new repository (in addition to storage).
3819 3821 shallowfilestore
3820 3822 Indicates that storage for files should be shallow (not all ancestor
3821 3823 revisions are known).
3822 3824 """
3823 3825
3824 3826 if requirements is not None:
3825 3827 if createopts is not None:
3826 3828 msg = b'cannot specify both createopts and requirements'
3827 3829 raise error.ProgrammingError(msg)
3828 3830 createopts = {}
3829 3831 else:
3830 3832 createopts = defaultcreateopts(ui, createopts=createopts)
3831 3833
3832 3834 unknownopts = filterknowncreateopts(ui, createopts)
3833 3835
3834 3836 if not isinstance(unknownopts, dict):
3835 3837 raise error.ProgrammingError(
3836 3838 b'filterknowncreateopts() did not return a dict'
3837 3839 )
3838 3840
3839 3841 if unknownopts:
3840 3842 raise error.Abort(
3841 3843 _(
3842 3844 b'unable to create repository because of unknown '
3843 3845 b'creation option: %s'
3844 3846 )
3845 3847 % b', '.join(sorted(unknownopts)),
3846 3848 hint=_(b'is a required extension not loaded?'),
3847 3849 )
3848 3850
3849 3851 requirements = newreporequirements(ui, createopts=createopts)
3850 3852 requirements -= checkrequirementscompat(ui, requirements)
3851 3853
3852 3854 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3853 3855
3854 3856 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3855 3857 if hgvfs.exists():
3856 3858 raise error.RepoError(_(b'repository %s already exists') % path)
3857 3859
3858 3860 if b'sharedrepo' in createopts:
3859 3861 sharedpath = createopts[b'sharedrepo'].sharedpath
3860 3862
3861 3863 if createopts.get(b'sharedrelative'):
3862 3864 try:
3863 3865 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3864 3866 sharedpath = util.pconvert(sharedpath)
3865 3867 except (IOError, ValueError) as e:
3866 3868 # ValueError is raised on Windows if the drive letters differ
3867 3869 # on each path.
3868 3870 raise error.Abort(
3869 3871 _(b'cannot calculate relative path'),
3870 3872 hint=stringutil.forcebytestr(e),
3871 3873 )
3872 3874
3873 3875 if not wdirvfs.exists():
3874 3876 wdirvfs.makedirs()
3875 3877
3876 3878 hgvfs.makedir(notindexed=True)
3877 3879 if b'sharedrepo' not in createopts:
3878 3880 hgvfs.mkdir(b'cache')
3879 3881 hgvfs.mkdir(b'wcache')
3880 3882
3881 3883 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3882 3884 if has_store and b'sharedrepo' not in createopts:
3883 3885 hgvfs.mkdir(b'store')
3884 3886
3885 3887 # We create an invalid changelog outside the store so very old
3886 3888 # Mercurial versions (which didn't know about the requirements
3887 3889 # file) encounter an error on reading the changelog. This
3888 3890 # effectively locks out old clients and prevents them from
3889 3891 # mucking with a repo in an unknown format.
3890 3892 #
3891 3893 # The revlog header has version 65535, which won't be recognized by
3892 3894 # such old clients.
3893 3895 hgvfs.append(
3894 3896 b'00changelog.i',
3895 3897 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3896 3898 b'layout',
3897 3899 )
3898 3900
3899 3901 # Filter the requirements into working copy and store ones
3900 3902 wcreq, storereq = scmutil.filterrequirements(requirements)
3901 3903 # write working copy ones
3902 3904 scmutil.writerequires(hgvfs, wcreq)
3903 3905 # If there are store requirements and the current repository
3904 3906 # is not a shared one, write stored requirements
3905 3907 # For new shared repository, we don't need to write the store
3906 3908 # requirements as they are already present in store requires
3907 3909 if storereq and b'sharedrepo' not in createopts:
3908 3910 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3909 3911 scmutil.writerequires(storevfs, storereq)
3910 3912
3911 3913 # Write out file telling readers where to find the shared store.
3912 3914 if b'sharedrepo' in createopts:
3913 3915 hgvfs.write(b'sharedpath', sharedpath)
3914 3916
3915 3917 if createopts.get(b'shareditems'):
3916 3918 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3917 3919 hgvfs.write(b'shared', shared)
3918 3920
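# Illustrative sketch: creating a repository directly with createrepository().
# The path and the lfs option are assumptions; instance() above is the usual
# entry point when creation is requested.
from mercurial import ui as uimod

def make_repo_with_lfs(path):
    ui = uimod.ui.load()
    createrepository(ui, path, createopts={b'lfs': True})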
3919 3921
3920 3922 def poisonrepository(repo):
3921 3923 """Poison a repository instance so it can no longer be used."""
3922 3924 # Perform any cleanup on the instance.
3923 3925 repo.close()
3924 3926
3925 3927 # Our strategy is to replace the type of the object with one that
3926 3928 # has all attribute lookups result in error.
3927 3929 #
3928 3930 # But we have to allow the close() method because some constructors
3929 3931 # of repos call close() on repo references.
3930 3932 class poisonedrepository:
3931 3933 def __getattribute__(self, item):
3932 3934 if item == 'close':
3933 3935 return object.__getattribute__(self, item)
3934 3936
3935 3937 raise error.ProgrammingError(
3936 3938 b'repo instances should not be used after unshare'
3937 3939 )
3938 3940
3939 3941 def close(self):
3940 3942 pass
3941 3943
3942 3944 # We may have a repoview, which intercepts __setattr__. So be sure
3943 3945 # we operate at the lowest level possible.
3944 3946 object.__setattr__(repo, '__class__', poisonedrepository)