revlog: add a mechanism to verify expected file position before appending...
Kyle Lippincott
r47349:e9901d01 default
@@ -0,0 +1,38 b''
1 from ..i18n import _
2 from .. import error
3
4
5 def get_checker(ui, revlog_name=b'changelog'):
6 """Get a function that checks that a file handle's position is as expected.
7
8 This is used to ensure that files haven't been modified outside of our
9 knowledge (such as on a networked filesystem, if `hg debuglocks` was used,
10 or if something wrote to .hg without respecting the locks).
11
12 Because revlogs support buffered, delayed, or diverted writes, we allow
13 the files to be shorter than expected (the data may not have been written
14 yet), but they can't be longer.
15
16 Please note that this check is not perfect; it can't detect all cases (there
17 may be false-negatives/false-OKs), but it should never claim there's an
18 issue when there isn't (false-positives/false-failures).
19 """
20
21 vpos = ui.config(b'debug', b'revlog.verifyposition.' + revlog_name)
22 # Avoid any `fh.tell` cost if this isn't enabled.
23 if not vpos or vpos not in [b'log', b'warn', b'fail']:
24 return None
25
26 def _checker(fh, fn, expected):
27 if fh.tell() <= expected:
28 return
29
30 msg = _(b'%s: file cursor at position %d, expected %d')
31 # Always log if we're going to warn or fail.
32 ui.log(b'debug', msg + b'\n', fn, fh.tell(), expected)
33 if vpos == b'warn':
34 ui.warn((msg + b'\n') % (fn, fh.tell(), expected))
35 elif vpos == b'fail':
36 raise error.RevlogError(msg % (fn, fh.tell(), expected))
37
38 return _checker
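The new helper is driven entirely by configuration: the configitems.py hunk further down registers `debug.revlog.verifyposition.changelog` with an empty default, so the check stays off unless a value is set. As a minimal illustration (assuming only the section/key names visible in this diff), a repository could opt into the strictest mode via its hgrc:

    [debug]
    # accepted values per get_checker above: log (log only), warn (log + warn),
    # fail (log + raise RevlogError, aborting the transaction)
    revlog.verifyposition.changelog = fail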
@@ -0,0 +1,102 b''
1 #testcases skip-detection fail-if-detected
2
3 Test situations that "should" only be reproducible:
4 - on networked filesystems, or
5 - a user using `hg debuglocks` to eliminate the lock file, or
6 - something (that doesn't respect the lock file) writing to the .hg directory
7 while we're running
8
9 $ hg init a
10 $ cd a
11
12 $ cat > "$TESTTMP/waitlock_editor.sh" <<EOF
13 > [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
14 > f="\${WAITLOCK_FILE}"
15 > start=\`date +%s\`
16 > timeout=5
17 > while [ \\( ! -f \$f \\) -a \\( ! -L \$f \\) ]; do
18 > now=\`date +%s\`
19 > if [ "\`expr \$now - \$start\`" -gt \$timeout ]; then
20 > echo "timeout: \$f was not created in \$timeout seconds (it is now \$(date +%s))"
21 > exit 1
22 > fi
23 > sleep 0.1
24 > done
25 > if [ \$# -gt 1 ]; then
26 > cat "\$@"
27 > fi
28 > EOF
29 $ chmod +x "$TESTTMP/waitlock_editor.sh"
30
31 Things behave differently if we don't already have a 00changelog.i file when
32 this all starts, so let's make one.
33
34 $ echo r0 > r0
35 $ hg commit -qAm 'r0'
36
37 Start an hg commit that will take a while
38 $ EDITOR_STARTED="$(pwd)/.editor_started"
39 $ MISCHIEF_MANAGED="$(pwd)/.mischief_managed"
40 $ JOBS_FINISHED="$(pwd)/.jobs_finished"
41
42 #if fail-if-detected
43 $ cat >> .hg/hgrc << EOF
44 > [debug]
45 > revlog.verifyposition.changelog = fail
46 > EOF
47 #endif
48
49 $ echo foo > foo
50 $ (WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
51 > WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
52 > HGEDITOR="$TESTTMP/waitlock_editor.sh" \
53 > hg commit -qAm 'r1 (foo)' --edit foo > .foo_commit_out 2>&1 ; touch "${JOBS_FINISHED}") &
54
55 Wait for the "editor" to actually start
56 $ WAITLOCK_FILE="${EDITOR_STARTED}" "$TESTTMP/waitlock_editor.sh"
57
58 Break the locks, and make another commit.
59 $ hg debuglocks -LW
60 $ echo bar > bar
61 $ hg commit -qAm 'r2 (bar)' bar
62 $ hg debugrevlogindex -c
63 rev linkrev nodeid p1 p2
64 0 0 222799e2f90b 000000000000 000000000000
65 1 1 6f124f6007a0 222799e2f90b 000000000000
66
67 Awaken the editor from that first commit
68 $ touch "${MISCHIEF_MANAGED}"
69 And wait for it to finish
70 $ WAITLOCK_FILE="${JOBS_FINISHED}" "$TESTTMP/waitlock_editor.sh"
71
72 #if skip-detection
73 (Ensure there was no output)
74 $ cat .foo_commit_out
75 And observe a corrupted repository -- rev 2's linkrev is 1, which should never
76 happen for the changelog (the linkrev should always refer to itself).
77 $ hg debugrevlogindex -c
78 rev linkrev nodeid p1 p2
79 0 0 222799e2f90b 000000000000 000000000000
80 1 1 6f124f6007a0 222799e2f90b 000000000000
81 2 1 ac80e6205bb2 222799e2f90b 000000000000
82 #endif
83
84 #if fail-if-detected
85 $ cat .foo_commit_out
86 transaction abort!
87 rollback completed
88 note: commit message saved in .hg/last-message.txt
89 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
90 abort: 00changelog.i: file cursor at position 249, expected 121
91 And no corruption in the changelog.
92 $ hg debugrevlogindex -c
93 rev linkrev nodeid p1 p2
94 0 0 222799e2f90b 000000000000 000000000000
95 1 1 6f124f6007a0 222799e2f90b 000000000000
96 And, because of transactions, there's none in the manifestlog either.
97 $ hg debugrevlogindex -m
98 rev linkrev nodeid p1 p2
99 0 0 7b7020262a56 000000000000 000000000000
100 1 1 ad3fe36d86d9 7b7020262a56 000000000000
101 #endif
102
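The revlog-side changes that consume this checker are not part of this excerpt, but the expected call shape follows from `get_checker` above: the caller obtains a checker once (it is `None` when the debug config is unset) and invokes it with the open file handle, the file name, and the offset it believes the file ends at, right before appending. A rough, hypothetical sketch of such a call site (`ifh`, `entry`, and `expected_end` are illustrative names, not the actual revlog attributes):

    # Hypothetical caller, mirroring the _checker(fh, fn, expected) signature above.
    # `checker` would come from get_checker(ui, b'changelog'); `expected_end` is
    # where the in-memory index believes the file ends. A larger on-disk position
    # means something appended behind our back.
    if checker is not None:
        checker(ifh, self.indexfile, expected_end)
    ifh.write(entry)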
@@ -1,343 +1,343 b''
1 1 """grant Mercurial the ability to operate on Git repositories. (EXPERIMENTAL)
2 2
3 3 This is currently super experimental. It probably will consume your
4 4 firstborn a la Rumpelstiltskin, etc.
5 5 """
6 6
7 7 from __future__ import absolute_import
8 8
9 9 import os
10 10
11 11 from mercurial.i18n import _
12 12
13 13 from mercurial import (
14 14 commands,
15 15 error,
16 16 extensions,
17 17 localrepo,
18 18 pycompat,
19 19 registrar,
20 20 scmutil,
21 21 store,
22 22 util,
23 23 )
24 24
25 25 from . import (
26 26 dirstate,
27 27 gitlog,
28 28 gitutil,
29 29 index,
30 30 )
31 31
32 32 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
33 33 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
34 34 # be specifying the version(s) of Mercurial they are tested with, or
35 35 # leave the attribute unspecified.
36 36 testedwith = b'ships-with-hg-core'
37 37
38 38 configtable = {}
39 39 configitem = registrar.configitem(configtable)
40 40 # git.log-index-cache-miss: internal knob for testing
41 41 configitem(
42 42 b"git",
43 43 b"log-index-cache-miss",
44 44 default=False,
45 45 )
46 46
47 47 getversion = gitutil.pygit2_version
48 48
49 49
50 50 # TODO: extract an interface for this in core
51 51 class gitstore(object): # store.basicstore):
52 52 def __init__(self, path, vfstype):
53 53 self.vfs = vfstype(path)
54 54 self.path = self.vfs.base
55 55 self.createmode = store._calcmode(self.vfs)
56 56 # above lines should go away in favor of:
57 57 # super(gitstore, self).__init__(path, vfstype)
58 58
59 59 self.git = gitutil.get_pygit2().Repository(
60 60 os.path.normpath(os.path.join(path, b'..', b'.git'))
61 61 )
62 62 self._progress_factory = lambda *args, **kwargs: None
63 63 self._logfn = lambda x: None
64 64
65 65 @util.propertycache
66 66 def _db(self):
67 67 # We lazy-create the database because we want to thread a
68 68 # progress callback down to the indexing process if it's
69 69 # required, and we don't have a ui handle in makestore().
70 70 return index.get_index(self.git, self._logfn, self._progress_factory)
71 71
72 72 def join(self, f):
73 73 """Fake store.join method for git repositories.
74 74
75 75 For the most part, store.join is used for @storecache
76 76 decorators to invalidate caches when various files
77 77 change. We'll map the ones we care about, and ignore the rest.
78 78 """
79 79 if f in (b'00changelog.i', b'00manifest.i'):
80 80 # This is close enough: in order for the changelog cache
81 81 # to be invalidated, HEAD will have to change.
82 82 return os.path.join(self.path, b'HEAD')
83 83 elif f == b'lock':
84 84 # TODO: we probably want to map this to a git lock, I
85 85 # suspect index.lock. We should figure out what the
86 86 # most-alike file is in git-land. For now we're risking
87 87 # bad concurrency errors if another git client is used.
88 88 return os.path.join(self.path, b'hgit-bogus-lock')
89 89 elif f in (b'obsstore', b'phaseroots', b'narrowspec', b'bookmarks'):
90 90 return os.path.join(self.path, b'..', b'.hg', f)
91 91 raise NotImplementedError(b'Need to pick file for %s.' % f)
92 92
93 def changelog(self, trypending):
93 def changelog(self, trypending, concurrencychecker):
94 94 # TODO we don't have a plan for trypending in hg's git support yet
95 95 return gitlog.changelog(self.git, self._db)
96 96
97 97 def manifestlog(self, repo, storenarrowmatch):
98 98 # TODO handle storenarrowmatch and figure out if we need the repo arg
99 99 return gitlog.manifestlog(self.git, self._db)
100 100
101 101 def invalidatecaches(self):
102 102 pass
103 103
104 104 def write(self, tr=None):
105 105 # normally this handles things like fncache writes, which we don't have
106 106 pass
107 107
108 108
109 109 def _makestore(orig, requirements, storebasepath, vfstype):
110 110 if b'git' in requirements:
111 111 if not os.path.exists(os.path.join(storebasepath, b'..', b'.git')):
112 112 raise error.Abort(
113 113 _(
114 114 b'repository specified git format in '
115 115 b'.hg/requires but has no .git directory'
116 116 )
117 117 )
118 118 # Check for presence of pygit2 only here. The assumption is that we'll
119 119 # run this code iff we'll later need pygit2.
120 120 if gitutil.get_pygit2() is None:
121 121 raise error.Abort(
122 122 _(
123 123 b'the git extension requires the Python '
124 124 b'pygit2 library to be installed'
125 125 )
126 126 )
127 127
128 128 return gitstore(storebasepath, vfstype)
129 129 return orig(requirements, storebasepath, vfstype)
130 130
131 131
132 132 class gitfilestorage(object):
133 133 def file(self, path):
134 134 if path[0:1] == b'/':
135 135 path = path[1:]
136 136 return gitlog.filelog(self.store.git, self.store._db, path)
137 137
138 138
139 139 def _makefilestorage(orig, requirements, features, **kwargs):
140 140 store = kwargs['store']
141 141 if isinstance(store, gitstore):
142 142 return gitfilestorage
143 143 return orig(requirements, features, **kwargs)
144 144
145 145
146 146 def _setupdothg(ui, path):
147 147 dothg = os.path.join(path, b'.hg')
148 148 if os.path.exists(dothg):
149 149 ui.warn(_(b'git repo already initialized for hg\n'))
150 150 else:
151 151 os.mkdir(os.path.join(path, b'.hg'))
152 152 # TODO is it ok to extend .git/info/exclude like this?
153 153 with open(
154 154 os.path.join(path, b'.git', b'info', b'exclude'), 'ab'
155 155 ) as exclude:
156 156 exclude.write(b'\n.hg\n')
157 157 with open(os.path.join(dothg, b'requires'), 'wb') as f:
158 158 f.write(b'git\n')
159 159
160 160
161 161 _BMS_PREFIX = 'refs/heads/'
162 162
163 163
164 164 class gitbmstore(object):
165 165 def __init__(self, gitrepo):
166 166 self.gitrepo = gitrepo
167 167 self._aclean = True
168 168 self._active = gitrepo.references['HEAD'] # git head, not mark
169 169
170 170 def __contains__(self, name):
171 171 return (
172 172 _BMS_PREFIX + pycompat.fsdecode(name)
173 173 ) in self.gitrepo.references
174 174
175 175 def __iter__(self):
176 176 for r in self.gitrepo.listall_references():
177 177 if r.startswith(_BMS_PREFIX):
178 178 yield pycompat.fsencode(r[len(_BMS_PREFIX) :])
179 179
180 180 def __getitem__(self, k):
181 181 return (
182 182 self.gitrepo.references[_BMS_PREFIX + pycompat.fsdecode(k)]
183 183 .peel()
184 184 .id.raw
185 185 )
186 186
187 187 def get(self, k, default=None):
188 188 try:
189 189 if k in self:
190 190 return self[k]
191 191 return default
192 192 except gitutil.get_pygit2().InvalidSpecError:
193 193 return default
194 194
195 195 @property
196 196 def active(self):
197 197 h = self.gitrepo.references['HEAD']
198 198 if not isinstance(h.target, str) or not h.target.startswith(
199 199 _BMS_PREFIX
200 200 ):
201 201 return None
202 202 return pycompat.fsencode(h.target[len(_BMS_PREFIX) :])
203 203
204 204 @active.setter
205 205 def active(self, mark):
206 206 githead = mark is not None and (_BMS_PREFIX + mark) or None
207 207 if githead is not None and githead not in self.gitrepo.references:
208 208 raise AssertionError(b'bookmark %s does not exist!' % mark)
209 209
210 210 self._active = githead
211 211 self._aclean = False
212 212
213 213 def _writeactive(self):
214 214 if self._aclean:
215 215 return
216 216 self.gitrepo.references.create('HEAD', self._active, True)
217 217 self._aclean = True
218 218
219 219 def names(self, node):
220 220 r = []
221 221 for ref in self.gitrepo.listall_references():
222 222 if not ref.startswith(_BMS_PREFIX):
223 223 continue
224 224 if self.gitrepo.references[ref].peel().id.raw != node:
225 225 continue
226 226 r.append(pycompat.fsencode(ref[len(_BMS_PREFIX) :]))
227 227 return r
228 228
229 229 # Cleanup opportunity: this is *identical* to core's bookmarks store.
230 230 def expandname(self, bname):
231 231 if bname == b'.':
232 232 if self.active:
233 233 return self.active
234 234 raise error.RepoLookupError(_(b"no active bookmark"))
235 235 return bname
236 236
237 237 def applychanges(self, repo, tr, changes):
238 238 """Apply a list of changes to bookmarks"""
239 239 # TODO: this should respect transactions, but that's going to
240 240 # require enlarging the gitbmstore to know how to do in-memory
241 241 # temporary writes and read those back prior to transaction
242 242 # finalization.
243 243 for name, node in changes:
244 244 if node is None:
245 245 self.gitrepo.references.delete(
246 246 _BMS_PREFIX + pycompat.fsdecode(name)
247 247 )
248 248 else:
249 249 self.gitrepo.references.create(
250 250 _BMS_PREFIX + pycompat.fsdecode(name),
251 251 gitutil.togitnode(node),
252 252 force=True,
253 253 )
254 254
255 255 def checkconflict(self, mark, force=False, target=None):
256 256 githead = _BMS_PREFIX + mark
257 257 cur = self.gitrepo.references['HEAD']
258 258 if githead in self.gitrepo.references and not force:
259 259 if target:
260 260 if self.gitrepo.references[githead] == target and target == cur:
261 261 # re-activating a bookmark
262 262 return []
263 263 # moving a bookmark - forward?
264 264 raise NotImplementedError
265 265 raise error.Abort(
266 266 _(b"bookmark '%s' already exists (use -f to force)") % mark
267 267 )
268 268 if len(mark) > 3 and not force:
269 269 try:
270 270 shadowhash = scmutil.isrevsymbol(self._repo, mark)
271 271 except error.LookupError: # ambiguous identifier
272 272 shadowhash = False
273 273 if shadowhash:
274 274 self._repo.ui.warn(
275 275 _(
276 276 b"bookmark %s matches a changeset hash\n"
277 277 b"(did you leave a -r out of an 'hg bookmark' "
278 278 b"command?)\n"
279 279 )
280 280 % mark
281 281 )
282 282 return []
283 283
284 284
285 285 def init(orig, ui, dest=b'.', **opts):
286 286 if opts.get('git', False):
287 287 path = os.path.abspath(dest)
288 288 # TODO: walk up looking for the git repo
289 289 _setupdothg(ui, path)
290 290 return 0
291 291 return orig(ui, dest=dest, **opts)
292 292
293 293
294 294 def reposetup(ui, repo):
295 295 if repo.local() and isinstance(repo.store, gitstore):
296 296 orig = repo.__class__
297 297 repo.store._progress_factory = repo.ui.makeprogress
298 298 if ui.configbool(b'git', b'log-index-cache-miss'):
299 299 repo.store._logfn = repo.ui.warn
300 300
301 301 class gitlocalrepo(orig):
302 302 def _makedirstate(self):
303 303 # TODO narrow support here
304 304 return dirstate.gitdirstate(
305 305 self.ui, self.vfs.base, self.store.git
306 306 )
307 307
308 308 def commit(self, *args, **kwargs):
309 309 ret = orig.commit(self, *args, **kwargs)
310 310 if ret is None:
311 311 # there was nothing to commit, so we should skip
312 312 # the index fixup logic we'd otherwise do.
313 313 return None
314 314 tid = self.store.git[gitutil.togitnode(ret)].tree.id
315 315 # DANGER! This will flush any writes staged to the
316 316 # index in Git, but we're sidestepping the index in a
317 317 # way that confuses git when we commit. Alas.
318 318 self.store.git.index.read_tree(tid)
319 319 self.store.git.index.write()
320 320 return ret
321 321
322 322 @property
323 323 def _bookmarks(self):
324 324 return gitbmstore(self.store.git)
325 325
326 326 repo.__class__ = gitlocalrepo
327 327 return repo
328 328
329 329
330 330 def _featuresetup(ui, supported):
331 331 # don't die on seeing a repo with the git requirement
332 332 supported |= {b'git'}
333 333
334 334
335 335 def extsetup(ui):
336 336 extensions.wrapfunction(localrepo, b'makestore', _makestore)
337 337 extensions.wrapfunction(localrepo, b'makefilestorage', _makefilestorage)
338 338 # Inject --git flag for `hg init`
339 339 entry = extensions.wrapcommand(commands.table, b'init', init)
340 340 entry[1].extend(
341 341 [(b'', b'git', None, b'set up a git repository instead of hg')]
342 342 )
343 343 localrepo.featuresetupfuncs.add(_featuresetup)
@@ -1,618 +1,622 b''
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullid,
15 15 )
16 16 from .thirdparty import attr
17 17
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 metadata,
22 22 pycompat,
23 23 revlog,
24 24 )
25 25 from .utils import (
26 26 dateutil,
27 27 stringutil,
28 28 )
29 29 from .revlogutils import flagutil
30 30
31 31 _defaultextra = {b'branch': b'default'}
32 32
33 33
34 34 def _string_escape(text):
35 35 """
36 36 >>> from .pycompat import bytechr as chr
37 37 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
38 38 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
39 39 >>> s
40 40 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
41 41 >>> res = _string_escape(s)
42 42 >>> s == _string_unescape(res)
43 43 True
44 44 """
45 45 # subset of the string_escape codec
46 46 text = (
47 47 text.replace(b'\\', b'\\\\')
48 48 .replace(b'\n', b'\\n')
49 49 .replace(b'\r', b'\\r')
50 50 )
51 51 return text.replace(b'\0', b'\\0')
52 52
53 53
54 54 def _string_unescape(text):
55 55 if b'\\0' in text:
56 56 # fix up \0 without getting into trouble with \\0
57 57 text = text.replace(b'\\\\', b'\\\\\n')
58 58 text = text.replace(b'\\0', b'\0')
59 59 text = text.replace(b'\n', b'')
60 60 return stringutil.unescapestr(text)
61 61
62 62
63 63 def decodeextra(text):
64 64 """
65 65 >>> from .pycompat import bytechr as chr
66 66 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
67 67 ... ).items())
68 68 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
69 69 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
70 70 ... b'baz': chr(92) + chr(0) + b'2'})
71 71 ... ).items())
72 72 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
73 73 """
74 74 extra = _defaultextra.copy()
75 75 for l in text.split(b'\0'):
76 76 if l:
77 77 k, v = _string_unescape(l).split(b':', 1)
78 78 extra[k] = v
79 79 return extra
80 80
81 81
82 82 def encodeextra(d):
83 83 # keys must be sorted to produce a deterministic changelog entry
84 84 items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
85 85 return b"\0".join(items)
86 86
87 87
88 88 def stripdesc(desc):
89 89 """strip trailing whitespace and leading and trailing empty lines"""
90 90 return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
91 91
92 92
93 93 class appender(object):
94 94 """the changelog index must be updated last on disk, so we use this class
95 95 to delay writes to it"""
96 96
97 97 def __init__(self, vfs, name, mode, buf):
98 98 self.data = buf
99 99 fp = vfs(name, mode)
100 100 self.fp = fp
101 101 self.offset = fp.tell()
102 102 self.size = vfs.fstat(fp).st_size
103 103 self._end = self.size
104 104
105 105 def end(self):
106 106 return self._end
107 107
108 108 def tell(self):
109 109 return self.offset
110 110
111 111 def flush(self):
112 112 pass
113 113
114 114 @property
115 115 def closed(self):
116 116 return self.fp.closed
117 117
118 118 def close(self):
119 119 self.fp.close()
120 120
121 121 def seek(self, offset, whence=0):
122 122 '''virtual file offset spans real file and data'''
123 123 if whence == 0:
124 124 self.offset = offset
125 125 elif whence == 1:
126 126 self.offset += offset
127 127 elif whence == 2:
128 128 self.offset = self.end() + offset
129 129 if self.offset < self.size:
130 130 self.fp.seek(self.offset)
131 131
132 132 def read(self, count=-1):
133 133 '''only trick here is reads that span real file and data'''
134 134 ret = b""
135 135 if self.offset < self.size:
136 136 s = self.fp.read(count)
137 137 ret = s
138 138 self.offset += len(s)
139 139 if count > 0:
140 140 count -= len(s)
141 141 if count != 0:
142 142 doff = self.offset - self.size
143 143 self.data.insert(0, b"".join(self.data))
144 144 del self.data[1:]
145 145 s = self.data[0][doff : doff + count]
146 146 self.offset += len(s)
147 147 ret += s
148 148 return ret
149 149
150 150 def write(self, s):
151 151 self.data.append(bytes(s))
152 152 self.offset += len(s)
153 153 self._end += len(s)
154 154
155 155 def __enter__(self):
156 156 self.fp.__enter__()
157 157 return self
158 158
159 159 def __exit__(self, *args):
160 160 return self.fp.__exit__(*args)
161 161
162 162
163 163 class _divertopener(object):
164 164 def __init__(self, opener, target):
165 165 self._opener = opener
166 166 self._target = target
167 167
168 168 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
169 169 if name != self._target:
170 170 return self._opener(name, mode, **kwargs)
171 171 return self._opener(name + b".a", mode, **kwargs)
172 172
173 173 def __getattr__(self, attr):
174 174 return getattr(self._opener, attr)
175 175
176 176
177 177 def _delayopener(opener, target, buf):
178 178 """build an opener that stores chunks in 'buf' instead of 'target'"""
179 179
180 180 def _delay(name, mode=b'r', checkambig=False, **kwargs):
181 181 if name != target:
182 182 return opener(name, mode, **kwargs)
183 183 assert not kwargs
184 184 return appender(opener, name, mode, buf)
185 185
186 186 return _delay
187 187
188 188
189 189 @attr.s
190 190 class _changelogrevision(object):
191 191 # Extensions might modify _defaultextra, so let the constructor below pass
192 192 # it in
193 193 extra = attr.ib()
194 194 manifest = attr.ib(default=nullid)
195 195 user = attr.ib(default=b'')
196 196 date = attr.ib(default=(0, 0))
197 197 files = attr.ib(default=attr.Factory(list))
198 198 filesadded = attr.ib(default=None)
199 199 filesremoved = attr.ib(default=None)
200 200 p1copies = attr.ib(default=None)
201 201 p2copies = attr.ib(default=None)
202 202 description = attr.ib(default=b'')
203 203 branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
204 204
205 205
206 206 class changelogrevision(object):
207 207 """Holds results of a parsed changelog revision.
208 208
209 209 Changelog revisions consist of multiple pieces of data, including
210 210 the manifest node, user, and date. This object exposes a view into
211 211 the parsed object.
212 212 """
213 213
214 214 __slots__ = (
215 215 '_offsets',
216 216 '_text',
217 217 '_sidedata',
218 218 '_cpsd',
219 219 '_changes',
220 220 )
221 221
222 222 def __new__(cls, text, sidedata, cpsd):
223 223 if not text:
224 224 return _changelogrevision(extra=_defaultextra)
225 225
226 226 self = super(changelogrevision, cls).__new__(cls)
227 227 # We could return here and implement the following as an __init__.
228 228 # But doing it here is equivalent and saves an extra function call.
229 229
230 230 # format used:
231 231 # nodeid\n : manifest node in ascii
232 232 # user\n : user, no \n or \r allowed
233 233 # time tz extra\n : date (time is int or float, timezone is int)
234 234 # : extra is metadata, encoded and separated by '\0'
235 235 # : older versions ignore it
236 236 # files\n\n : files modified by the cset, no \n or \r allowed
237 237 # (.*) : comment (free text, ideally utf-8)
238 238 #
239 239 # changelog v0 doesn't use extra
240 240
241 241 nl1 = text.index(b'\n')
242 242 nl2 = text.index(b'\n', nl1 + 1)
243 243 nl3 = text.index(b'\n', nl2 + 1)
244 244
245 245 # The list of files may be empty, in which case nl3 is the first of the
246 246 # double newline that precedes the description.
247 247 if text[nl3 + 1 : nl3 + 2] == b'\n':
248 248 doublenl = nl3
249 249 else:
250 250 doublenl = text.index(b'\n\n', nl3 + 1)
251 251
252 252 self._offsets = (nl1, nl2, nl3, doublenl)
253 253 self._text = text
254 254 self._sidedata = sidedata
255 255 self._cpsd = cpsd
256 256 self._changes = None
257 257
258 258 return self
259 259
260 260 @property
261 261 def manifest(self):
262 262 return bin(self._text[0 : self._offsets[0]])
263 263
264 264 @property
265 265 def user(self):
266 266 off = self._offsets
267 267 return encoding.tolocal(self._text[off[0] + 1 : off[1]])
268 268
269 269 @property
270 270 def _rawdate(self):
271 271 off = self._offsets
272 272 dateextra = self._text[off[1] + 1 : off[2]]
273 273 return dateextra.split(b' ', 2)[0:2]
274 274
275 275 @property
276 276 def _rawextra(self):
277 277 off = self._offsets
278 278 dateextra = self._text[off[1] + 1 : off[2]]
279 279 fields = dateextra.split(b' ', 2)
280 280 if len(fields) != 3:
281 281 return None
282 282
283 283 return fields[2]
284 284
285 285 @property
286 286 def date(self):
287 287 raw = self._rawdate
288 288 time = float(raw[0])
289 289 # Various tools did silly things with the timezone.
290 290 try:
291 291 timezone = int(raw[1])
292 292 except ValueError:
293 293 timezone = 0
294 294
295 295 return time, timezone
296 296
297 297 @property
298 298 def extra(self):
299 299 raw = self._rawextra
300 300 if raw is None:
301 301 return _defaultextra
302 302
303 303 return decodeextra(raw)
304 304
305 305 @property
306 306 def changes(self):
307 307 if self._changes is not None:
308 308 return self._changes
309 309 if self._cpsd:
310 310 changes = metadata.decode_files_sidedata(self._sidedata)
311 311 else:
312 312 changes = metadata.ChangingFiles(
313 313 touched=self.files or (),
314 314 added=self.filesadded or (),
315 315 removed=self.filesremoved or (),
316 316 p1_copies=self.p1copies or {},
317 317 p2_copies=self.p2copies or {},
318 318 )
319 319 self._changes = changes
320 320 return changes
321 321
322 322 @property
323 323 def files(self):
324 324 if self._cpsd:
325 325 return sorted(self.changes.touched)
326 326 off = self._offsets
327 327 if off[2] == off[3]:
328 328 return []
329 329
330 330 return self._text[off[2] + 1 : off[3]].split(b'\n')
331 331
332 332 @property
333 333 def filesadded(self):
334 334 if self._cpsd:
335 335 return self.changes.added
336 336 else:
337 337 rawindices = self.extra.get(b'filesadded')
338 338 if rawindices is None:
339 339 return None
340 340 return metadata.decodefileindices(self.files, rawindices)
341 341
342 342 @property
343 343 def filesremoved(self):
344 344 if self._cpsd:
345 345 return self.changes.removed
346 346 else:
347 347 rawindices = self.extra.get(b'filesremoved')
348 348 if rawindices is None:
349 349 return None
350 350 return metadata.decodefileindices(self.files, rawindices)
351 351
352 352 @property
353 353 def p1copies(self):
354 354 if self._cpsd:
355 355 return self.changes.copied_from_p1
356 356 else:
357 357 rawcopies = self.extra.get(b'p1copies')
358 358 if rawcopies is None:
359 359 return None
360 360 return metadata.decodecopies(self.files, rawcopies)
361 361
362 362 @property
363 363 def p2copies(self):
364 364 if self._cpsd:
365 365 return self.changes.copied_from_p2
366 366 else:
367 367 rawcopies = self.extra.get(b'p2copies')
368 368 if rawcopies is None:
369 369 return None
370 370 return metadata.decodecopies(self.files, rawcopies)
371 371
372 372 @property
373 373 def description(self):
374 374 return encoding.tolocal(self._text[self._offsets[3] + 2 :])
375 375
376 376 @property
377 377 def branchinfo(self):
378 378 extra = self.extra
379 379 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
380 380
381 381
382 382 class changelog(revlog.revlog):
383 def __init__(self, opener, trypending=False):
383 def __init__(self, opener, trypending=False, concurrencychecker=None):
384 384 """Load a changelog revlog using an opener.
385 385
386 386 If ``trypending`` is true, we attempt to load the index from a
387 387 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
388 388 The ``00changelog.i.a`` file contains index (and possibly inline
389 389 revision) data for a transaction that hasn't been finalized yet.
390 390 It exists in a separate file to facilitate readers (such as
391 391 hook processes) accessing data before a transaction is finalized.
392
393 ``concurrencychecker`` will be passed to the revlog init function; see
394 the documentation there.
392 395 """
393 396 if trypending and opener.exists(b'00changelog.i.a'):
394 397 indexfile = b'00changelog.i.a'
395 398 else:
396 399 indexfile = b'00changelog.i'
397 400
398 401 datafile = b'00changelog.d'
399 402 revlog.revlog.__init__(
400 403 self,
401 404 opener,
402 405 indexfile,
403 406 datafile=datafile,
404 407 checkambig=True,
405 408 mmaplargeindex=True,
406 409 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
410 concurrencychecker=concurrencychecker,
407 411 )
408 412
409 413 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
410 414 # changelogs don't benefit from generaldelta.
411 415
412 416 self.version &= ~revlog.FLAG_GENERALDELTA
413 417 self._generaldelta = False
414 418
415 419 # Delta chains for changelogs tend to be very small because entries
416 420 # tend to be small and don't delta well with each other. So disable delta
417 421 # chains.
418 422 self._storedeltachains = False
419 423
420 424 self._realopener = opener
421 425 self._delayed = False
422 426 self._delaybuf = None
423 427 self._divert = False
424 428 self._filteredrevs = frozenset()
425 429 self._filteredrevs_hashcache = {}
426 430 self._copiesstorage = opener.options.get(b'copies-storage')
427 431
428 432 @property
429 433 def filteredrevs(self):
430 434 return self._filteredrevs
431 435
432 436 @filteredrevs.setter
433 437 def filteredrevs(self, val):
434 438 # Ensure all updates go through this function
435 439 assert isinstance(val, frozenset)
436 440 self._filteredrevs = val
437 441 self._filteredrevs_hashcache = {}
438 442
439 443 def delayupdate(self, tr):
440 444 """delay visibility of index updates to other readers"""
441 445
442 446 if not self._delayed:
443 447 if len(self) == 0:
444 448 self._divert = True
445 449 if self._realopener.exists(self.indexfile + b'.a'):
446 450 self._realopener.unlink(self.indexfile + b'.a')
447 451 self.opener = _divertopener(self._realopener, self.indexfile)
448 452 else:
449 453 self._delaybuf = []
450 454 self.opener = _delayopener(
451 455 self._realopener, self.indexfile, self._delaybuf
452 456 )
453 457 self._delayed = True
454 458 tr.addpending(b'cl-%i' % id(self), self._writepending)
455 459 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
456 460
457 461 def _finalize(self, tr):
458 462 """finalize index updates"""
459 463 self._delayed = False
460 464 self.opener = self._realopener
461 465 # move redirected index data back into place
462 466 if self._divert:
463 467 assert not self._delaybuf
464 468 tmpname = self.indexfile + b".a"
465 469 nfile = self.opener.open(tmpname)
466 470 nfile.close()
467 471 self.opener.rename(tmpname, self.indexfile, checkambig=True)
468 472 elif self._delaybuf:
469 473 fp = self.opener(self.indexfile, b'a', checkambig=True)
470 474 fp.write(b"".join(self._delaybuf))
471 475 fp.close()
472 476 self._delaybuf = None
473 477 self._divert = False
474 478 # split when we're done
475 479 self._enforceinlinesize(tr)
476 480
477 481 def _writepending(self, tr):
478 482 """create a file containing the unfinalized state for
479 483 pretxnchangegroup"""
480 484 if self._delaybuf:
481 485 # make a temporary copy of the index
482 486 fp1 = self._realopener(self.indexfile)
483 487 pendingfilename = self.indexfile + b".a"
484 488 # register as a temp file to ensure cleanup on failure
485 489 tr.registertmp(pendingfilename)
486 490 # write existing data
487 491 fp2 = self._realopener(pendingfilename, b"w")
488 492 fp2.write(fp1.read())
489 493 # add pending data
490 494 fp2.write(b"".join(self._delaybuf))
491 495 fp2.close()
492 496 # switch modes so finalize can simply rename
493 497 self._delaybuf = None
494 498 self._divert = True
495 499 self.opener = _divertopener(self._realopener, self.indexfile)
496 500
497 501 if self._divert:
498 502 return True
499 503
500 504 return False
501 505
502 506 def _enforceinlinesize(self, tr, fp=None):
503 507 if not self._delayed:
504 508 revlog.revlog._enforceinlinesize(self, tr, fp)
505 509
506 510 def read(self, node):
507 511 """Obtain data from a parsed changelog revision.
508 512
509 513 Returns a 6-tuple of:
510 514
511 515 - manifest node in binary
512 516 - author/user as a localstr
513 517 - date as a 2-tuple of (time, timezone)
514 518 - list of files
515 519 - commit message as a localstr
516 520 - dict of extra metadata
517 521
518 522 Unless you need to access all fields, consider calling
519 523 ``changelogrevision`` instead, as it is faster for partial object
520 524 access.
521 525 """
522 526 d, s = self._revisiondata(node)
523 527 c = changelogrevision(
524 528 d, s, self._copiesstorage == b'changeset-sidedata'
525 529 )
526 530 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
527 531
528 532 def changelogrevision(self, nodeorrev):
529 533 """Obtain a ``changelogrevision`` for a node or revision."""
530 534 text, sidedata = self._revisiondata(nodeorrev)
531 535 return changelogrevision(
532 536 text, sidedata, self._copiesstorage == b'changeset-sidedata'
533 537 )
534 538
535 539 def readfiles(self, node):
536 540 """
537 541 short version of read that only returns the files modified by the cset
538 542 """
539 543 text = self.revision(node)
540 544 if not text:
541 545 return []
542 546 last = text.index(b"\n\n")
543 547 l = text[:last].split(b'\n')
544 548 return l[3:]
545 549
546 550 def add(
547 551 self,
548 552 manifest,
549 553 files,
550 554 desc,
551 555 transaction,
552 556 p1,
553 557 p2,
554 558 user,
555 559 date=None,
556 560 extra=None,
557 561 ):
558 562 # Convert to UTF-8 encoded bytestrings as the very first
559 563 # thing: calling any method on a localstr object will turn it
560 564 # into a str object and the cached UTF-8 string is thus lost.
561 565 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
562 566
563 567 user = user.strip()
564 568 # An empty username or a username with a "\n" will make the
565 569 # revision text contain two "\n\n" sequences -> corrupt
566 570 # repository since read cannot unpack the revision.
567 571 if not user:
568 572 raise error.StorageError(_(b"empty username"))
569 573 if b"\n" in user:
570 574 raise error.StorageError(
571 575 _(b"username %r contains a newline") % pycompat.bytestr(user)
572 576 )
573 577
574 578 desc = stripdesc(desc)
575 579
576 580 if date:
577 581 parseddate = b"%d %d" % dateutil.parsedate(date)
578 582 else:
579 583 parseddate = b"%d %d" % dateutil.makedate()
580 584 if extra:
581 585 branch = extra.get(b"branch")
582 586 if branch in (b"default", b""):
583 587 del extra[b"branch"]
584 588 elif branch in (b".", b"null", b"tip"):
585 589 raise error.StorageError(
586 590 _(b'the name \'%s\' is reserved') % branch
587 591 )
588 592 sortedfiles = sorted(files.touched)
589 593 flags = 0
590 594 sidedata = None
591 595 if self._copiesstorage == b'changeset-sidedata':
592 596 if files.has_copies_info:
593 597 flags |= flagutil.REVIDX_HASCOPIESINFO
594 598 sidedata = metadata.encode_files_sidedata(files)
595 599
596 600 if extra:
597 601 extra = encodeextra(extra)
598 602 parseddate = b"%s %s" % (parseddate, extra)
599 603 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
600 604 text = b"\n".join(l)
601 605 rev = self.addrevision(
602 606 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
603 607 )
604 608 return self.node(rev)
605 609
606 610 def branchinfo(self, rev):
607 611 """return the branch name and open/close state of a revision
608 612
609 613 This function exists because creating a changectx object
610 614 just to access this is costly."""
611 615 return self.changelogrevision(rev).branchinfo
612 616
613 617 def _nodeduplicatecallback(self, transaction, rev):
614 618 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
615 619 #
616 620 # We track them in a list to preserve their order from the source bundle
617 621 duplicates = transaction.changes.setdefault(b'revduplicates', [])
618 622 duplicates.append(rev)
@@ -1,2622 +1,2627 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrites config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition, match name using regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string produces less surprising
92 92 # results for users writing a simple regex for a sub-attribute.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some matches to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section,
137 137 configprefix + b'nodates',
138 138 default=False,
139 139 )
140 140 coreconfigitem(
141 141 section,
142 142 configprefix + b'showfunc',
143 143 default=False,
144 144 )
145 145 coreconfigitem(
146 146 section,
147 147 configprefix + b'unified',
148 148 default=None,
149 149 )
150 150 coreconfigitem(
151 151 section,
152 152 configprefix + b'git',
153 153 default=False,
154 154 )
155 155 coreconfigitem(
156 156 section,
157 157 configprefix + b'ignorews',
158 158 default=False,
159 159 )
160 160 coreconfigitem(
161 161 section,
162 162 configprefix + b'ignorewsamount',
163 163 default=False,
164 164 )
165 165 coreconfigitem(
166 166 section,
167 167 configprefix + b'ignoreblanklines',
168 168 default=False,
169 169 )
170 170 coreconfigitem(
171 171 section,
172 172 configprefix + b'ignorewseol',
173 173 default=False,
174 174 )
175 175 coreconfigitem(
176 176 section,
177 177 configprefix + b'nobinary',
178 178 default=False,
179 179 )
180 180 coreconfigitem(
181 181 section,
182 182 configprefix + b'noprefix',
183 183 default=False,
184 184 )
185 185 coreconfigitem(
186 186 section,
187 187 configprefix + b'word-diff',
188 188 default=False,
189 189 )
190 190
191 191
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'convert',
574 574 b'svn.dangerous-set-commit-dates',
575 575 default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'debug',
579 579 b'dirstate.delaywrite',
580 580 default=0,
581 581 )
582 582 coreconfigitem(
583 b'debug',
584 b'revlog.verifyposition.changelog',
585 default=b'',
586 )
587 coreconfigitem(
583 588 b'defaults',
584 589 b'.*',
585 590 default=None,
586 591 generic=True,
587 592 )
588 593 coreconfigitem(
589 594 b'devel',
590 595 b'all-warnings',
591 596 default=False,
592 597 )
593 598 coreconfigitem(
594 599 b'devel',
595 600 b'bundle2.debug',
596 601 default=False,
597 602 )
598 603 coreconfigitem(
599 604 b'devel',
600 605 b'bundle.delta',
601 606 default=b'',
602 607 )
603 608 coreconfigitem(
604 609 b'devel',
605 610 b'cache-vfs',
606 611 default=None,
607 612 )
608 613 coreconfigitem(
609 614 b'devel',
610 615 b'check-locks',
611 616 default=False,
612 617 )
613 618 coreconfigitem(
614 619 b'devel',
615 620 b'check-relroot',
616 621 default=False,
617 622 )
618 623 # Track copy information for all files, not just "added" ones (very slow)
619 624 coreconfigitem(
620 625 b'devel',
621 626 b'copy-tracing.trace-all-files',
622 627 default=False,
623 628 )
624 629 coreconfigitem(
625 630 b'devel',
626 631 b'default-date',
627 632 default=None,
628 633 )
629 634 coreconfigitem(
630 635 b'devel',
631 636 b'deprec-warn',
632 637 default=False,
633 638 )
634 639 coreconfigitem(
635 640 b'devel',
636 641 b'disableloaddefaultcerts',
637 642 default=False,
638 643 )
639 644 coreconfigitem(
640 645 b'devel',
641 646 b'warn-empty-changegroup',
642 647 default=False,
643 648 )
644 649 coreconfigitem(
645 650 b'devel',
646 651 b'legacy.exchange',
647 652 default=list,
648 653 )
649 654 # When True, revlogs use a special reference version of the nodemap, that is not
650 655 # performant but is "known" to behave properly.
651 656 coreconfigitem(
652 657 b'devel',
653 658 b'persistent-nodemap',
654 659 default=False,
655 660 )
656 661 coreconfigitem(
657 662 b'devel',
658 663 b'servercafile',
659 664 default=b'',
660 665 )
661 666 coreconfigitem(
662 667 b'devel',
663 668 b'serverexactprotocol',
664 669 default=b'',
665 670 )
666 671 coreconfigitem(
667 672 b'devel',
668 673 b'serverrequirecert',
669 674 default=False,
670 675 )
671 676 coreconfigitem(
672 677 b'devel',
673 678 b'strip-obsmarkers',
674 679 default=True,
675 680 )
676 681 coreconfigitem(
677 682 b'devel',
678 683 b'warn-config',
679 684 default=None,
680 685 )
681 686 coreconfigitem(
682 687 b'devel',
683 688 b'warn-config-default',
684 689 default=None,
685 690 )
686 691 coreconfigitem(
687 692 b'devel',
688 693 b'user.obsmarker',
689 694 default=None,
690 695 )
691 696 coreconfigitem(
692 697 b'devel',
693 698 b'warn-config-unknown',
694 699 default=None,
695 700 )
696 701 coreconfigitem(
697 702 b'devel',
698 703 b'debug.copies',
699 704 default=False,
700 705 )
701 706 coreconfigitem(
702 707 b'devel',
703 708 b'copy-tracing.multi-thread',
704 709 default=True,
705 710 )
706 711 coreconfigitem(
707 712 b'devel',
708 713 b'debug.extensions',
709 714 default=False,
710 715 )
711 716 coreconfigitem(
712 717 b'devel',
713 718 b'debug.repo-filters',
714 719 default=False,
715 720 )
716 721 coreconfigitem(
717 722 b'devel',
718 723 b'debug.peer-request',
719 724 default=False,
720 725 )
721 726 # If discovery.exchange-heads is False, the discovery will not start with
722 727 # remote head fetching and local head querying.
723 728 coreconfigitem(
724 729 b'devel',
725 730 b'discovery.exchange-heads',
726 731 default=True,
727 732 )
728 733 # If discovery.grow-sample is False, the sample size used in set discovery will
729 734 # not be increased through the process
730 735 coreconfigitem(
731 736 b'devel',
732 737 b'discovery.grow-sample',
733 738 default=True,
734 739 )
735 740 # discovery.grow-sample.rate controls the rate at which the sample grows
736 741 coreconfigitem(
737 742 b'devel',
738 743 b'discovery.grow-sample.rate',
739 744 default=1.05,
740 745 )
741 746 # If discovery.randomize is False, random sampling during discovery is
742 747 # deterministic. It is meant for integration tests.
743 748 coreconfigitem(
744 749 b'devel',
745 750 b'discovery.randomize',
746 751 default=True,
747 752 )
748 753 # Control the initial size of the discovery sample
749 754 coreconfigitem(
750 755 b'devel',
751 756 b'discovery.sample-size',
752 757 default=200,
753 758 )
754 759 # Control the initial size of the discovery for initial change
755 760 coreconfigitem(
756 761 b'devel',
757 762 b'discovery.sample-size.initial',
758 763 default=100,
759 764 )
760 765 _registerdiffopts(section=b'diff')
761 766 coreconfigitem(
762 767 b'diff',
763 768 b'merge',
764 769 default=False,
765 770 experimental=True,
766 771 )
767 772 coreconfigitem(
768 773 b'email',
769 774 b'bcc',
770 775 default=None,
771 776 )
772 777 coreconfigitem(
773 778 b'email',
774 779 b'cc',
775 780 default=None,
776 781 )
777 782 coreconfigitem(
778 783 b'email',
779 784 b'charsets',
780 785 default=list,
781 786 )
782 787 coreconfigitem(
783 788 b'email',
784 789 b'from',
785 790 default=None,
786 791 )
787 792 coreconfigitem(
788 793 b'email',
789 794 b'method',
790 795 default=b'smtp',
791 796 )
792 797 coreconfigitem(
793 798 b'email',
794 799 b'reply-to',
795 800 default=None,
796 801 )
797 802 coreconfigitem(
798 803 b'email',
799 804 b'to',
800 805 default=None,
801 806 )
802 807 coreconfigitem(
803 808 b'experimental',
804 809 b'archivemetatemplate',
805 810 default=dynamicdefault,
806 811 )
807 812 coreconfigitem(
808 813 b'experimental',
809 814 b'auto-publish',
810 815 default=b'publish',
811 816 )
812 817 coreconfigitem(
813 818 b'experimental',
814 819 b'bundle-phases',
815 820 default=False,
816 821 )
817 822 coreconfigitem(
818 823 b'experimental',
819 824 b'bundle2-advertise',
820 825 default=True,
821 826 )
822 827 coreconfigitem(
823 828 b'experimental',
824 829 b'bundle2-output-capture',
825 830 default=False,
826 831 )
827 832 coreconfigitem(
828 833 b'experimental',
829 834 b'bundle2.pushback',
830 835 default=False,
831 836 )
832 837 coreconfigitem(
833 838 b'experimental',
834 839 b'bundle2lazylocking',
835 840 default=False,
836 841 )
837 842 coreconfigitem(
838 843 b'experimental',
839 844 b'bundlecomplevel',
840 845 default=None,
841 846 )
842 847 coreconfigitem(
843 848 b'experimental',
844 849 b'bundlecomplevel.bzip2',
845 850 default=None,
846 851 )
847 852 coreconfigitem(
848 853 b'experimental',
849 854 b'bundlecomplevel.gzip',
850 855 default=None,
851 856 )
852 857 coreconfigitem(
853 858 b'experimental',
854 859 b'bundlecomplevel.none',
855 860 default=None,
856 861 )
857 862 coreconfigitem(
858 863 b'experimental',
859 864 b'bundlecomplevel.zstd',
860 865 default=None,
861 866 )
862 867 coreconfigitem(
863 868 b'experimental',
864 869 b'changegroup3',
865 870 default=False,
866 871 )
867 872 coreconfigitem(
868 873 b'experimental',
869 874 b'cleanup-as-archived',
870 875 default=False,
871 876 )
872 877 coreconfigitem(
873 878 b'experimental',
874 879 b'clientcompressionengines',
875 880 default=list,
876 881 )
877 882 coreconfigitem(
878 883 b'experimental',
879 884 b'copytrace',
880 885 default=b'on',
881 886 )
882 887 coreconfigitem(
883 888 b'experimental',
884 889 b'copytrace.movecandidateslimit',
885 890 default=100,
886 891 )
887 892 coreconfigitem(
888 893 b'experimental',
889 894 b'copytrace.sourcecommitlimit',
890 895 default=100,
891 896 )
892 897 coreconfigitem(
893 898 b'experimental',
894 899 b'copies.read-from',
895 900 default=b"filelog-only",
896 901 )
897 902 coreconfigitem(
898 903 b'experimental',
899 904 b'copies.write-to',
900 905 default=b'filelog-only',
901 906 )
902 907 coreconfigitem(
903 908 b'experimental',
904 909 b'crecordtest',
905 910 default=None,
906 911 )
907 912 coreconfigitem(
908 913 b'experimental',
909 914 b'directaccess',
910 915 default=False,
911 916 )
912 917 coreconfigitem(
913 918 b'experimental',
914 919 b'directaccess.revnums',
915 920 default=False,
916 921 )
917 922 coreconfigitem(
918 923 b'experimental',
919 924 b'editortmpinhg',
920 925 default=False,
921 926 )
922 927 coreconfigitem(
923 928 b'experimental',
924 929 b'evolution',
925 930 default=list,
926 931 )
927 932 coreconfigitem(
928 933 b'experimental',
929 934 b'evolution.allowdivergence',
930 935 default=False,
931 936 alias=[(b'experimental', b'allowdivergence')],
932 937 )
933 938 coreconfigitem(
934 939 b'experimental',
935 940 b'evolution.allowunstable',
936 941 default=None,
937 942 )
938 943 coreconfigitem(
939 944 b'experimental',
940 945 b'evolution.createmarkers',
941 946 default=None,
942 947 )
943 948 coreconfigitem(
944 949 b'experimental',
945 950 b'evolution.effect-flags',
946 951 default=True,
947 952 alias=[(b'experimental', b'effect-flags')],
948 953 )
949 954 coreconfigitem(
950 955 b'experimental',
951 956 b'evolution.exchange',
952 957 default=None,
953 958 )
954 959 coreconfigitem(
955 960 b'experimental',
956 961 b'evolution.bundle-obsmarker',
957 962 default=False,
958 963 )
959 964 coreconfigitem(
960 965 b'experimental',
961 966 b'evolution.bundle-obsmarker:mandatory',
962 967 default=True,
963 968 )
964 969 coreconfigitem(
965 970 b'experimental',
966 971 b'log.topo',
967 972 default=False,
968 973 )
969 974 coreconfigitem(
970 975 b'experimental',
971 976 b'evolution.report-instabilities',
972 977 default=True,
973 978 )
974 979 coreconfigitem(
975 980 b'experimental',
976 981 b'evolution.track-operation',
977 982 default=True,
978 983 )
979 984 # repo-level config to exclude a revset from visibility
980 985 #
981 986 # The target use case is to use `share` to expose different subsets of the same
982 987 # repository, especially on the server side. See also `server.view`.
983 988 coreconfigitem(
984 989 b'experimental',
985 990 b'extra-filter-revs',
986 991 default=None,
987 992 )
988 993 coreconfigitem(
989 994 b'experimental',
990 995 b'maxdeltachainspan',
991 996 default=-1,
992 997 )
993 998 # tracks files which were undeleted (merge might delete them but we explicitly
994 999 # kept/undeleted them) and creates new filenodes for them
995 1000 coreconfigitem(
996 1001 b'experimental',
997 1002 b'merge-track-salvaged',
998 1003 default=False,
999 1004 )
1000 1005 coreconfigitem(
1001 1006 b'experimental',
1002 1007 b'mergetempdirprefix',
1003 1008 default=None,
1004 1009 )
1005 1010 coreconfigitem(
1006 1011 b'experimental',
1007 1012 b'mmapindexthreshold',
1008 1013 default=None,
1009 1014 )
1010 1015 coreconfigitem(
1011 1016 b'experimental',
1012 1017 b'narrow',
1013 1018 default=False,
1014 1019 )
1015 1020 coreconfigitem(
1016 1021 b'experimental',
1017 1022 b'nonnormalparanoidcheck',
1018 1023 default=False,
1019 1024 )
1020 1025 coreconfigitem(
1021 1026 b'experimental',
1022 1027 b'exportableenviron',
1023 1028 default=list,
1024 1029 )
1025 1030 coreconfigitem(
1026 1031 b'experimental',
1027 1032 b'extendedheader.index',
1028 1033 default=None,
1029 1034 )
1030 1035 coreconfigitem(
1031 1036 b'experimental',
1032 1037 b'extendedheader.similarity',
1033 1038 default=False,
1034 1039 )
1035 1040 coreconfigitem(
1036 1041 b'experimental',
1037 1042 b'graphshorten',
1038 1043 default=False,
1039 1044 )
1040 1045 coreconfigitem(
1041 1046 b'experimental',
1042 1047 b'graphstyle.parent',
1043 1048 default=dynamicdefault,
1044 1049 )
1045 1050 coreconfigitem(
1046 1051 b'experimental',
1047 1052 b'graphstyle.missing',
1048 1053 default=dynamicdefault,
1049 1054 )
1050 1055 coreconfigitem(
1051 1056 b'experimental',
1052 1057 b'graphstyle.grandparent',
1053 1058 default=dynamicdefault,
1054 1059 )
1055 1060 coreconfigitem(
1056 1061 b'experimental',
1057 1062 b'hook-track-tags',
1058 1063 default=False,
1059 1064 )
1060 1065 coreconfigitem(
1061 1066 b'experimental',
1062 1067 b'httppeer.advertise-v2',
1063 1068 default=False,
1064 1069 )
1065 1070 coreconfigitem(
1066 1071 b'experimental',
1067 1072 b'httppeer.v2-encoder-order',
1068 1073 default=None,
1069 1074 )
1070 1075 coreconfigitem(
1071 1076 b'experimental',
1072 1077 b'httppostargs',
1073 1078 default=False,
1074 1079 )
1075 1080 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1076 1081 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1077 1082
1078 1083 coreconfigitem(
1079 1084 b'experimental',
1080 1085 b'obsmarkers-exchange-debug',
1081 1086 default=False,
1082 1087 )
1083 1088 coreconfigitem(
1084 1089 b'experimental',
1085 1090 b'remotenames',
1086 1091 default=False,
1087 1092 )
1088 1093 coreconfigitem(
1089 1094 b'experimental',
1090 1095 b'removeemptydirs',
1091 1096 default=True,
1092 1097 )
1093 1098 coreconfigitem(
1094 1099 b'experimental',
1095 1100 b'revert.interactive.select-to-keep',
1096 1101 default=False,
1097 1102 )
1098 1103 coreconfigitem(
1099 1104 b'experimental',
1100 1105 b'revisions.prefixhexnode',
1101 1106 default=False,
1102 1107 )
1103 1108 coreconfigitem(
1104 1109 b'experimental',
1105 1110 b'revlogv2',
1106 1111 default=None,
1107 1112 )
1108 1113 coreconfigitem(
1109 1114 b'experimental',
1110 1115 b'revisions.disambiguatewithin',
1111 1116 default=None,
1112 1117 )
1113 1118 coreconfigitem(
1114 1119 b'experimental',
1115 1120 b'rust.index',
1116 1121 default=False,
1117 1122 )
1118 1123 coreconfigitem(
1119 1124 b'experimental',
1120 1125 b'server.filesdata.recommended-batch-size',
1121 1126 default=50000,
1122 1127 )
1123 1128 coreconfigitem(
1124 1129 b'experimental',
1125 1130 b'server.manifestdata.recommended-batch-size',
1126 1131 default=100000,
1127 1132 )
1128 1133 coreconfigitem(
1129 1134 b'experimental',
1130 1135 b'server.stream-narrow-clones',
1131 1136 default=False,
1132 1137 )
1133 1138 coreconfigitem(
1134 1139 b'experimental',
1135 1140 b'single-head-per-branch',
1136 1141 default=False,
1137 1142 )
1138 1143 coreconfigitem(
1139 1144 b'experimental',
1140 1145 b'single-head-per-branch:account-closed-heads',
1141 1146 default=False,
1142 1147 )
1143 1148 coreconfigitem(
1144 1149 b'experimental',
1145 1150 b'single-head-per-branch:public-changes-only',
1146 1151 default=False,
1147 1152 )
1148 1153 coreconfigitem(
1149 1154 b'experimental',
1150 1155 b'sshserver.support-v2',
1151 1156 default=False,
1152 1157 )
1153 1158 coreconfigitem(
1154 1159 b'experimental',
1155 1160 b'sparse-read',
1156 1161 default=False,
1157 1162 )
1158 1163 coreconfigitem(
1159 1164 b'experimental',
1160 1165 b'sparse-read.density-threshold',
1161 1166 default=0.50,
1162 1167 )
1163 1168 coreconfigitem(
1164 1169 b'experimental',
1165 1170 b'sparse-read.min-gap-size',
1166 1171 default=b'65K',
1167 1172 )
1168 1173 coreconfigitem(
1169 1174 b'experimental',
1170 1175 b'treemanifest',
1171 1176 default=False,
1172 1177 )
1173 1178 coreconfigitem(
1174 1179 b'experimental',
1175 1180 b'update.atomic-file',
1176 1181 default=False,
1177 1182 )
1178 1183 coreconfigitem(
1179 1184 b'experimental',
1180 1185 b'sshpeer.advertise-v2',
1181 1186 default=False,
1182 1187 )
1183 1188 coreconfigitem(
1184 1189 b'experimental',
1185 1190 b'web.apiserver',
1186 1191 default=False,
1187 1192 )
1188 1193 coreconfigitem(
1189 1194 b'experimental',
1190 1195 b'web.api.http-v2',
1191 1196 default=False,
1192 1197 )
1193 1198 coreconfigitem(
1194 1199 b'experimental',
1195 1200 b'web.api.debugreflect',
1196 1201 default=False,
1197 1202 )
1198 1203 coreconfigitem(
1199 1204 b'experimental',
1200 1205 b'worker.wdir-get-thread-safe',
1201 1206 default=False,
1202 1207 )
1203 1208 coreconfigitem(
1204 1209 b'experimental',
1205 1210 b'worker.repository-upgrade',
1206 1211 default=False,
1207 1212 )
1208 1213 coreconfigitem(
1209 1214 b'experimental',
1210 1215 b'xdiff',
1211 1216 default=False,
1212 1217 )
1213 1218 coreconfigitem(
1214 1219 b'extensions',
1215 1220 b'.*',
1216 1221 default=None,
1217 1222 generic=True,
1218 1223 )
1219 1224 coreconfigitem(
1220 1225 b'extdata',
1221 1226 b'.*',
1222 1227 default=None,
1223 1228 generic=True,
1224 1229 )
1225 1230 coreconfigitem(
1226 1231 b'format',
1227 1232 b'bookmarks-in-store',
1228 1233 default=False,
1229 1234 )
1230 1235 coreconfigitem(
1231 1236 b'format',
1232 1237 b'chunkcachesize',
1233 1238 default=None,
1234 1239 experimental=True,
1235 1240 )
1236 1241 coreconfigitem(
1237 1242 b'format',
1238 1243 b'dotencode',
1239 1244 default=True,
1240 1245 )
1241 1246 coreconfigitem(
1242 1247 b'format',
1243 1248 b'generaldelta',
1244 1249 default=False,
1245 1250 experimental=True,
1246 1251 )
1247 1252 coreconfigitem(
1248 1253 b'format',
1249 1254 b'manifestcachesize',
1250 1255 default=None,
1251 1256 experimental=True,
1252 1257 )
1253 1258 coreconfigitem(
1254 1259 b'format',
1255 1260 b'maxchainlen',
1256 1261 default=dynamicdefault,
1257 1262 experimental=True,
1258 1263 )
1259 1264 coreconfigitem(
1260 1265 b'format',
1261 1266 b'obsstore-version',
1262 1267 default=None,
1263 1268 )
1264 1269 coreconfigitem(
1265 1270 b'format',
1266 1271 b'sparse-revlog',
1267 1272 default=True,
1268 1273 )
1269 1274 coreconfigitem(
1270 1275 b'format',
1271 1276 b'revlog-compression',
1272 1277 default=lambda: [b'zlib'],
1273 1278 alias=[(b'experimental', b'format.compression')],
1274 1279 )
1275 1280 coreconfigitem(
1276 1281 b'format',
1277 1282 b'usefncache',
1278 1283 default=True,
1279 1284 )
1280 1285 coreconfigitem(
1281 1286 b'format',
1282 1287 b'usegeneraldelta',
1283 1288 default=True,
1284 1289 )
1285 1290 coreconfigitem(
1286 1291 b'format',
1287 1292 b'usestore',
1288 1293 default=True,
1289 1294 )
1290 1295 coreconfigitem(
1291 1296 b'format',
1292 1297 b'use-persistent-nodemap',
1293 1298 default=False,
1294 1299 )
1295 1300 coreconfigitem(
1296 1301 b'format',
1297 1302 b'exp-use-copies-side-data-changeset',
1298 1303 default=False,
1299 1304 experimental=True,
1300 1305 )
1301 1306 coreconfigitem(
1302 1307 b'format',
1303 1308 b'exp-use-side-data',
1304 1309 default=False,
1305 1310 experimental=True,
1306 1311 )
1307 1312 coreconfigitem(
1308 1313 b'format',
1309 1314 b'use-share-safe',
1310 1315 default=False,
1311 1316 )
1312 1317 coreconfigitem(
1313 1318 b'format',
1314 1319 b'internal-phase',
1315 1320 default=False,
1316 1321 experimental=True,
1317 1322 )
1318 1323 coreconfigitem(
1319 1324 b'fsmonitor',
1320 1325 b'warn_when_unused',
1321 1326 default=True,
1322 1327 )
1323 1328 coreconfigitem(
1324 1329 b'fsmonitor',
1325 1330 b'warn_update_file_count',
1326 1331 default=50000,
1327 1332 )
1328 1333 coreconfigitem(
1329 1334 b'fsmonitor',
1330 1335 b'warn_update_file_count_rust',
1331 1336 default=400000,
1332 1337 )
1333 1338 coreconfigitem(
1334 1339 b'help',
1335 1340 br'hidden-command\..*',
1336 1341 default=False,
1337 1342 generic=True,
1338 1343 )
1339 1344 coreconfigitem(
1340 1345 b'help',
1341 1346 br'hidden-topic\..*',
1342 1347 default=False,
1343 1348 generic=True,
1344 1349 )
1345 1350 coreconfigitem(
1346 1351 b'hooks',
1347 1352 b'[^:]*',
1348 1353 default=dynamicdefault,
1349 1354 generic=True,
1350 1355 )
1351 1356 coreconfigitem(
1352 1357 b'hooks',
1353 1358 b'.*:run-with-plain',
1354 1359 default=True,
1355 1360 generic=True,
1356 1361 )
1357 1362 coreconfigitem(
1358 1363 b'hgweb-paths',
1359 1364 b'.*',
1360 1365 default=list,
1361 1366 generic=True,
1362 1367 )
1363 1368 coreconfigitem(
1364 1369 b'hostfingerprints',
1365 1370 b'.*',
1366 1371 default=list,
1367 1372 generic=True,
1368 1373 )
1369 1374 coreconfigitem(
1370 1375 b'hostsecurity',
1371 1376 b'ciphers',
1372 1377 default=None,
1373 1378 )
1374 1379 coreconfigitem(
1375 1380 b'hostsecurity',
1376 1381 b'minimumprotocol',
1377 1382 default=dynamicdefault,
1378 1383 )
1379 1384 coreconfigitem(
1380 1385 b'hostsecurity',
1381 1386 b'.*:minimumprotocol$',
1382 1387 default=dynamicdefault,
1383 1388 generic=True,
1384 1389 )
1385 1390 coreconfigitem(
1386 1391 b'hostsecurity',
1387 1392 b'.*:ciphers$',
1388 1393 default=dynamicdefault,
1389 1394 generic=True,
1390 1395 )
1391 1396 coreconfigitem(
1392 1397 b'hostsecurity',
1393 1398 b'.*:fingerprints$',
1394 1399 default=list,
1395 1400 generic=True,
1396 1401 )
1397 1402 coreconfigitem(
1398 1403 b'hostsecurity',
1399 1404 b'.*:verifycertsfile$',
1400 1405 default=None,
1401 1406 generic=True,
1402 1407 )
1403 1408
1404 1409 coreconfigitem(
1405 1410 b'http_proxy',
1406 1411 b'always',
1407 1412 default=False,
1408 1413 )
1409 1414 coreconfigitem(
1410 1415 b'http_proxy',
1411 1416 b'host',
1412 1417 default=None,
1413 1418 )
1414 1419 coreconfigitem(
1415 1420 b'http_proxy',
1416 1421 b'no',
1417 1422 default=list,
1418 1423 )
1419 1424 coreconfigitem(
1420 1425 b'http_proxy',
1421 1426 b'passwd',
1422 1427 default=None,
1423 1428 )
1424 1429 coreconfigitem(
1425 1430 b'http_proxy',
1426 1431 b'user',
1427 1432 default=None,
1428 1433 )
1429 1434
1430 1435 coreconfigitem(
1431 1436 b'http',
1432 1437 b'timeout',
1433 1438 default=None,
1434 1439 )
1435 1440
1436 1441 coreconfigitem(
1437 1442 b'logtoprocess',
1438 1443 b'commandexception',
1439 1444 default=None,
1440 1445 )
1441 1446 coreconfigitem(
1442 1447 b'logtoprocess',
1443 1448 b'commandfinish',
1444 1449 default=None,
1445 1450 )
1446 1451 coreconfigitem(
1447 1452 b'logtoprocess',
1448 1453 b'command',
1449 1454 default=None,
1450 1455 )
1451 1456 coreconfigitem(
1452 1457 b'logtoprocess',
1453 1458 b'develwarn',
1454 1459 default=None,
1455 1460 )
1456 1461 coreconfigitem(
1457 1462 b'logtoprocess',
1458 1463 b'uiblocked',
1459 1464 default=None,
1460 1465 )
1461 1466 coreconfigitem(
1462 1467 b'merge',
1463 1468 b'checkunknown',
1464 1469 default=b'abort',
1465 1470 )
1466 1471 coreconfigitem(
1467 1472 b'merge',
1468 1473 b'checkignored',
1469 1474 default=b'abort',
1470 1475 )
1471 1476 coreconfigitem(
1472 1477 b'experimental',
1473 1478 b'merge.checkpathconflicts',
1474 1479 default=False,
1475 1480 )
1476 1481 coreconfigitem(
1477 1482 b'merge',
1478 1483 b'followcopies',
1479 1484 default=True,
1480 1485 )
1481 1486 coreconfigitem(
1482 1487 b'merge',
1483 1488 b'on-failure',
1484 1489 default=b'continue',
1485 1490 )
1486 1491 coreconfigitem(
1487 1492 b'merge',
1488 1493 b'preferancestor',
1489 1494 default=lambda: [b'*'],
1490 1495 experimental=True,
1491 1496 )
1492 1497 coreconfigitem(
1493 1498 b'merge',
1494 1499 b'strict-capability-check',
1495 1500 default=False,
1496 1501 )
1497 1502 coreconfigitem(
1498 1503 b'merge-tools',
1499 1504 b'.*',
1500 1505 default=None,
1501 1506 generic=True,
1502 1507 )
1503 1508 coreconfigitem(
1504 1509 b'merge-tools',
1505 1510 br'.*\.args$',
1506 1511 default=b"$local $base $other",
1507 1512 generic=True,
1508 1513 priority=-1,
1509 1514 )
1510 1515 coreconfigitem(
1511 1516 b'merge-tools',
1512 1517 br'.*\.binary$',
1513 1518 default=False,
1514 1519 generic=True,
1515 1520 priority=-1,
1516 1521 )
1517 1522 coreconfigitem(
1518 1523 b'merge-tools',
1519 1524 br'.*\.check$',
1520 1525 default=list,
1521 1526 generic=True,
1522 1527 priority=-1,
1523 1528 )
1524 1529 coreconfigitem(
1525 1530 b'merge-tools',
1526 1531 br'.*\.checkchanged$',
1527 1532 default=False,
1528 1533 generic=True,
1529 1534 priority=-1,
1530 1535 )
1531 1536 coreconfigitem(
1532 1537 b'merge-tools',
1533 1538 br'.*\.executable$',
1534 1539 default=dynamicdefault,
1535 1540 generic=True,
1536 1541 priority=-1,
1537 1542 )
1538 1543 coreconfigitem(
1539 1544 b'merge-tools',
1540 1545 br'.*\.fixeol$',
1541 1546 default=False,
1542 1547 generic=True,
1543 1548 priority=-1,
1544 1549 )
1545 1550 coreconfigitem(
1546 1551 b'merge-tools',
1547 1552 br'.*\.gui$',
1548 1553 default=False,
1549 1554 generic=True,
1550 1555 priority=-1,
1551 1556 )
1552 1557 coreconfigitem(
1553 1558 b'merge-tools',
1554 1559 br'.*\.mergemarkers$',
1555 1560 default=b'basic',
1556 1561 generic=True,
1557 1562 priority=-1,
1558 1563 )
1559 1564 coreconfigitem(
1560 1565 b'merge-tools',
1561 1566 br'.*\.mergemarkertemplate$',
1562 1567 default=dynamicdefault, # take from command-templates.mergemarker
1563 1568 generic=True,
1564 1569 priority=-1,
1565 1570 )
1566 1571 coreconfigitem(
1567 1572 b'merge-tools',
1568 1573 br'.*\.priority$',
1569 1574 default=0,
1570 1575 generic=True,
1571 1576 priority=-1,
1572 1577 )
1573 1578 coreconfigitem(
1574 1579 b'merge-tools',
1575 1580 br'.*\.premerge$',
1576 1581 default=dynamicdefault,
1577 1582 generic=True,
1578 1583 priority=-1,
1579 1584 )
1580 1585 coreconfigitem(
1581 1586 b'merge-tools',
1582 1587 br'.*\.symlink$',
1583 1588 default=False,
1584 1589 generic=True,
1585 1590 priority=-1,
1586 1591 )
1587 1592 coreconfigitem(
1588 1593 b'pager',
1589 1594 b'attend-.*',
1590 1595 default=dynamicdefault,
1591 1596 generic=True,
1592 1597 )
1593 1598 coreconfigitem(
1594 1599 b'pager',
1595 1600 b'ignore',
1596 1601 default=list,
1597 1602 )
1598 1603 coreconfigitem(
1599 1604 b'pager',
1600 1605 b'pager',
1601 1606 default=dynamicdefault,
1602 1607 )
1603 1608 coreconfigitem(
1604 1609 b'patch',
1605 1610 b'eol',
1606 1611 default=b'strict',
1607 1612 )
1608 1613 coreconfigitem(
1609 1614 b'patch',
1610 1615 b'fuzz',
1611 1616 default=2,
1612 1617 )
1613 1618 coreconfigitem(
1614 1619 b'paths',
1615 1620 b'default',
1616 1621 default=None,
1617 1622 )
1618 1623 coreconfigitem(
1619 1624 b'paths',
1620 1625 b'default-push',
1621 1626 default=None,
1622 1627 )
1623 1628 coreconfigitem(
1624 1629 b'paths',
1625 1630 b'.*',
1626 1631 default=None,
1627 1632 generic=True,
1628 1633 )
1629 1634 coreconfigitem(
1630 1635 b'phases',
1631 1636 b'checksubrepos',
1632 1637 default=b'follow',
1633 1638 )
1634 1639 coreconfigitem(
1635 1640 b'phases',
1636 1641 b'new-commit',
1637 1642 default=b'draft',
1638 1643 )
1639 1644 coreconfigitem(
1640 1645 b'phases',
1641 1646 b'publish',
1642 1647 default=True,
1643 1648 )
1644 1649 coreconfigitem(
1645 1650 b'profiling',
1646 1651 b'enabled',
1647 1652 default=False,
1648 1653 )
1649 1654 coreconfigitem(
1650 1655 b'profiling',
1651 1656 b'format',
1652 1657 default=b'text',
1653 1658 )
1654 1659 coreconfigitem(
1655 1660 b'profiling',
1656 1661 b'freq',
1657 1662 default=1000,
1658 1663 )
1659 1664 coreconfigitem(
1660 1665 b'profiling',
1661 1666 b'limit',
1662 1667 default=30,
1663 1668 )
1664 1669 coreconfigitem(
1665 1670 b'profiling',
1666 1671 b'nested',
1667 1672 default=0,
1668 1673 )
1669 1674 coreconfigitem(
1670 1675 b'profiling',
1671 1676 b'output',
1672 1677 default=None,
1673 1678 )
1674 1679 coreconfigitem(
1675 1680 b'profiling',
1676 1681 b'showmax',
1677 1682 default=0.999,
1678 1683 )
1679 1684 coreconfigitem(
1680 1685 b'profiling',
1681 1686 b'showmin',
1682 1687 default=dynamicdefault,
1683 1688 )
1684 1689 coreconfigitem(
1685 1690 b'profiling',
1686 1691 b'showtime',
1687 1692 default=True,
1688 1693 )
1689 1694 coreconfigitem(
1690 1695 b'profiling',
1691 1696 b'sort',
1692 1697 default=b'inlinetime',
1693 1698 )
1694 1699 coreconfigitem(
1695 1700 b'profiling',
1696 1701 b'statformat',
1697 1702 default=b'hotpath',
1698 1703 )
1699 1704 coreconfigitem(
1700 1705 b'profiling',
1701 1706 b'time-track',
1702 1707 default=dynamicdefault,
1703 1708 )
1704 1709 coreconfigitem(
1705 1710 b'profiling',
1706 1711 b'type',
1707 1712 default=b'stat',
1708 1713 )
1709 1714 coreconfigitem(
1710 1715 b'progress',
1711 1716 b'assume-tty',
1712 1717 default=False,
1713 1718 )
1714 1719 coreconfigitem(
1715 1720 b'progress',
1716 1721 b'changedelay',
1717 1722 default=1,
1718 1723 )
1719 1724 coreconfigitem(
1720 1725 b'progress',
1721 1726 b'clear-complete',
1722 1727 default=True,
1723 1728 )
1724 1729 coreconfigitem(
1725 1730 b'progress',
1726 1731 b'debug',
1727 1732 default=False,
1728 1733 )
1729 1734 coreconfigitem(
1730 1735 b'progress',
1731 1736 b'delay',
1732 1737 default=3,
1733 1738 )
1734 1739 coreconfigitem(
1735 1740 b'progress',
1736 1741 b'disable',
1737 1742 default=False,
1738 1743 )
1739 1744 coreconfigitem(
1740 1745 b'progress',
1741 1746 b'estimateinterval',
1742 1747 default=60.0,
1743 1748 )
1744 1749 coreconfigitem(
1745 1750 b'progress',
1746 1751 b'format',
1747 1752 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1748 1753 )
1749 1754 coreconfigitem(
1750 1755 b'progress',
1751 1756 b'refresh',
1752 1757 default=0.1,
1753 1758 )
1754 1759 coreconfigitem(
1755 1760 b'progress',
1756 1761 b'width',
1757 1762 default=dynamicdefault,
1758 1763 )
1759 1764 coreconfigitem(
1760 1765 b'pull',
1761 1766 b'confirm',
1762 1767 default=False,
1763 1768 )
1764 1769 coreconfigitem(
1765 1770 b'push',
1766 1771 b'pushvars.server',
1767 1772 default=False,
1768 1773 )
1769 1774 coreconfigitem(
1770 1775 b'rewrite',
1771 1776 b'backup-bundle',
1772 1777 default=True,
1773 1778 alias=[(b'ui', b'history-editing-backup')],
1774 1779 )
1775 1780 coreconfigitem(
1776 1781 b'rewrite',
1777 1782 b'update-timestamp',
1778 1783 default=False,
1779 1784 )
1780 1785 coreconfigitem(
1781 1786 b'rewrite',
1782 1787 b'empty-successor',
1783 1788 default=b'skip',
1784 1789 experimental=True,
1785 1790 )
1786 1791 coreconfigitem(
1787 1792 b'storage',
1788 1793 b'new-repo-backend',
1789 1794 default=b'revlogv1',
1790 1795 experimental=True,
1791 1796 )
1792 1797 coreconfigitem(
1793 1798 b'storage',
1794 1799 b'revlog.optimize-delta-parent-choice',
1795 1800 default=True,
1796 1801 alias=[(b'format', b'aggressivemergedeltas')],
1797 1802 )
1798 1803 # experimental as long as rust is experimental (or a C version is implemented)
1799 1804 coreconfigitem(
1800 1805 b'storage',
1801 1806 b'revlog.persistent-nodemap.mmap',
1802 1807 default=True,
1803 1808 )
1804 1809 # experimental as long as format.use-persistent-nodemap is.
1805 1810 coreconfigitem(
1806 1811 b'storage',
1807 1812 b'revlog.persistent-nodemap.slow-path',
1808 1813 default=b"abort",
1809 1814 )
1810 1815
1811 1816 coreconfigitem(
1812 1817 b'storage',
1813 1818 b'revlog.reuse-external-delta',
1814 1819 default=True,
1815 1820 )
1816 1821 coreconfigitem(
1817 1822 b'storage',
1818 1823 b'revlog.reuse-external-delta-parent',
1819 1824 default=None,
1820 1825 )
1821 1826 coreconfigitem(
1822 1827 b'storage',
1823 1828 b'revlog.zlib.level',
1824 1829 default=None,
1825 1830 )
1826 1831 coreconfigitem(
1827 1832 b'storage',
1828 1833 b'revlog.zstd.level',
1829 1834 default=None,
1830 1835 )
1831 1836 coreconfigitem(
1832 1837 b'server',
1833 1838 b'bookmarks-pushkey-compat',
1834 1839 default=True,
1835 1840 )
1836 1841 coreconfigitem(
1837 1842 b'server',
1838 1843 b'bundle1',
1839 1844 default=True,
1840 1845 )
1841 1846 coreconfigitem(
1842 1847 b'server',
1843 1848 b'bundle1gd',
1844 1849 default=None,
1845 1850 )
1846 1851 coreconfigitem(
1847 1852 b'server',
1848 1853 b'bundle1.pull',
1849 1854 default=None,
1850 1855 )
1851 1856 coreconfigitem(
1852 1857 b'server',
1853 1858 b'bundle1gd.pull',
1854 1859 default=None,
1855 1860 )
1856 1861 coreconfigitem(
1857 1862 b'server',
1858 1863 b'bundle1.push',
1859 1864 default=None,
1860 1865 )
1861 1866 coreconfigitem(
1862 1867 b'server',
1863 1868 b'bundle1gd.push',
1864 1869 default=None,
1865 1870 )
1866 1871 coreconfigitem(
1867 1872 b'server',
1868 1873 b'bundle2.stream',
1869 1874 default=True,
1870 1875 alias=[(b'experimental', b'bundle2.stream')],
1871 1876 )
1872 1877 coreconfigitem(
1873 1878 b'server',
1874 1879 b'compressionengines',
1875 1880 default=list,
1876 1881 )
1877 1882 coreconfigitem(
1878 1883 b'server',
1879 1884 b'concurrent-push-mode',
1880 1885 default=b'check-related',
1881 1886 )
1882 1887 coreconfigitem(
1883 1888 b'server',
1884 1889 b'disablefullbundle',
1885 1890 default=False,
1886 1891 )
1887 1892 coreconfigitem(
1888 1893 b'server',
1889 1894 b'maxhttpheaderlen',
1890 1895 default=1024,
1891 1896 )
1892 1897 coreconfigitem(
1893 1898 b'server',
1894 1899 b'pullbundle',
1895 1900 default=False,
1896 1901 )
1897 1902 coreconfigitem(
1898 1903 b'server',
1899 1904 b'preferuncompressed',
1900 1905 default=False,
1901 1906 )
1902 1907 coreconfigitem(
1903 1908 b'server',
1904 1909 b'streamunbundle',
1905 1910 default=False,
1906 1911 )
1907 1912 coreconfigitem(
1908 1913 b'server',
1909 1914 b'uncompressed',
1910 1915 default=True,
1911 1916 )
1912 1917 coreconfigitem(
1913 1918 b'server',
1914 1919 b'uncompressedallowsecret',
1915 1920 default=False,
1916 1921 )
1917 1922 coreconfigitem(
1918 1923 b'server',
1919 1924 b'view',
1920 1925 default=b'served',
1921 1926 )
1922 1927 coreconfigitem(
1923 1928 b'server',
1924 1929 b'validate',
1925 1930 default=False,
1926 1931 )
1927 1932 coreconfigitem(
1928 1933 b'server',
1929 1934 b'zliblevel',
1930 1935 default=-1,
1931 1936 )
1932 1937 coreconfigitem(
1933 1938 b'server',
1934 1939 b'zstdlevel',
1935 1940 default=3,
1936 1941 )
1937 1942 coreconfigitem(
1938 1943 b'share',
1939 1944 b'pool',
1940 1945 default=None,
1941 1946 )
1942 1947 coreconfigitem(
1943 1948 b'share',
1944 1949 b'poolnaming',
1945 1950 default=b'identity',
1946 1951 )
1947 1952 coreconfigitem(
1948 1953 b'share',
1949 1954 b'safe-mismatch.source-not-safe',
1950 1955 default=b'abort',
1951 1956 )
1952 1957 coreconfigitem(
1953 1958 b'share',
1954 1959 b'safe-mismatch.source-safe',
1955 1960 default=b'abort',
1956 1961 )
1957 1962 coreconfigitem(
1958 1963 b'share',
1959 1964 b'safe-mismatch.source-not-safe.warn',
1960 1965 default=True,
1961 1966 )
1962 1967 coreconfigitem(
1963 1968 b'share',
1964 1969 b'safe-mismatch.source-safe.warn',
1965 1970 default=True,
1966 1971 )
1967 1972 coreconfigitem(
1968 1973 b'shelve',
1969 1974 b'maxbackups',
1970 1975 default=10,
1971 1976 )
1972 1977 coreconfigitem(
1973 1978 b'smtp',
1974 1979 b'host',
1975 1980 default=None,
1976 1981 )
1977 1982 coreconfigitem(
1978 1983 b'smtp',
1979 1984 b'local_hostname',
1980 1985 default=None,
1981 1986 )
1982 1987 coreconfigitem(
1983 1988 b'smtp',
1984 1989 b'password',
1985 1990 default=None,
1986 1991 )
1987 1992 coreconfigitem(
1988 1993 b'smtp',
1989 1994 b'port',
1990 1995 default=dynamicdefault,
1991 1996 )
1992 1997 coreconfigitem(
1993 1998 b'smtp',
1994 1999 b'tls',
1995 2000 default=b'none',
1996 2001 )
1997 2002 coreconfigitem(
1998 2003 b'smtp',
1999 2004 b'username',
2000 2005 default=None,
2001 2006 )
2002 2007 coreconfigitem(
2003 2008 b'sparse',
2004 2009 b'missingwarning',
2005 2010 default=True,
2006 2011 experimental=True,
2007 2012 )
2008 2013 coreconfigitem(
2009 2014 b'subrepos',
2010 2015 b'allowed',
2011 2016 default=dynamicdefault, # to make backporting simpler
2012 2017 )
2013 2018 coreconfigitem(
2014 2019 b'subrepos',
2015 2020 b'hg:allowed',
2016 2021 default=dynamicdefault,
2017 2022 )
2018 2023 coreconfigitem(
2019 2024 b'subrepos',
2020 2025 b'git:allowed',
2021 2026 default=dynamicdefault,
2022 2027 )
2023 2028 coreconfigitem(
2024 2029 b'subrepos',
2025 2030 b'svn:allowed',
2026 2031 default=dynamicdefault,
2027 2032 )
2028 2033 coreconfigitem(
2029 2034 b'templates',
2030 2035 b'.*',
2031 2036 default=None,
2032 2037 generic=True,
2033 2038 )
2034 2039 coreconfigitem(
2035 2040 b'templateconfig',
2036 2041 b'.*',
2037 2042 default=dynamicdefault,
2038 2043 generic=True,
2039 2044 )
2040 2045 coreconfigitem(
2041 2046 b'trusted',
2042 2047 b'groups',
2043 2048 default=list,
2044 2049 )
2045 2050 coreconfigitem(
2046 2051 b'trusted',
2047 2052 b'users',
2048 2053 default=list,
2049 2054 )
2050 2055 coreconfigitem(
2051 2056 b'ui',
2052 2057 b'_usedassubrepo',
2053 2058 default=False,
2054 2059 )
2055 2060 coreconfigitem(
2056 2061 b'ui',
2057 2062 b'allowemptycommit',
2058 2063 default=False,
2059 2064 )
2060 2065 coreconfigitem(
2061 2066 b'ui',
2062 2067 b'archivemeta',
2063 2068 default=True,
2064 2069 )
2065 2070 coreconfigitem(
2066 2071 b'ui',
2067 2072 b'askusername',
2068 2073 default=False,
2069 2074 )
2070 2075 coreconfigitem(
2071 2076 b'ui',
2072 2077 b'available-memory',
2073 2078 default=None,
2074 2079 )
2075 2080
2076 2081 coreconfigitem(
2077 2082 b'ui',
2078 2083 b'clonebundlefallback',
2079 2084 default=False,
2080 2085 )
2081 2086 coreconfigitem(
2082 2087 b'ui',
2083 2088 b'clonebundleprefers',
2084 2089 default=list,
2085 2090 )
2086 2091 coreconfigitem(
2087 2092 b'ui',
2088 2093 b'clonebundles',
2089 2094 default=True,
2090 2095 )
2091 2096 coreconfigitem(
2092 2097 b'ui',
2093 2098 b'color',
2094 2099 default=b'auto',
2095 2100 )
2096 2101 coreconfigitem(
2097 2102 b'ui',
2098 2103 b'commitsubrepos',
2099 2104 default=False,
2100 2105 )
2101 2106 coreconfigitem(
2102 2107 b'ui',
2103 2108 b'debug',
2104 2109 default=False,
2105 2110 )
2106 2111 coreconfigitem(
2107 2112 b'ui',
2108 2113 b'debugger',
2109 2114 default=None,
2110 2115 )
2111 2116 coreconfigitem(
2112 2117 b'ui',
2113 2118 b'editor',
2114 2119 default=dynamicdefault,
2115 2120 )
2116 2121 coreconfigitem(
2117 2122 b'ui',
2118 2123 b'detailed-exit-code',
2119 2124 default=False,
2120 2125 experimental=True,
2121 2126 )
2122 2127 coreconfigitem(
2123 2128 b'ui',
2124 2129 b'fallbackencoding',
2125 2130 default=None,
2126 2131 )
2127 2132 coreconfigitem(
2128 2133 b'ui',
2129 2134 b'forcecwd',
2130 2135 default=None,
2131 2136 )
2132 2137 coreconfigitem(
2133 2138 b'ui',
2134 2139 b'forcemerge',
2135 2140 default=None,
2136 2141 )
2137 2142 coreconfigitem(
2138 2143 b'ui',
2139 2144 b'formatdebug',
2140 2145 default=False,
2141 2146 )
2142 2147 coreconfigitem(
2143 2148 b'ui',
2144 2149 b'formatjson',
2145 2150 default=False,
2146 2151 )
2147 2152 coreconfigitem(
2148 2153 b'ui',
2149 2154 b'formatted',
2150 2155 default=None,
2151 2156 )
2152 2157 coreconfigitem(
2153 2158 b'ui',
2154 2159 b'interactive',
2155 2160 default=None,
2156 2161 )
2157 2162 coreconfigitem(
2158 2163 b'ui',
2159 2164 b'interface',
2160 2165 default=None,
2161 2166 )
2162 2167 coreconfigitem(
2163 2168 b'ui',
2164 2169 b'interface.chunkselector',
2165 2170 default=None,
2166 2171 )
2167 2172 coreconfigitem(
2168 2173 b'ui',
2169 2174 b'large-file-limit',
2170 2175 default=10000000,
2171 2176 )
2172 2177 coreconfigitem(
2173 2178 b'ui',
2174 2179 b'logblockedtimes',
2175 2180 default=False,
2176 2181 )
2177 2182 coreconfigitem(
2178 2183 b'ui',
2179 2184 b'merge',
2180 2185 default=None,
2181 2186 )
2182 2187 coreconfigitem(
2183 2188 b'ui',
2184 2189 b'mergemarkers',
2185 2190 default=b'basic',
2186 2191 )
2187 2192 coreconfigitem(
2188 2193 b'ui',
2189 2194 b'message-output',
2190 2195 default=b'stdio',
2191 2196 )
2192 2197 coreconfigitem(
2193 2198 b'ui',
2194 2199 b'nontty',
2195 2200 default=False,
2196 2201 )
2197 2202 coreconfigitem(
2198 2203 b'ui',
2199 2204 b'origbackuppath',
2200 2205 default=None,
2201 2206 )
2202 2207 coreconfigitem(
2203 2208 b'ui',
2204 2209 b'paginate',
2205 2210 default=True,
2206 2211 )
2207 2212 coreconfigitem(
2208 2213 b'ui',
2209 2214 b'patch',
2210 2215 default=None,
2211 2216 )
2212 2217 coreconfigitem(
2213 2218 b'ui',
2214 2219 b'portablefilenames',
2215 2220 default=b'warn',
2216 2221 )
2217 2222 coreconfigitem(
2218 2223 b'ui',
2219 2224 b'promptecho',
2220 2225 default=False,
2221 2226 )
2222 2227 coreconfigitem(
2223 2228 b'ui',
2224 2229 b'quiet',
2225 2230 default=False,
2226 2231 )
2227 2232 coreconfigitem(
2228 2233 b'ui',
2229 2234 b'quietbookmarkmove',
2230 2235 default=False,
2231 2236 )
2232 2237 coreconfigitem(
2233 2238 b'ui',
2234 2239 b'relative-paths',
2235 2240 default=b'legacy',
2236 2241 )
2237 2242 coreconfigitem(
2238 2243 b'ui',
2239 2244 b'remotecmd',
2240 2245 default=b'hg',
2241 2246 )
2242 2247 coreconfigitem(
2243 2248 b'ui',
2244 2249 b'report_untrusted',
2245 2250 default=True,
2246 2251 )
2247 2252 coreconfigitem(
2248 2253 b'ui',
2249 2254 b'rollback',
2250 2255 default=True,
2251 2256 )
2252 2257 coreconfigitem(
2253 2258 b'ui',
2254 2259 b'signal-safe-lock',
2255 2260 default=True,
2256 2261 )
2257 2262 coreconfigitem(
2258 2263 b'ui',
2259 2264 b'slash',
2260 2265 default=False,
2261 2266 )
2262 2267 coreconfigitem(
2263 2268 b'ui',
2264 2269 b'ssh',
2265 2270 default=b'ssh',
2266 2271 )
2267 2272 coreconfigitem(
2268 2273 b'ui',
2269 2274 b'ssherrorhint',
2270 2275 default=None,
2271 2276 )
2272 2277 coreconfigitem(
2273 2278 b'ui',
2274 2279 b'statuscopies',
2275 2280 default=False,
2276 2281 )
2277 2282 coreconfigitem(
2278 2283 b'ui',
2279 2284 b'strict',
2280 2285 default=False,
2281 2286 )
2282 2287 coreconfigitem(
2283 2288 b'ui',
2284 2289 b'style',
2285 2290 default=b'',
2286 2291 )
2287 2292 coreconfigitem(
2288 2293 b'ui',
2289 2294 b'supportcontact',
2290 2295 default=None,
2291 2296 )
2292 2297 coreconfigitem(
2293 2298 b'ui',
2294 2299 b'textwidth',
2295 2300 default=78,
2296 2301 )
2297 2302 coreconfigitem(
2298 2303 b'ui',
2299 2304 b'timeout',
2300 2305 default=b'600',
2301 2306 )
2302 2307 coreconfigitem(
2303 2308 b'ui',
2304 2309 b'timeout.warn',
2305 2310 default=0,
2306 2311 )
2307 2312 coreconfigitem(
2308 2313 b'ui',
2309 2314 b'timestamp-output',
2310 2315 default=False,
2311 2316 )
2312 2317 coreconfigitem(
2313 2318 b'ui',
2314 2319 b'traceback',
2315 2320 default=False,
2316 2321 )
2317 2322 coreconfigitem(
2318 2323 b'ui',
2319 2324 b'tweakdefaults',
2320 2325 default=False,
2321 2326 )
2322 2327 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2323 2328 coreconfigitem(
2324 2329 b'ui',
2325 2330 b'verbose',
2326 2331 default=False,
2327 2332 )
2328 2333 coreconfigitem(
2329 2334 b'verify',
2330 2335 b'skipflags',
2331 2336 default=None,
2332 2337 )
2333 2338 coreconfigitem(
2334 2339 b'web',
2335 2340 b'allowbz2',
2336 2341 default=False,
2337 2342 )
2338 2343 coreconfigitem(
2339 2344 b'web',
2340 2345 b'allowgz',
2341 2346 default=False,
2342 2347 )
2343 2348 coreconfigitem(
2344 2349 b'web',
2345 2350 b'allow-pull',
2346 2351 alias=[(b'web', b'allowpull')],
2347 2352 default=True,
2348 2353 )
2349 2354 coreconfigitem(
2350 2355 b'web',
2351 2356 b'allow-push',
2352 2357 alias=[(b'web', b'allow_push')],
2353 2358 default=list,
2354 2359 )
2355 2360 coreconfigitem(
2356 2361 b'web',
2357 2362 b'allowzip',
2358 2363 default=False,
2359 2364 )
2360 2365 coreconfigitem(
2361 2366 b'web',
2362 2367 b'archivesubrepos',
2363 2368 default=False,
2364 2369 )
2365 2370 coreconfigitem(
2366 2371 b'web',
2367 2372 b'cache',
2368 2373 default=True,
2369 2374 )
2370 2375 coreconfigitem(
2371 2376 b'web',
2372 2377 b'comparisoncontext',
2373 2378 default=5,
2374 2379 )
2375 2380 coreconfigitem(
2376 2381 b'web',
2377 2382 b'contact',
2378 2383 default=None,
2379 2384 )
2380 2385 coreconfigitem(
2381 2386 b'web',
2382 2387 b'deny_push',
2383 2388 default=list,
2384 2389 )
2385 2390 coreconfigitem(
2386 2391 b'web',
2387 2392 b'guessmime',
2388 2393 default=False,
2389 2394 )
2390 2395 coreconfigitem(
2391 2396 b'web',
2392 2397 b'hidden',
2393 2398 default=False,
2394 2399 )
2395 2400 coreconfigitem(
2396 2401 b'web',
2397 2402 b'labels',
2398 2403 default=list,
2399 2404 )
2400 2405 coreconfigitem(
2401 2406 b'web',
2402 2407 b'logoimg',
2403 2408 default=b'hglogo.png',
2404 2409 )
2405 2410 coreconfigitem(
2406 2411 b'web',
2407 2412 b'logourl',
2408 2413 default=b'https://mercurial-scm.org/',
2409 2414 )
2410 2415 coreconfigitem(
2411 2416 b'web',
2412 2417 b'accesslog',
2413 2418 default=b'-',
2414 2419 )
2415 2420 coreconfigitem(
2416 2421 b'web',
2417 2422 b'address',
2418 2423 default=b'',
2419 2424 )
2420 2425 coreconfigitem(
2421 2426 b'web',
2422 2427 b'allow-archive',
2423 2428 alias=[(b'web', b'allow_archive')],
2424 2429 default=list,
2425 2430 )
2426 2431 coreconfigitem(
2427 2432 b'web',
2428 2433 b'allow_read',
2429 2434 default=list,
2430 2435 )
2431 2436 coreconfigitem(
2432 2437 b'web',
2433 2438 b'baseurl',
2434 2439 default=None,
2435 2440 )
2436 2441 coreconfigitem(
2437 2442 b'web',
2438 2443 b'cacerts',
2439 2444 default=None,
2440 2445 )
2441 2446 coreconfigitem(
2442 2447 b'web',
2443 2448 b'certificate',
2444 2449 default=None,
2445 2450 )
2446 2451 coreconfigitem(
2447 2452 b'web',
2448 2453 b'collapse',
2449 2454 default=False,
2450 2455 )
2451 2456 coreconfigitem(
2452 2457 b'web',
2453 2458 b'csp',
2454 2459 default=None,
2455 2460 )
2456 2461 coreconfigitem(
2457 2462 b'web',
2458 2463 b'deny_read',
2459 2464 default=list,
2460 2465 )
2461 2466 coreconfigitem(
2462 2467 b'web',
2463 2468 b'descend',
2464 2469 default=True,
2465 2470 )
2466 2471 coreconfigitem(
2467 2472 b'web',
2468 2473 b'description',
2469 2474 default=b"",
2470 2475 )
2471 2476 coreconfigitem(
2472 2477 b'web',
2473 2478 b'encoding',
2474 2479 default=lambda: encoding.encoding,
2475 2480 )
2476 2481 coreconfigitem(
2477 2482 b'web',
2478 2483 b'errorlog',
2479 2484 default=b'-',
2480 2485 )
2481 2486 coreconfigitem(
2482 2487 b'web',
2483 2488 b'ipv6',
2484 2489 default=False,
2485 2490 )
2486 2491 coreconfigitem(
2487 2492 b'web',
2488 2493 b'maxchanges',
2489 2494 default=10,
2490 2495 )
2491 2496 coreconfigitem(
2492 2497 b'web',
2493 2498 b'maxfiles',
2494 2499 default=10,
2495 2500 )
2496 2501 coreconfigitem(
2497 2502 b'web',
2498 2503 b'maxshortchanges',
2499 2504 default=60,
2500 2505 )
2501 2506 coreconfigitem(
2502 2507 b'web',
2503 2508 b'motd',
2504 2509 default=b'',
2505 2510 )
2506 2511 coreconfigitem(
2507 2512 b'web',
2508 2513 b'name',
2509 2514 default=dynamicdefault,
2510 2515 )
2511 2516 coreconfigitem(
2512 2517 b'web',
2513 2518 b'port',
2514 2519 default=8000,
2515 2520 )
2516 2521 coreconfigitem(
2517 2522 b'web',
2518 2523 b'prefix',
2519 2524 default=b'',
2520 2525 )
2521 2526 coreconfigitem(
2522 2527 b'web',
2523 2528 b'push_ssl',
2524 2529 default=True,
2525 2530 )
2526 2531 coreconfigitem(
2527 2532 b'web',
2528 2533 b'refreshinterval',
2529 2534 default=20,
2530 2535 )
2531 2536 coreconfigitem(
2532 2537 b'web',
2533 2538 b'server-header',
2534 2539 default=None,
2535 2540 )
2536 2541 coreconfigitem(
2537 2542 b'web',
2538 2543 b'static',
2539 2544 default=None,
2540 2545 )
2541 2546 coreconfigitem(
2542 2547 b'web',
2543 2548 b'staticurl',
2544 2549 default=None,
2545 2550 )
2546 2551 coreconfigitem(
2547 2552 b'web',
2548 2553 b'stripes',
2549 2554 default=1,
2550 2555 )
2551 2556 coreconfigitem(
2552 2557 b'web',
2553 2558 b'style',
2554 2559 default=b'paper',
2555 2560 )
2556 2561 coreconfigitem(
2557 2562 b'web',
2558 2563 b'templates',
2559 2564 default=None,
2560 2565 )
2561 2566 coreconfigitem(
2562 2567 b'web',
2563 2568 b'view',
2564 2569 default=b'served',
2565 2570 experimental=True,
2566 2571 )
2567 2572 coreconfigitem(
2568 2573 b'worker',
2569 2574 b'backgroundclose',
2570 2575 default=dynamicdefault,
2571 2576 )
2572 2577 # Windows defaults to a limit of 512 open files. A buffer of 128
2573 2578 # should give us enough headway.
2574 2579 coreconfigitem(
2575 2580 b'worker',
2576 2581 b'backgroundclosemaxqueue',
2577 2582 default=384,
2578 2583 )
2579 2584 coreconfigitem(
2580 2585 b'worker',
2581 2586 b'backgroundcloseminfilecount',
2582 2587 default=2048,
2583 2588 )
2584 2589 coreconfigitem(
2585 2590 b'worker',
2586 2591 b'backgroundclosethreadcount',
2587 2592 default=4,
2588 2593 )
2589 2594 coreconfigitem(
2590 2595 b'worker',
2591 2596 b'enabled',
2592 2597 default=True,
2593 2598 )
2594 2599 coreconfigitem(
2595 2600 b'worker',
2596 2601 b'numcpus',
2597 2602 default=None,
2598 2603 )
2599 2604
2600 2605 # Rebase related configuration moved to core because other extensions are doing
2601 2606 # strange things. For example, shelve imports the extension to reuse some bits
2602 2607 # without formally loading it.
2603 2608 coreconfigitem(
2604 2609 b'commands',
2605 2610 b'rebase.requiredest',
2606 2611 default=False,
2607 2612 )
2608 2613 coreconfigitem(
2609 2614 b'experimental',
2610 2615 b'rebaseskipobsolete',
2611 2616 default=True,
2612 2617 )
2613 2618 coreconfigitem(
2614 2619 b'rebase',
2615 2620 b'singletransaction',
2616 2621 default=False,
2617 2622 )
2618 2623 coreconfigitem(
2619 2624 b'rebase',
2620 2625 b'experimental.inmemory',
2621 2626 default=False,
2622 2627 )
@@ -1,3692 +1,3698 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revlog,
63 63 revset,
64 64 revsetlang,
65 65 scmutil,
66 66 sparse,
67 67 store as storemod,
68 68 subrepoutil,
69 69 tags as tagsmod,
70 70 transaction,
71 71 txnutil,
72 72 util,
73 73 vfs as vfsmod,
74 74 )
75 75
76 76 from .interfaces import (
77 77 repository,
78 78 util as interfaceutil,
79 79 )
80 80
81 81 from .utils import (
82 82 hashutil,
83 83 procutil,
84 84 stringutil,
85 85 )
86 86
87 from .revlogutils import constants as revlogconst
87 from .revlogutils import (
88 concurrency_checker as revlogchecker,
89 constants as revlogconst,
90 )
88 91
89 92 release = lockmod.release
90 93 urlerr = util.urlerr
91 94 urlreq = util.urlreq
92 95
93 96 # set of (path, vfs-location) tuples. vfs-location is:
94 97 # - 'plain' for vfs relative paths
95 98 # - '' for svfs relative paths
96 99 _cachedfiles = set()
97 100
98 101
99 102 class _basefilecache(scmutil.filecache):
100 103 """All filecache usage on repo are done for logic that should be unfiltered"""
101 104
102 105 def __get__(self, repo, type=None):
103 106 if repo is None:
104 107 return self
105 108 # proxy to unfiltered __dict__ since filtered repo has no entry
106 109 unfi = repo.unfiltered()
107 110 try:
108 111 return unfi.__dict__[self.sname]
109 112 except KeyError:
110 113 pass
111 114 return super(_basefilecache, self).__get__(unfi, type)
112 115
113 116 def set(self, repo, value):
114 117 return super(_basefilecache, self).set(repo.unfiltered(), value)
115 118
116 119
117 120 class repofilecache(_basefilecache):
118 121 """filecache for files in .hg but outside of .hg/store"""
119 122
120 123 def __init__(self, *paths):
121 124 super(repofilecache, self).__init__(*paths)
122 125 for path in paths:
123 126 _cachedfiles.add((path, b'plain'))
124 127
125 128 def join(self, obj, fname):
126 129 return obj.vfs.join(fname)
127 130
128 131
129 132 class storecache(_basefilecache):
130 133 """filecache for files in the store"""
131 134
132 135 def __init__(self, *paths):
133 136 super(storecache, self).__init__(*paths)
134 137 for path in paths:
135 138 _cachedfiles.add((path, b''))
136 139
137 140 def join(self, obj, fname):
138 141 return obj.sjoin(fname)
139 142
140 143
141 144 class mixedrepostorecache(_basefilecache):
142 145 """filecache for a mix files in .hg/store and outside"""
143 146
144 147 def __init__(self, *pathsandlocations):
145 148 # scmutil.filecache only uses the path for passing back into our
146 149 # join(), so we can safely pass a list of paths and locations
147 150 super(mixedrepostorecache, self).__init__(*pathsandlocations)
148 151 _cachedfiles.update(pathsandlocations)
149 152
150 153 def join(self, obj, fnameandlocation):
151 154 fname, location = fnameandlocation
152 155 if location == b'plain':
153 156 return obj.vfs.join(fname)
154 157 else:
155 158 if location != b'':
156 159 raise error.ProgrammingError(
157 160 b'unexpected location: %s' % location
158 161 )
159 162 return obj.sjoin(fname)
160 163
161 164
162 165 def isfilecached(repo, name):
163 166 """check if a repo has already cached "name" filecache-ed property
164 167
165 168 This returns (cachedobj-or-None, iscached) tuple.
166 169 """
167 170 cacheentry = repo.unfiltered()._filecache.get(name, None)
168 171 if not cacheentry:
169 172 return None, False
170 173 return cacheentry.obj, True
171 174
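A minimal usage sketch for the helper above (the caller and the 'changelog' property name are purely illustrative):

    obj, cached = isfilecached(repo, 'changelog')
    if cached:
        # the filecache-ed property is already populated; using `obj` will not
        # trigger a fresh read from disk
        pass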
172 175
173 176 class unfilteredpropertycache(util.propertycache):
174 177 """propertycache that apply to unfiltered repo only"""
175 178
176 179 def __get__(self, repo, type=None):
177 180 unfi = repo.unfiltered()
178 181 if unfi is repo:
179 182 return super(unfilteredpropertycache, self).__get__(unfi)
180 183 return getattr(unfi, self.name)
181 184
182 185
183 186 class filteredpropertycache(util.propertycache):
184 187 """propertycache that must take filtering in account"""
185 188
186 189 def cachevalue(self, obj, value):
187 190 object.__setattr__(obj, self.name, value)
188 191
189 192
190 193 def hasunfilteredcache(repo, name):
191 194 """check if a repo has an unfilteredpropertycache value for <name>"""
192 195 return name in vars(repo.unfiltered())
193 196
194 197
195 198 def unfilteredmethod(orig):
196 199 """decorate method that always need to be run on unfiltered version"""
197 200
198 201 @functools.wraps(orig)
199 202 def wrapper(repo, *args, **kwargs):
200 203 return orig(repo.unfiltered(), *args, **kwargs)
201 204
202 205 return wrapper
203 206
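For illustration, a hypothetical class providing `unfiltered()` (as the real repository classes do) and using the decorator above; it is not part of this module and only shows the intended usage:

    class examplerepo(object):
        def unfiltered(self):
            return self  # the real repo classes return the unfiltered view here

        @unfilteredmethod
        def destroyed(self):
            # whichever view this was invoked on, `self` here is whatever
            # unfiltered() returned
            pass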
204 207
205 208 moderncaps = {
206 209 b'lookup',
207 210 b'branchmap',
208 211 b'pushkey',
209 212 b'known',
210 213 b'getbundle',
211 214 b'unbundle',
212 215 }
213 216 legacycaps = moderncaps.union({b'changegroupsubset'})
214 217
215 218
216 219 @interfaceutil.implementer(repository.ipeercommandexecutor)
217 220 class localcommandexecutor(object):
218 221 def __init__(self, peer):
219 222 self._peer = peer
220 223 self._sent = False
221 224 self._closed = False
222 225
223 226 def __enter__(self):
224 227 return self
225 228
226 229 def __exit__(self, exctype, excvalue, exctb):
227 230 self.close()
228 231
229 232 def callcommand(self, command, args):
230 233 if self._sent:
231 234 raise error.ProgrammingError(
232 235 b'callcommand() cannot be used after sendcommands()'
233 236 )
234 237
235 238 if self._closed:
236 239 raise error.ProgrammingError(
237 240 b'callcommand() cannot be used after close()'
238 241 )
239 242
240 243 # We don't need to support anything fancy. Just call the named
241 244 # method on the peer and return a resolved future.
242 245 fn = getattr(self._peer, pycompat.sysstr(command))
243 246
244 247 f = pycompat.futures.Future()
245 248
246 249 try:
247 250 result = fn(**pycompat.strkwargs(args))
248 251 except Exception:
249 252 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
250 253 else:
251 254 f.set_result(result)
252 255
253 256 return f
254 257
255 258 def sendcommands(self):
256 259 self._sent = True
257 260
258 261 def close(self):
259 262 self._closed = True
260 263
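A hedged sketch of how a caller drives this executor through the generic peer API, assuming `peer` is a peer instance such as the localpeer below (the command name and argument are only examples; `lookup` is one of the commands the local peer exposes):

    with peer.commandexecutor() as executor:
        f = executor.callcommand(b'lookup', {b'key': b'tip'})
    # for the local peer the future is already resolved, so this does not block
    node = f.result()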
261 264
262 265 @interfaceutil.implementer(repository.ipeercommands)
263 266 class localpeer(repository.peer):
264 267 '''peer for a local repo; reflects only the most recent API'''
265 268
266 269 def __init__(self, repo, caps=None):
267 270 super(localpeer, self).__init__()
268 271
269 272 if caps is None:
270 273 caps = moderncaps.copy()
271 274 self._repo = repo.filtered(b'served')
272 275 self.ui = repo.ui
273 276 self._caps = repo._restrictcapabilities(caps)
274 277
275 278 # Begin of _basepeer interface.
276 279
277 280 def url(self):
278 281 return self._repo.url()
279 282
280 283 def local(self):
281 284 return self._repo
282 285
283 286 def peer(self):
284 287 return self
285 288
286 289 def canpush(self):
287 290 return True
288 291
289 292 def close(self):
290 293 self._repo.close()
291 294
292 295 # End of _basepeer interface.
293 296
294 297 # Begin of _basewirecommands interface.
295 298
296 299 def branchmap(self):
297 300 return self._repo.branchmap()
298 301
299 302 def capabilities(self):
300 303 return self._caps
301 304
302 305 def clonebundles(self):
303 306 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
304 307
305 308 def debugwireargs(self, one, two, three=None, four=None, five=None):
306 309 """Used to test argument passing over the wire"""
307 310 return b"%s %s %s %s %s" % (
308 311 one,
309 312 two,
310 313 pycompat.bytestr(three),
311 314 pycompat.bytestr(four),
312 315 pycompat.bytestr(five),
313 316 )
314 317
315 318 def getbundle(
316 319 self, source, heads=None, common=None, bundlecaps=None, **kwargs
317 320 ):
318 321 chunks = exchange.getbundlechunks(
319 322 self._repo,
320 323 source,
321 324 heads=heads,
322 325 common=common,
323 326 bundlecaps=bundlecaps,
324 327 **kwargs
325 328 )[1]
326 329 cb = util.chunkbuffer(chunks)
327 330
328 331 if exchange.bundle2requested(bundlecaps):
329 332 # When requesting a bundle2, getbundle returns a stream to make the
330 333 # wire level function happier. We need to build a proper object
331 334 # from it in local peer.
332 335 return bundle2.getunbundler(self.ui, cb)
333 336 else:
334 337 return changegroup.getunbundler(b'01', cb, None)
335 338
336 339 def heads(self):
337 340 return self._repo.heads()
338 341
339 342 def known(self, nodes):
340 343 return self._repo.known(nodes)
341 344
342 345 def listkeys(self, namespace):
343 346 return self._repo.listkeys(namespace)
344 347
345 348 def lookup(self, key):
346 349 return self._repo.lookup(key)
347 350
348 351 def pushkey(self, namespace, key, old, new):
349 352 return self._repo.pushkey(namespace, key, old, new)
350 353
351 354 def stream_out(self):
352 355 raise error.Abort(_(b'cannot perform stream clone against local peer'))
353 356
354 357 def unbundle(self, bundle, heads, url):
355 358 """apply a bundle on a repo
356 359
357 360 This function handles the repo locking itself."""
358 361 try:
359 362 try:
360 363 bundle = exchange.readbundle(self.ui, bundle, None)
361 364 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
362 365 if util.safehasattr(ret, b'getchunks'):
363 366 # This is a bundle20 object, turn it into an unbundler.
364 367 # This little dance should be dropped eventually when the
365 368 # API is finally improved.
366 369 stream = util.chunkbuffer(ret.getchunks())
367 370 ret = bundle2.getunbundler(self.ui, stream)
368 371 return ret
369 372 except Exception as exc:
370 373 # If the exception contains output salvaged from a bundle2
371 374 # reply, we need to make sure it is printed before continuing
372 375 # to fail. So we build a bundle2 with such output and consume
373 376 # it directly.
374 377 #
375 378 # This is not very elegant but allows a "simple" solution for
376 379 # issue4594
377 380 output = getattr(exc, '_bundle2salvagedoutput', ())
378 381 if output:
379 382 bundler = bundle2.bundle20(self._repo.ui)
380 383 for out in output:
381 384 bundler.addpart(out)
382 385 stream = util.chunkbuffer(bundler.getchunks())
383 386 b = bundle2.getunbundler(self.ui, stream)
384 387 bundle2.processbundle(self._repo, b)
385 388 raise
386 389 except error.PushRaced as exc:
387 390 raise error.ResponseError(
388 391 _(b'push failed:'), stringutil.forcebytestr(exc)
389 392 )
390 393
391 394 # End of _basewirecommands interface.
392 395
393 396 # Begin of peer interface.
394 397
395 398 def commandexecutor(self):
396 399 return localcommandexecutor(self)
397 400
398 401 # End of peer interface.
399 402
400 403
401 404 @interfaceutil.implementer(repository.ipeerlegacycommands)
402 405 class locallegacypeer(localpeer):
403 406 """peer extension which implements legacy methods too; used for tests with
404 407 restricted capabilities"""
405 408
406 409 def __init__(self, repo):
407 410 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
408 411
409 412 # Begin of baselegacywirecommands interface.
410 413
411 414 def between(self, pairs):
412 415 return self._repo.between(pairs)
413 416
414 417 def branches(self, nodes):
415 418 return self._repo.branches(nodes)
416 419
417 420 def changegroup(self, nodes, source):
418 421 outgoing = discovery.outgoing(
419 422 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
420 423 )
421 424 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422 425
423 426 def changegroupsubset(self, bases, heads, source):
424 427 outgoing = discovery.outgoing(
425 428 self._repo, missingroots=bases, ancestorsof=heads
426 429 )
427 430 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
428 431
429 432 # End of baselegacywirecommands interface.
430 433
431 434
432 435 # Functions receiving (ui, features) that extensions can register to impact
433 436 # the ability to load repositories with custom requirements. Only
434 437 # functions defined in loaded extensions are called.
435 438 #
436 439 # The function receives a set of requirement strings that the repository
437 440 # is capable of opening. Functions will typically add elements to the
438 441 # set to reflect that the extension knows how to handle those requirements.
439 442 featuresetupfuncs = set()
440 443
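An extension typically registers into this set at setup time; a hedged, extension-side sketch (the feature name is made up):

    # in a hypothetical extension module:
    def featuresetup(ui, features):
        # advertise that this extension knows how to handle an extra requirement
        features.add(b'exp-myextension-requirement')

    # registration, usually performed once when the extension loads:
    # localrepo.featuresetupfuncs.add(featuresetup)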
441 444
442 445 def _getsharedvfs(hgvfs, requirements):
443 446 """returns the vfs object pointing to root of shared source
444 447 repo for a shared repository
445 448
446 449 hgvfs is vfs pointing at .hg/ of current repo (shared one)
447 450 requirements is a set of requirements of current repo (shared one)
448 451 """
449 452 # The ``shared`` or ``relshared`` requirements indicate the
450 453 # store lives in the path contained in the ``.hg/sharedpath`` file.
451 454 # This is an absolute path for ``shared`` and relative to
452 455 # ``.hg/`` for ``relshared``.
453 456 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
454 457 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
455 458 sharedpath = hgvfs.join(sharedpath)
456 459
457 460 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
458 461
459 462 if not sharedvfs.exists():
460 463 raise error.RepoError(
461 464 _(b'.hg/sharedpath points to nonexistent directory %s')
462 465 % sharedvfs.base
463 466 )
464 467 return sharedvfs
465 468
466 469
467 470 def _readrequires(vfs, allowmissing):
468 471 """reads the require file present at root of this vfs
469 472 and return a set of requirements
470 473
471 474 If allowmissing is True, we suppress ENOENT if raised"""
472 475 # requires file contains a newline-delimited list of
473 476 # features/capabilities the opener (us) must have in order to use
474 477 # the repository. This file was introduced in Mercurial 0.9.2,
475 478 # which means very old repositories may not have one. We assume
476 479 # a missing file translates to no requirements.
477 480 try:
478 481 requirements = set(vfs.read(b'requires').splitlines())
479 482 except IOError as e:
480 483 if not (allowmissing and e.errno == errno.ENOENT):
481 484 raise
482 485 requirements = set()
483 486 return requirements
484 487
485 488
486 489 def makelocalrepository(baseui, path, intents=None):
487 490 """Create a local repository object.
488 491
489 492 Given arguments needed to construct a local repository, this function
490 493 performs various early repository loading functionality (such as
491 494 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
492 495 the repository can be opened, derives a type suitable for representing
493 496 that repository, and returns an instance of it.
494 497
495 498 The returned object conforms to the ``repository.completelocalrepository``
496 499 interface.
497 500
498 501 The repository type is derived by calling a series of factory functions
499 502 for each aspect/interface of the final repository. These are defined by
500 503 ``REPO_INTERFACES``.
501 504
502 505 Each factory function is called to produce a type implementing a specific
503 506 interface. The cumulative list of returned types will be combined into a
504 507 new type and that type will be instantiated to represent the local
505 508 repository.
506 509
507 510 The factory functions each receive various state that may be consulted
508 511 as part of deriving a type.
509 512
510 513 Extensions should wrap these factory functions to customize repository type
511 514 creation. Note that an extension's wrapped function may be called even if
512 515 that extension is not loaded for the repo being constructed. Extensions
513 516 should check if their ``__name__`` appears in the
514 517 ``extensionmodulenames`` set passed to the factory function and no-op if
515 518 not.
516 519 """
517 520 ui = baseui.copy()
518 521 # Prevent copying repo configuration.
519 522 ui.copy = baseui.copy
520 523
521 524 # Working directory VFS rooted at repository root.
522 525 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
523 526
524 527 # Main VFS for .hg/ directory.
525 528 hgpath = wdirvfs.join(b'.hg')
526 529 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
527 530     # Whether this repository is a shared one or not
528 531 shared = False
529 532 # If this repository is shared, vfs pointing to shared repo
530 533 sharedvfs = None
531 534
532 535 # The .hg/ path should exist and should be a directory. All other
533 536 # cases are errors.
534 537 if not hgvfs.isdir():
535 538 try:
536 539 hgvfs.stat()
537 540 except OSError as e:
538 541 if e.errno != errno.ENOENT:
539 542 raise
540 543 except ValueError as e:
541 544 # Can be raised on Python 3.8 when path is invalid.
542 545 raise error.Abort(
543 546 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
544 547 )
545 548
546 549 raise error.RepoError(_(b'repository %s not found') % path)
547 550
548 551 requirements = _readrequires(hgvfs, True)
549 552 shared = (
550 553 requirementsmod.SHARED_REQUIREMENT in requirements
551 554 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
552 555 )
553 556 storevfs = None
554 557 if shared:
555 558 # This is a shared repo
556 559 sharedvfs = _getsharedvfs(hgvfs, requirements)
557 560 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
558 561 else:
559 562 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
560 563
561 564 # if .hg/requires contains the sharesafe requirement, it means
562 565 # there exists a `.hg/store/requires` too and we should read it
563 566     # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
564 567     # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
565 568     # is not present; refer to checkrequirementscompat() for that
566 569 #
567 570 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
568 571 # repository was shared the old way. We check the share source .hg/requires
569 572 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
570 573 # to be reshared
571 574 hint = _("see `hg help config.format.use-share-safe` for more information")
572 575 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
573 576
574 577 if (
575 578 shared
576 579 and requirementsmod.SHARESAFE_REQUIREMENT
577 580 not in _readrequires(sharedvfs, True)
578 581 ):
579 582 mismatch_warn = ui.configbool(
580 583 b'share', b'safe-mismatch.source-not-safe.warn'
581 584 )
582 585 mismatch_config = ui.config(
583 586 b'share', b'safe-mismatch.source-not-safe'
584 587 )
585 588 if mismatch_config in (
586 589 b'downgrade-allow',
587 590 b'allow',
588 591 b'downgrade-abort',
589 592 ):
590 593 # prevent cyclic import localrepo -> upgrade -> localrepo
591 594 from . import upgrade
592 595
593 596 upgrade.downgrade_share_to_non_safe(
594 597 ui,
595 598 hgvfs,
596 599 sharedvfs,
597 600 requirements,
598 601 mismatch_config,
599 602 mismatch_warn,
600 603 )
601 604 elif mismatch_config == b'abort':
602 605 raise error.Abort(
603 606 _(b"share source does not support share-safe requirement"),
604 607 hint=hint,
605 608 )
606 609 else:
607 610 raise error.Abort(
608 611 _(
609 612 b"share-safe mismatch with source.\nUnrecognized"
610 613 b" value '%s' of `share.safe-mismatch.source-not-safe`"
611 614 b" set."
612 615 )
613 616 % mismatch_config,
614 617 hint=hint,
615 618 )
616 619 else:
617 620 requirements |= _readrequires(storevfs, False)
618 621 elif shared:
619 622 sourcerequires = _readrequires(sharedvfs, False)
620 623 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
621 624 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
622 625 mismatch_warn = ui.configbool(
623 626 b'share', b'safe-mismatch.source-safe.warn'
624 627 )
625 628 if mismatch_config in (
626 629 b'upgrade-allow',
627 630 b'allow',
628 631 b'upgrade-abort',
629 632 ):
630 633 # prevent cyclic import localrepo -> upgrade -> localrepo
631 634 from . import upgrade
632 635
633 636 upgrade.upgrade_share_to_safe(
634 637 ui,
635 638 hgvfs,
636 639 storevfs,
637 640 requirements,
638 641 mismatch_config,
639 642 mismatch_warn,
640 643 )
641 644 elif mismatch_config == b'abort':
642 645 raise error.Abort(
643 646 _(
644 647 b'version mismatch: source uses share-safe'
645 648 b' functionality while the current share does not'
646 649 ),
647 650 hint=hint,
648 651 )
649 652 else:
650 653 raise error.Abort(
651 654 _(
652 655 b"share-safe mismatch with source.\nUnrecognized"
653 656 b" value '%s' of `share.safe-mismatch.source-safe` set."
654 657 )
655 658 % mismatch_config,
656 659 hint=hint,
657 660 )
658 661
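    # For reference, the mismatch handling above is driven by configuration
    # of roughly this shape (the values shown are examples drawn from the
    # accepted values checked above):
    #
    #     [share]
    #     safe-mismatch.source-not-safe = downgrade-abort
    #     safe-mismatch.source-not-safe.warn = True
    #     safe-mismatch.source-safe = upgrade-allow
    #     safe-mismatch.source-safe.warn = True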
659 662 # The .hg/hgrc file may load extensions or contain config options
660 663 # that influence repository construction. Attempt to load it and
661 664 # process any new extensions that it may have pulled in.
662 665 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
663 666 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
664 667 extensions.loadall(ui)
665 668 extensions.populateui(ui)
666 669
667 670 # Set of module names of extensions loaded for this repository.
668 671 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
669 672
670 673 supportedrequirements = gathersupportedrequirements(ui)
671 674
672 675 # We first validate the requirements are known.
673 676 ensurerequirementsrecognized(requirements, supportedrequirements)
674 677
675 678 # Then we validate that the known set is reasonable to use together.
676 679 ensurerequirementscompatible(ui, requirements)
677 680
678 681 # TODO there are unhandled edge cases related to opening repositories with
679 682 # shared storage. If storage is shared, we should also test for requirements
680 683 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
681 684 # that repo, as that repo may load extensions needed to open it. This is a
682 685 # bit complicated because we don't want the other hgrc to overwrite settings
683 686 # in this hgrc.
684 687 #
685 688 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
686 689 # file when sharing repos. But if a requirement is added after the share is
687 690 # performed, thereby introducing a new requirement for the opener, we may
688 691     # not see that and could encounter a run-time error interacting with
689 692 # that shared store since it has an unknown-to-us requirement.
690 693
691 694 # At this point, we know we should be capable of opening the repository.
692 695 # Now get on with doing that.
693 696
694 697 features = set()
695 698
696 699 # The "store" part of the repository holds versioned data. How it is
697 700 # accessed is determined by various requirements. If `shared` or
698 701     # `relshared` requirements are present, this indicates the current repository
699 702     # is a share and the store exists in the path mentioned in `.hg/sharedpath`
700 703 if shared:
701 704 storebasepath = sharedvfs.base
702 705 cachepath = sharedvfs.join(b'cache')
703 706 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
704 707 else:
705 708 storebasepath = hgvfs.base
706 709 cachepath = hgvfs.join(b'cache')
707 710 wcachepath = hgvfs.join(b'wcache')
708 711
709 712 # The store has changed over time and the exact layout is dictated by
710 713 # requirements. The store interface abstracts differences across all
711 714 # of them.
712 715 store = makestore(
713 716 requirements,
714 717 storebasepath,
715 718 lambda base: vfsmod.vfs(base, cacheaudited=True),
716 719 )
717 720 hgvfs.createmode = store.createmode
718 721
719 722 storevfs = store.vfs
720 723 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
721 724
722 725 # The cache vfs is used to manage cache files.
723 726 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
724 727 cachevfs.createmode = store.createmode
725 728 # The cache vfs is used to manage cache files related to the working copy
726 729 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
727 730 wcachevfs.createmode = store.createmode
728 731
729 732 # Now resolve the type for the repository object. We do this by repeatedly
730 733     # calling a factory function to produce types for specific aspects of the
731 734 # repo's operation. The aggregate returned types are used as base classes
732 735 # for a dynamically-derived type, which will represent our new repository.
733 736
734 737 bases = []
735 738 extrastate = {}
736 739
737 740 for iface, fn in REPO_INTERFACES:
738 741 # We pass all potentially useful state to give extensions tons of
739 742 # flexibility.
740 743 typ = fn()(
741 744 ui=ui,
742 745 intents=intents,
743 746 requirements=requirements,
744 747 features=features,
745 748 wdirvfs=wdirvfs,
746 749 hgvfs=hgvfs,
747 750 store=store,
748 751 storevfs=storevfs,
749 752 storeoptions=storevfs.options,
750 753 cachevfs=cachevfs,
751 754 wcachevfs=wcachevfs,
752 755 extensionmodulenames=extensionmodulenames,
753 756 extrastate=extrastate,
754 757 baseclasses=bases,
755 758 )
756 759
757 760 if not isinstance(typ, type):
758 761 raise error.ProgrammingError(
759 762 b'unable to construct type for %s' % iface
760 763 )
761 764
762 765 bases.append(typ)
763 766
764 767 # type() allows you to use characters in type names that wouldn't be
765 768 # recognized as Python symbols in source code. We abuse that to add
766 769 # rich information about our constructed repo.
767 770 name = pycompat.sysstr(
768 771 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
769 772 )
770 773
771 774 cls = type(name, tuple(bases), {})
772 775
773 776 return cls(
774 777 baseui=baseui,
775 778 ui=ui,
776 779 origroot=path,
777 780 wdirvfs=wdirvfs,
778 781 hgvfs=hgvfs,
779 782 requirements=requirements,
780 783 supportedrequirements=supportedrequirements,
781 784 sharedpath=storebasepath,
782 785 store=store,
783 786 cachevfs=cachevfs,
784 787 wcachevfs=wcachevfs,
785 788 features=features,
786 789 intents=intents,
787 790 )
788 791
789 792
790 793 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
791 794 """Load hgrc files/content into a ui instance.
792 795
793 796 This is called during repository opening to load any additional
794 797 config files or settings relevant to the current repository.
795 798
796 799 Returns a bool indicating whether any additional configs were loaded.
797 800
798 801 Extensions should monkeypatch this function to modify how per-repo
799 802 configs are loaded. For example, an extension may wish to pull in
800 803 configs from alternate files or sources.
801 804
802 805     sharedvfs is a vfs object pointing to the source repo if the current one is a
803 806 shared one
804 807 """
805 808 if not rcutil.use_repo_hgrc():
806 809 return False
807 810
808 811 ret = False
809 812     # first load config from shared source if we have to
810 813 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
811 814 try:
812 815 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
813 816 ret = True
814 817 except IOError:
815 818 pass
816 819
817 820 try:
818 821 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
819 822 ret = True
820 823 except IOError:
821 824 pass
822 825
823 826 try:
824 827 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
825 828 ret = True
826 829 except IOError:
827 830 pass
828 831
829 832 return ret
830 833
831 834
832 835 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
833 836 """Perform additional actions after .hg/hgrc is loaded.
834 837
835 838 This function is called during repository loading immediately after
836 839 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
837 840
838 841 The function can be used to validate configs, automatically add
839 842 options (including extensions) based on requirements, etc.
840 843 """
841 844
842 845 # Map of requirements to list of extensions to load automatically when
843 846 # requirement is present.
844 847 autoextensions = {
845 848 b'git': [b'git'],
846 849 b'largefiles': [b'largefiles'],
847 850 b'lfs': [b'lfs'],
848 851 }
849 852
850 853 for requirement, names in sorted(autoextensions.items()):
851 854 if requirement not in requirements:
852 855 continue
853 856
854 857 for name in names:
855 858 if not ui.hasconfig(b'extensions', name):
856 859 ui.setconfig(b'extensions', name, b'', source=b'autoload')
857 860
858 861
859 862 def gathersupportedrequirements(ui):
860 863 """Determine the complete set of recognized requirements."""
861 864 # Start with all requirements supported by this file.
862 865 supported = set(localrepository._basesupported)
863 866
864 867 # Execute ``featuresetupfuncs`` entries if they belong to an extension
865 868 # relevant to this ui instance.
866 869 modules = {m.__name__ for n, m in extensions.extensions(ui)}
867 870
868 871 for fn in featuresetupfuncs:
869 872 if fn.__module__ in modules:
870 873 fn(ui, supported)
871 874
872 875 # Add derived requirements from registered compression engines.
873 876 for name in util.compengines:
874 877 engine = util.compengines[name]
875 878 if engine.available() and engine.revlogheader():
876 879 supported.add(b'exp-compression-%s' % name)
877 880 if engine.name() == b'zstd':
878 881 supported.add(b'revlog-compression-zstd')
879 882
880 883 return supported
881 884
882 885
883 886 def ensurerequirementsrecognized(requirements, supported):
884 887 """Validate that a set of local requirements is recognized.
885 888
886 889 Receives a set of requirements. Raises an ``error.RepoError`` if there
887 890 exists any requirement in that set that currently loaded code doesn't
888 891 recognize.
889 892
890 893 Returns a set of supported requirements.
891 894 """
892 895 missing = set()
893 896
894 897 for requirement in requirements:
895 898 if requirement in supported:
896 899 continue
897 900
898 901 if not requirement or not requirement[0:1].isalnum():
899 902 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
900 903
901 904 missing.add(requirement)
902 905
903 906 if missing:
904 907 raise error.RequirementError(
905 908 _(b'repository requires features unknown to this Mercurial: %s')
906 909 % b' '.join(sorted(missing)),
907 910 hint=_(
908 911 b'see https://mercurial-scm.org/wiki/MissingRequirement '
909 912 b'for more information'
910 913 ),
911 914 )
912 915
913 916
914 917 def ensurerequirementscompatible(ui, requirements):
915 918 """Validates that a set of recognized requirements is mutually compatible.
916 919
917 920 Some requirements may not be compatible with others or require
918 921 config options that aren't enabled. This function is called during
919 922 repository opening to ensure that the set of requirements needed
920 923 to open a repository is sane and compatible with config options.
921 924
922 925 Extensions can monkeypatch this function to perform additional
923 926 checking.
924 927
925 928 ``error.RepoError`` should be raised on failure.
926 929 """
927 930 if (
928 931 requirementsmod.SPARSE_REQUIREMENT in requirements
929 932 and not sparse.enabled
930 933 ):
931 934 raise error.RepoError(
932 935 _(
933 936 b'repository is using sparse feature but '
934 937 b'sparse is not enabled; enable the '
935 938 b'"sparse" extensions to access'
936 939 )
937 940 )
938 941
939 942
940 943 def makestore(requirements, path, vfstype):
941 944 """Construct a storage object for a repository."""
942 945 if b'store' in requirements:
943 946 if b'fncache' in requirements:
944 947 return storemod.fncachestore(
945 948 path, vfstype, b'dotencode' in requirements
946 949 )
947 950
948 951 return storemod.encodedstore(path, vfstype)
949 952
950 953 return storemod.basicstore(path, vfstype)
951 954
952 955
953 956 def resolvestorevfsoptions(ui, requirements, features):
954 957 """Resolve the options to pass to the store vfs opener.
955 958
956 959 The returned dict is used to influence behavior of the storage layer.
957 960 """
958 961 options = {}
959 962
960 963 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
961 964 options[b'treemanifest'] = True
962 965
963 966 # experimental config: format.manifestcachesize
964 967 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
965 968 if manifestcachesize is not None:
966 969 options[b'manifestcachesize'] = manifestcachesize
967 970
968 971 # In the absence of another requirement superseding a revlog-related
969 972 # requirement, we have to assume the repo is using revlog version 0.
970 973 # This revlog format is super old and we don't bother trying to parse
971 974 # opener options for it because those options wouldn't do anything
972 975 # meaningful on such old repos.
973 976 if (
974 977 b'revlogv1' in requirements
975 978 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
976 979 ):
977 980 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
978 981 else: # explicitly mark repo as using revlogv0
979 982 options[b'revlogv0'] = True
980 983
981 984 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
982 985 options[b'copies-storage'] = b'changeset-sidedata'
983 986 else:
984 987 writecopiesto = ui.config(b'experimental', b'copies.write-to')
985 988 copiesextramode = (b'changeset-only', b'compatibility')
986 989 if writecopiesto in copiesextramode:
987 990 options[b'copies-storage'] = b'extra'
988 991
989 992 return options
990 993
991 994
992 995 def resolverevlogstorevfsoptions(ui, requirements, features):
993 996 """Resolve opener options specific to revlogs."""
994 997
995 998 options = {}
996 999 options[b'flagprocessors'] = {}
997 1000
998 1001 if b'revlogv1' in requirements:
999 1002 options[b'revlogv1'] = True
1000 1003 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1001 1004 options[b'revlogv2'] = True
1002 1005
1003 1006 if b'generaldelta' in requirements:
1004 1007 options[b'generaldelta'] = True
1005 1008
1006 1009 # experimental config: format.chunkcachesize
1007 1010 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1008 1011 if chunkcachesize is not None:
1009 1012 options[b'chunkcachesize'] = chunkcachesize
1010 1013
1011 1014 deltabothparents = ui.configbool(
1012 1015 b'storage', b'revlog.optimize-delta-parent-choice'
1013 1016 )
1014 1017 options[b'deltabothparents'] = deltabothparents
1015 1018
1016 1019 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1017 1020 lazydeltabase = False
1018 1021 if lazydelta:
1019 1022 lazydeltabase = ui.configbool(
1020 1023 b'storage', b'revlog.reuse-external-delta-parent'
1021 1024 )
1022 1025 if lazydeltabase is None:
1023 1026 lazydeltabase = not scmutil.gddeltaconfig(ui)
1024 1027 options[b'lazydelta'] = lazydelta
1025 1028 options[b'lazydeltabase'] = lazydeltabase
1026 1029
1027 1030 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1028 1031 if 0 <= chainspan:
1029 1032 options[b'maxdeltachainspan'] = chainspan
1030 1033
1031 1034 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1032 1035 if mmapindexthreshold is not None:
1033 1036 options[b'mmapindexthreshold'] = mmapindexthreshold
1034 1037
1035 1038 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1036 1039 srdensitythres = float(
1037 1040 ui.config(b'experimental', b'sparse-read.density-threshold')
1038 1041 )
1039 1042 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1040 1043 options[b'with-sparse-read'] = withsparseread
1041 1044 options[b'sparse-read-density-threshold'] = srdensitythres
1042 1045 options[b'sparse-read-min-gap-size'] = srmingapsize
1043 1046
1044 1047 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1045 1048 options[b'sparse-revlog'] = sparserevlog
1046 1049 if sparserevlog:
1047 1050 options[b'generaldelta'] = True
1048 1051
1049 1052 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1050 1053 options[b'side-data'] = sidedata
1051 1054
1052 1055 maxchainlen = None
1053 1056 if sparserevlog:
1054 1057 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1055 1058 # experimental config: format.maxchainlen
1056 1059 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1057 1060 if maxchainlen is not None:
1058 1061 options[b'maxchainlen'] = maxchainlen
1059 1062
1060 1063 for r in requirements:
1061 1064         # we allow multiple compression engine requirements to co-exist because,
1062 1065         # strictly speaking, revlog seems to support mixed compression styles.
1063 1066 #
1064 1067 # The compression used for new entries will be "the last one"
1065 1068 prefix = r.startswith
1066 1069 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1067 1070 options[b'compengine'] = r.split(b'-', 2)[2]
1068 1071
1069 1072 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1070 1073 if options[b'zlib.level'] is not None:
1071 1074 if not (0 <= options[b'zlib.level'] <= 9):
1072 1075 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1073 1076 raise error.Abort(msg % options[b'zlib.level'])
1074 1077 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1075 1078 if options[b'zstd.level'] is not None:
1076 1079 if not (0 <= options[b'zstd.level'] <= 22):
1077 1080 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1078 1081 raise error.Abort(msg % options[b'zstd.level'])
1079 1082
1080 1083 if requirementsmod.NARROW_REQUIREMENT in requirements:
1081 1084 options[b'enableellipsis'] = True
1082 1085
1083 1086 if ui.configbool(b'experimental', b'rust.index'):
1084 1087 options[b'rust.index'] = True
1085 1088 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1086 1089 slow_path = ui.config(
1087 1090 b'storage', b'revlog.persistent-nodemap.slow-path'
1088 1091 )
1089 1092 if slow_path not in (b'allow', b'warn', b'abort'):
1090 1093 default = ui.config_default(
1091 1094 b'storage', b'revlog.persistent-nodemap.slow-path'
1092 1095 )
1093 1096 msg = _(
1094 1097 b'unknown value for config '
1095 1098 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1096 1099 )
1097 1100 ui.warn(msg % slow_path)
1098 1101 if not ui.quiet:
1099 1102 ui.warn(_(b'falling back to default value: %s\n') % default)
1100 1103 slow_path = default
1101 1104
1102 1105 msg = _(
1103 1106 b"accessing `persistent-nodemap` repository without associated "
1104 1107 b"fast implementation."
1105 1108 )
1106 1109 hint = _(
1107 1110 b"check `hg help config.format.use-persistent-nodemap` "
1108 1111 b"for details"
1109 1112 )
1110 1113 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1111 1114 if slow_path == b'warn':
1112 1115 msg = b"warning: " + msg + b'\n'
1113 1116 ui.warn(msg)
1114 1117 if not ui.quiet:
1115 1118 hint = b'(' + hint + b')\n'
1116 1119 ui.warn(hint)
1117 1120 if slow_path == b'abort':
1118 1121 raise error.Abort(msg, hint=hint)
1119 1122 options[b'persistent-nodemap'] = True
1120 1123 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1121 1124 options[b'persistent-nodemap.mmap'] = True
1122 1125 if ui.configbool(b'devel', b'persistent-nodemap'):
1123 1126 options[b'devel-force-nodemap'] = True
1124 1127
1125 1128 return options
1126 1129
1127 1130
1128 1131 def makemain(**kwargs):
1129 1132 """Produce a type conforming to ``ilocalrepositorymain``."""
1130 1133 return localrepository
1131 1134
1132 1135
1133 1136 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1134 1137 class revlogfilestorage(object):
1135 1138 """File storage when using revlogs."""
1136 1139
1137 1140 def file(self, path):
1138 1141 if path[0] == b'/':
1139 1142 path = path[1:]
1140 1143
1141 1144 return filelog.filelog(self.svfs, path)
1142 1145
1143 1146
1144 1147 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1145 1148 class revlognarrowfilestorage(object):
1146 1149 """File storage when using revlogs and narrow files."""
1147 1150
1148 1151 def file(self, path):
1149 1152 if path[0] == b'/':
1150 1153 path = path[1:]
1151 1154
1152 1155 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1153 1156
1154 1157
1155 1158 def makefilestorage(requirements, features, **kwargs):
1156 1159 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1157 1160 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1158 1161 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1159 1162
1160 1163 if requirementsmod.NARROW_REQUIREMENT in requirements:
1161 1164 return revlognarrowfilestorage
1162 1165 else:
1163 1166 return revlogfilestorage
1164 1167
1165 1168
1166 1169 # List of repository interfaces and factory functions for them. Each
1167 1170 # will be called in order during ``makelocalrepository()`` to iteratively
1168 1171 # derive the final type for a local repository instance. We capture the
1169 1172 # function as a lambda so we don't hold a reference and the module-level
1170 1173 # functions can be wrapped.
1171 1174 REPO_INTERFACES = [
1172 1175 (repository.ilocalrepositorymain, lambda: makemain),
1173 1176 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1174 1177 ]
1175 1178
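# A hypothetical sketch of how an extension could take advantage of the lazy
# lookup above by wrapping one of the module-level factory functions (the
# feature name is an example only):
#
#     from mercurial import extensions, localrepo
#
#     def _makefilestorage(orig, requirements, features, **kwargs):
#         # e.g. advertise an extra repo feature before the type is chosen
#         features.add(b'exp-my-feature')
#         return orig(requirements, features, **kwargs)
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)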
1176 1179
1177 1180 @interfaceutil.implementer(repository.ilocalrepositorymain)
1178 1181 class localrepository(object):
1179 1182 """Main class for representing local repositories.
1180 1183
1181 1184 All local repositories are instances of this class.
1182 1185
1183 1186 Constructed on its own, instances of this class are not usable as
1184 1187 repository objects. To obtain a usable repository object, call
1185 1188 ``hg.repository()``, ``localrepo.instance()``, or
1186 1189 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1187 1190 ``instance()`` adds support for creating new repositories.
1188 1191 ``hg.repository()`` adds more extension integration, including calling
1189 1192 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1190 1193 used.
1191 1194 """
1192 1195
1193 1196 # obsolete experimental requirements:
1194 1197 # - manifestv2: An experimental new manifest format that allowed
1195 1198 # for stem compression of long paths. Experiment ended up not
1196 1199 # being successful (repository sizes went up due to worse delta
1197 1200 # chains), and the code was deleted in 4.6.
1198 1201 supportedformats = {
1199 1202 b'revlogv1',
1200 1203 b'generaldelta',
1201 1204 requirementsmod.TREEMANIFEST_REQUIREMENT,
1202 1205 requirementsmod.COPIESSDC_REQUIREMENT,
1203 1206 requirementsmod.REVLOGV2_REQUIREMENT,
1204 1207 requirementsmod.SIDEDATA_REQUIREMENT,
1205 1208 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1206 1209 requirementsmod.NODEMAP_REQUIREMENT,
1207 1210 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1208 1211 requirementsmod.SHARESAFE_REQUIREMENT,
1209 1212 }
1210 1213 _basesupported = supportedformats | {
1211 1214 b'store',
1212 1215 b'fncache',
1213 1216 requirementsmod.SHARED_REQUIREMENT,
1214 1217 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1215 1218 b'dotencode',
1216 1219 requirementsmod.SPARSE_REQUIREMENT,
1217 1220 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1218 1221 }
1219 1222
1220 1223     # list of prefixes for files which can be written without 'wlock'
1221 1224 # Extensions should extend this list when needed
1222 1225 _wlockfreeprefix = {
1223 1226         # We might consider requiring 'wlock' for the next
1224 1227         # two, but pretty much all the existing code assumes
1225 1228 # wlock is not needed so we keep them excluded for
1226 1229 # now.
1227 1230 b'hgrc',
1228 1231 b'requires',
1229 1232         # XXX cache is a complicated business; someone
1230 1233 # should investigate this in depth at some point
1231 1234 b'cache/',
1232 1235         # XXX shouldn't dirstate be covered by the wlock?
1233 1236 b'dirstate',
1234 1237 # XXX bisect was still a bit too messy at the time
1235 1238 # this changeset was introduced. Someone should fix
1236 1239         # the remaining bit and drop this line
1237 1240 b'bisect.state',
1238 1241 }
1239 1242
1240 1243 def __init__(
1241 1244 self,
1242 1245 baseui,
1243 1246 ui,
1244 1247 origroot,
1245 1248 wdirvfs,
1246 1249 hgvfs,
1247 1250 requirements,
1248 1251 supportedrequirements,
1249 1252 sharedpath,
1250 1253 store,
1251 1254 cachevfs,
1252 1255 wcachevfs,
1253 1256 features,
1254 1257 intents=None,
1255 1258 ):
1256 1259 """Create a new local repository instance.
1257 1260
1258 1261 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1259 1262 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1260 1263 object.
1261 1264
1262 1265 Arguments:
1263 1266
1264 1267 baseui
1265 1268 ``ui.ui`` instance that ``ui`` argument was based off of.
1266 1269
1267 1270 ui
1268 1271 ``ui.ui`` instance for use by the repository.
1269 1272
1270 1273 origroot
1271 1274 ``bytes`` path to working directory root of this repository.
1272 1275
1273 1276 wdirvfs
1274 1277 ``vfs.vfs`` rooted at the working directory.
1275 1278
1276 1279 hgvfs
1277 1280 ``vfs.vfs`` rooted at .hg/
1278 1281
1279 1282 requirements
1280 1283 ``set`` of bytestrings representing repository opening requirements.
1281 1284
1282 1285 supportedrequirements
1283 1286 ``set`` of bytestrings representing repository requirements that we
1284 1287            know how to open. May be a superset of ``requirements``.
1285 1288
1286 1289 sharedpath
1287 1290            ``bytes`` defining the path to the storage base directory. Points to a
1288 1291 ``.hg/`` directory somewhere.
1289 1292
1290 1293 store
1291 1294 ``store.basicstore`` (or derived) instance providing access to
1292 1295 versioned storage.
1293 1296
1294 1297 cachevfs
1295 1298 ``vfs.vfs`` used for cache files.
1296 1299
1297 1300 wcachevfs
1298 1301 ``vfs.vfs`` used for cache files related to the working copy.
1299 1302
1300 1303 features
1301 1304 ``set`` of bytestrings defining features/capabilities of this
1302 1305 instance.
1303 1306
1304 1307 intents
1305 1308 ``set`` of system strings indicating what this repo will be used
1306 1309 for.
1307 1310 """
1308 1311 self.baseui = baseui
1309 1312 self.ui = ui
1310 1313 self.origroot = origroot
1311 1314 # vfs rooted at working directory.
1312 1315 self.wvfs = wdirvfs
1313 1316 self.root = wdirvfs.base
1314 1317 # vfs rooted at .hg/. Used to access most non-store paths.
1315 1318 self.vfs = hgvfs
1316 1319 self.path = hgvfs.base
1317 1320 self.requirements = requirements
1318 1321 self.supported = supportedrequirements
1319 1322 self.sharedpath = sharedpath
1320 1323 self.store = store
1321 1324 self.cachevfs = cachevfs
1322 1325 self.wcachevfs = wcachevfs
1323 1326 self.features = features
1324 1327
1325 1328 self.filtername = None
1326 1329
1327 1330 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1328 1331 b'devel', b'check-locks'
1329 1332 ):
1330 1333 self.vfs.audit = self._getvfsward(self.vfs.audit)
1331 1334         # A list of callbacks to shape the phase if no data were found.
1332 1335         # Callbacks are in the form: func(repo, roots) --> processed root.
1333 1336         # This list is to be filled by extensions during repo setup.
1334 1337 self._phasedefaults = []
1335 1338
1336 1339 color.setup(self.ui)
1337 1340
1338 1341 self.spath = self.store.path
1339 1342 self.svfs = self.store.vfs
1340 1343 self.sjoin = self.store.join
1341 1344 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1342 1345 b'devel', b'check-locks'
1343 1346 ):
1344 1347 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1345 1348 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1346 1349 else: # standard vfs
1347 1350 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1348 1351
1349 1352 self._dirstatevalidatewarned = False
1350 1353
1351 1354 self._branchcaches = branchmap.BranchMapCache()
1352 1355 self._revbranchcache = None
1353 1356 self._filterpats = {}
1354 1357 self._datafilters = {}
1355 1358 self._transref = self._lockref = self._wlockref = None
1356 1359
1357 1360 # A cache for various files under .hg/ that tracks file changes,
1358 1361 # (used by the filecache decorator)
1359 1362 #
1360 1363 # Maps a property name to its util.filecacheentry
1361 1364 self._filecache = {}
1362 1365
1363 1366         # holds sets of revisions to be filtered
1364 1367 # should be cleared when something might have changed the filter value:
1365 1368 # - new changesets,
1366 1369 # - phase change,
1367 1370 # - new obsolescence marker,
1368 1371 # - working directory parent change,
1369 1372 # - bookmark changes
1370 1373 self.filteredrevcache = {}
1371 1374
1372 1375 # post-dirstate-status hooks
1373 1376 self._postdsstatus = []
1374 1377
1375 1378 # generic mapping between names and nodes
1376 1379 self.names = namespaces.namespaces()
1377 1380
1378 1381 # Key to signature value.
1379 1382 self._sparsesignaturecache = {}
1380 1383 # Signature to cached matcher instance.
1381 1384 self._sparsematchercache = {}
1382 1385
1383 1386 self._extrafilterid = repoview.extrafilter(ui)
1384 1387
1385 1388 self.filecopiesmode = None
1386 1389 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1387 1390 self.filecopiesmode = b'changeset-sidedata'
1388 1391
1389 1392 def _getvfsward(self, origfunc):
1390 1393 """build a ward for self.vfs"""
1391 1394 rref = weakref.ref(self)
1392 1395
1393 1396 def checkvfs(path, mode=None):
1394 1397 ret = origfunc(path, mode=mode)
1395 1398 repo = rref()
1396 1399 if (
1397 1400 repo is None
1398 1401 or not util.safehasattr(repo, b'_wlockref')
1399 1402 or not util.safehasattr(repo, b'_lockref')
1400 1403 ):
1401 1404 return
1402 1405 if mode in (None, b'r', b'rb'):
1403 1406 return
1404 1407 if path.startswith(repo.path):
1405 1408 # truncate name relative to the repository (.hg)
1406 1409 path = path[len(repo.path) + 1 :]
1407 1410 if path.startswith(b'cache/'):
1408 1411 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1409 1412 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1410 1413 # path prefixes covered by 'lock'
1411 1414 vfs_path_prefixes = (
1412 1415 b'journal.',
1413 1416 b'undo.',
1414 1417 b'strip-backup/',
1415 1418 b'cache/',
1416 1419 )
1417 1420 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1418 1421 if repo._currentlock(repo._lockref) is None:
1419 1422 repo.ui.develwarn(
1420 1423 b'write with no lock: "%s"' % path,
1421 1424 stacklevel=3,
1422 1425 config=b'check-locks',
1423 1426 )
1424 1427 elif repo._currentlock(repo._wlockref) is None:
1425 1428 # rest of vfs files are covered by 'wlock'
1426 1429 #
1427 1430 # exclude special files
1428 1431 for prefix in self._wlockfreeprefix:
1429 1432 if path.startswith(prefix):
1430 1433 return
1431 1434 repo.ui.develwarn(
1432 1435 b'write with no wlock: "%s"' % path,
1433 1436 stacklevel=3,
1434 1437 config=b'check-locks',
1435 1438 )
1436 1439 return ret
1437 1440
1438 1441 return checkvfs
1439 1442
1440 1443 def _getsvfsward(self, origfunc):
1441 1444 """build a ward for self.svfs"""
1442 1445 rref = weakref.ref(self)
1443 1446
1444 1447 def checksvfs(path, mode=None):
1445 1448 ret = origfunc(path, mode=mode)
1446 1449 repo = rref()
1447 1450 if repo is None or not util.safehasattr(repo, b'_lockref'):
1448 1451 return
1449 1452 if mode in (None, b'r', b'rb'):
1450 1453 return
1451 1454 if path.startswith(repo.sharedpath):
1452 1455 # truncate name relative to the repository (.hg)
1453 1456 path = path[len(repo.sharedpath) + 1 :]
1454 1457 if repo._currentlock(repo._lockref) is None:
1455 1458 repo.ui.develwarn(
1456 1459 b'write with no lock: "%s"' % path, stacklevel=4
1457 1460 )
1458 1461 return ret
1459 1462
1460 1463 return checksvfs
1461 1464
1462 1465 def close(self):
1463 1466 self._writecaches()
1464 1467
1465 1468 def _writecaches(self):
1466 1469 if self._revbranchcache:
1467 1470 self._revbranchcache.write()
1468 1471
1469 1472 def _restrictcapabilities(self, caps):
1470 1473 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1471 1474 caps = set(caps)
1472 1475 capsblob = bundle2.encodecaps(
1473 1476 bundle2.getrepocaps(self, role=b'client')
1474 1477 )
1475 1478 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1476 1479 return caps
1477 1480
1478 1481 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1479 1482 # self -> auditor -> self._checknested -> self
1480 1483
1481 1484 @property
1482 1485 def auditor(self):
1483 1486 # This is only used by context.workingctx.match in order to
1484 1487 # detect files in subrepos.
1485 1488 return pathutil.pathauditor(self.root, callback=self._checknested)
1486 1489
1487 1490 @property
1488 1491 def nofsauditor(self):
1489 1492 # This is only used by context.basectx.match in order to detect
1490 1493 # files in subrepos.
1491 1494 return pathutil.pathauditor(
1492 1495 self.root, callback=self._checknested, realfs=False, cached=True
1493 1496 )
1494 1497
1495 1498 def _checknested(self, path):
1496 1499 """Determine if path is a legal nested repository."""
1497 1500 if not path.startswith(self.root):
1498 1501 return False
1499 1502 subpath = path[len(self.root) + 1 :]
1500 1503 normsubpath = util.pconvert(subpath)
1501 1504
1502 1505 # XXX: Checking against the current working copy is wrong in
1503 1506 # the sense that it can reject things like
1504 1507 #
1505 1508 # $ hg cat -r 10 sub/x.txt
1506 1509 #
1507 1510 # if sub/ is no longer a subrepository in the working copy
1508 1511 # parent revision.
1509 1512 #
1510 1513 # However, it can of course also allow things that would have
1511 1514 # been rejected before, such as the above cat command if sub/
1512 1515 # is a subrepository now, but was a normal directory before.
1513 1516 # The old path auditor would have rejected by mistake since it
1514 1517 # panics when it sees sub/.hg/.
1515 1518 #
1516 1519 # All in all, checking against the working copy seems sensible
1517 1520 # since we want to prevent access to nested repositories on
1518 1521 # the filesystem *now*.
1519 1522 ctx = self[None]
1520 1523 parts = util.splitpath(subpath)
1521 1524 while parts:
1522 1525 prefix = b'/'.join(parts)
1523 1526 if prefix in ctx.substate:
1524 1527 if prefix == normsubpath:
1525 1528 return True
1526 1529 else:
1527 1530 sub = ctx.sub(prefix)
1528 1531 return sub.checknested(subpath[len(prefix) + 1 :])
1529 1532 else:
1530 1533 parts.pop()
1531 1534 return False
1532 1535
1533 1536 def peer(self):
1534 1537 return localpeer(self) # not cached to avoid reference cycle
1535 1538
1536 1539 def unfiltered(self):
1537 1540 """Return unfiltered version of the repository
1538 1541
1539 1542 Intended to be overwritten by filtered repo."""
1540 1543 return self
1541 1544
1542 1545 def filtered(self, name, visibilityexceptions=None):
1543 1546 """Return a filtered version of a repository
1544 1547
1545 1548 The `name` parameter is the identifier of the requested view. This
1546 1549 will return a repoview object set "exactly" to the specified view.
1547 1550
1548 1551 This function does not apply recursive filtering to a repository. For
1549 1552 example calling `repo.filtered("served")` will return a repoview using
1550 1553 the "served" view, regardless of the initial view used by `repo`.
1551 1554
1552 1555         In other words, there is always only one level of `repoview` "filtering".
1553 1556 """
1554 1557 if self._extrafilterid is not None and b'%' not in name:
1555 1558 name = name + b'%' + self._extrafilterid
1556 1559
1557 1560 cls = repoview.newtype(self.unfiltered().__class__)
1558 1561 return cls(self, name, visibilityexceptions)
1559 1562
1560 1563 @mixedrepostorecache(
1561 1564 (b'bookmarks', b'plain'),
1562 1565 (b'bookmarks.current', b'plain'),
1563 1566 (b'bookmarks', b''),
1564 1567 (b'00changelog.i', b''),
1565 1568 )
1566 1569 def _bookmarks(self):
1567 1570 # Since the multiple files involved in the transaction cannot be
1568 1571 # written atomically (with current repository format), there is a race
1569 1572 # condition here.
1570 1573 #
1571 1574 # 1) changelog content A is read
1572 1575 # 2) outside transaction update changelog to content B
1573 1576 # 3) outside transaction update bookmark file referring to content B
1574 1577 # 4) bookmarks file content is read and filtered against changelog-A
1575 1578 #
1576 1579 # When this happens, bookmarks against nodes missing from A are dropped.
1577 1580 #
1578 1581         # Having this happen during a read is not great, but it becomes worse
1579 1582         # when it happens during a write because the bookmarks to the "unknown"
1580 1583 # nodes will be dropped for good. However, writes happen within locks.
1581 1584 # This locking makes it possible to have a race free consistent read.
1582 1585         # For this purpose, data read from disk before locking is
1583 1586         # "invalidated" right after the locks are taken. These invalidations are
1584 1587         # "light"; the `filecache` mechanism keeps the data in memory and will
1585 1588         # reuse it if the underlying files did not change. Not parsing the
1586 1589         # same data multiple times helps performance.
1587 1590 #
1588 1591         # Unfortunately, in the case described above, the files tracked by the
1589 1592 # bookmarks file cache might not have changed, but the in-memory
1590 1593 # content is still "wrong" because we used an older changelog content
1591 1594 # to process the on-disk data. So after locking, the changelog would be
1592 1595 # refreshed but `_bookmarks` would be preserved.
1593 1596         # Adding `00changelog.i` to the list of tracked files is not
1594 1597 # enough, because at the time we build the content for `_bookmarks` in
1595 1598 # (4), the changelog file has already diverged from the content used
1596 1599 # for loading `changelog` in (1)
1597 1600 #
1598 1601 # To prevent the issue, we force the changelog to be explicitly
1599 1602 # reloaded while computing `_bookmarks`. The data race can still happen
1600 1603 # without the lock (with a narrower window), but it would no longer go
1601 1604 # undetected during the lock time refresh.
1602 1605 #
1603 1606         # The new schedule is as follows:
1604 1607 #
1605 1608 # 1) filecache logic detect that `_bookmarks` needs to be computed
1606 1609 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1607 1610 # 3) We force `changelog` filecache to be tested
1608 1611 # 4) cachestat for `changelog` are captured (for changelog)
1609 1612 # 5) `_bookmarks` is computed and cached
1610 1613 #
1611 1614         # The step in (3) ensures we have a changelog at least as recent as the
1612 1615         # cache stat computed in (1). As a result, at locking time:
1613 1616         # * if the changelog did not change since (1) -> we can reuse the data
1614 1617 # * otherwise -> the bookmarks get refreshed.
1615 1618 self._refreshchangelog()
1616 1619 return bookmarks.bmstore(self)
1617 1620
1618 1621 def _refreshchangelog(self):
1619 1622         """make sure the in-memory changelog matches the on-disk one"""
1620 1623 if 'changelog' in vars(self) and self.currenttransaction() is None:
1621 1624 del self.changelog
1622 1625
1623 1626 @property
1624 1627 def _activebookmark(self):
1625 1628 return self._bookmarks.active
1626 1629
1627 1630 # _phasesets depend on changelog. what we need is to call
1628 1631 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1629 1632 # can't be easily expressed in filecache mechanism.
1630 1633 @storecache(b'phaseroots', b'00changelog.i')
1631 1634 def _phasecache(self):
1632 1635 return phases.phasecache(self, self._phasedefaults)
1633 1636
1634 1637 @storecache(b'obsstore')
1635 1638 def obsstore(self):
1636 1639 return obsolete.makestore(self.ui, self)
1637 1640
1638 1641 @storecache(b'00changelog.i')
1639 1642 def changelog(self):
1640 1643         # load dirstate before changelog to avoid a race; see issue6303
1641 1644 self.dirstate.prefetch_parents()
1642 return self.store.changelog(txnutil.mayhavepending(self.root))
1645 return self.store.changelog(
1646 txnutil.mayhavepending(self.root),
1647 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1648 )
1643 1649
1644 1650 @storecache(b'00manifest.i')
1645 1651 def manifestlog(self):
1646 1652 return self.store.manifestlog(self, self._storenarrowmatch)
1647 1653
1648 1654 @repofilecache(b'dirstate')
1649 1655 def dirstate(self):
1650 1656 return self._makedirstate()
1651 1657
1652 1658 def _makedirstate(self):
1653 1659 """Extension point for wrapping the dirstate per-repo."""
1654 1660 sparsematchfn = lambda: sparse.matcher(self)
1655 1661
1656 1662 return dirstate.dirstate(
1657 1663 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1658 1664 )
1659 1665
1660 1666 def _dirstatevalidate(self, node):
1661 1667 try:
1662 1668 self.changelog.rev(node)
1663 1669 return node
1664 1670 except error.LookupError:
1665 1671 if not self._dirstatevalidatewarned:
1666 1672 self._dirstatevalidatewarned = True
1667 1673 self.ui.warn(
1668 1674 _(b"warning: ignoring unknown working parent %s!\n")
1669 1675 % short(node)
1670 1676 )
1671 1677 return nullid
1672 1678
1673 1679 @storecache(narrowspec.FILENAME)
1674 1680 def narrowpats(self):
1675 1681 """matcher patterns for this repository's narrowspec
1676 1682
1677 1683 A tuple of (includes, excludes).
1678 1684 """
1679 1685 return narrowspec.load(self)
1680 1686
1681 1687 @storecache(narrowspec.FILENAME)
1682 1688 def _storenarrowmatch(self):
1683 1689 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1684 1690 return matchmod.always()
1685 1691 include, exclude = self.narrowpats
1686 1692 return narrowspec.match(self.root, include=include, exclude=exclude)
1687 1693
1688 1694 @storecache(narrowspec.FILENAME)
1689 1695 def _narrowmatch(self):
1690 1696 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1691 1697 return matchmod.always()
1692 1698 narrowspec.checkworkingcopynarrowspec(self)
1693 1699 include, exclude = self.narrowpats
1694 1700 return narrowspec.match(self.root, include=include, exclude=exclude)
1695 1701
1696 1702 def narrowmatch(self, match=None, includeexact=False):
1697 1703         """matcher corresponding to the repo's narrowspec
1698 1704
1699 1705 If `match` is given, then that will be intersected with the narrow
1700 1706 matcher.
1701 1707
1702 1708 If `includeexact` is True, then any exact matches from `match` will
1703 1709 be included even if they're outside the narrowspec.
1704 1710 """
1705 1711 if match:
1706 1712 if includeexact and not self._narrowmatch.always():
1707 1713 # do not exclude explicitly-specified paths so that they can
1708 1714 # be warned later on
1709 1715 em = matchmod.exact(match.files())
1710 1716 nm = matchmod.unionmatcher([self._narrowmatch, em])
1711 1717 return matchmod.intersectmatchers(match, nm)
1712 1718 return matchmod.intersectmatchers(match, self._narrowmatch)
1713 1719 return self._narrowmatch
1714 1720
1715 1721 def setnarrowpats(self, newincludes, newexcludes):
1716 1722 narrowspec.save(self, newincludes, newexcludes)
1717 1723 self.invalidate(clearfilecache=True)
1718 1724
1719 1725 @unfilteredpropertycache
1720 1726 def _quick_access_changeid_null(self):
1721 1727 return {
1722 1728 b'null': (nullrev, nullid),
1723 1729 nullrev: (nullrev, nullid),
1724 1730 nullid: (nullrev, nullid),
1725 1731 }
1726 1732
1727 1733 @unfilteredpropertycache
1728 1734 def _quick_access_changeid_wc(self):
1729 1735 # also fast path access to the working copy parents
1730 1736         # however, only do it for filters that ensure the wc is visible.
1731 1737 quick = self._quick_access_changeid_null.copy()
1732 1738 cl = self.unfiltered().changelog
1733 1739 for node in self.dirstate.parents():
1734 1740 if node == nullid:
1735 1741 continue
1736 1742 rev = cl.index.get_rev(node)
1737 1743 if rev is None:
1738 1744 # unknown working copy parent case:
1739 1745 #
1740 1746 # skip the fast path and let higher code deal with it
1741 1747 continue
1742 1748 pair = (rev, node)
1743 1749 quick[rev] = pair
1744 1750 quick[node] = pair
1745 1751 # also add the parents of the parents
1746 1752 for r in cl.parentrevs(rev):
1747 1753 if r == nullrev:
1748 1754 continue
1749 1755 n = cl.node(r)
1750 1756 pair = (r, n)
1751 1757 quick[r] = pair
1752 1758 quick[n] = pair
1753 1759 p1node = self.dirstate.p1()
1754 1760 if p1node != nullid:
1755 1761 quick[b'.'] = quick[p1node]
1756 1762 return quick
1757 1763
1758 1764 @unfilteredmethod
1759 1765 def _quick_access_changeid_invalidate(self):
1760 1766 if '_quick_access_changeid_wc' in vars(self):
1761 1767 del self.__dict__['_quick_access_changeid_wc']
1762 1768
1763 1769 @property
1764 1770 def _quick_access_changeid(self):
1765 1771         """a helper dictionary for __getitem__ calls
1766 1772
1767 1773         This contains a list of symbols we can recognise right away without
1768 1774 further processing.
1769 1775 """
1770 1776 if self.filtername in repoview.filter_has_wc:
1771 1777 return self._quick_access_changeid_wc
1772 1778 return self._quick_access_changeid_null
1773 1779
1774 1780 def __getitem__(self, changeid):
1775 1781 # dealing with special cases
1776 1782 if changeid is None:
1777 1783 return context.workingctx(self)
1778 1784 if isinstance(changeid, context.basectx):
1779 1785 return changeid
1780 1786
1781 1787 # dealing with multiple revisions
1782 1788 if isinstance(changeid, slice):
1783 1789 # wdirrev isn't contiguous so the slice shouldn't include it
1784 1790 return [
1785 1791 self[i]
1786 1792 for i in pycompat.xrange(*changeid.indices(len(self)))
1787 1793 if i not in self.changelog.filteredrevs
1788 1794 ]
1789 1795
1790 1796 # dealing with some special values
1791 1797 quick_access = self._quick_access_changeid.get(changeid)
1792 1798 if quick_access is not None:
1793 1799 rev, node = quick_access
1794 1800 return context.changectx(self, rev, node, maybe_filtered=False)
1795 1801 if changeid == b'tip':
1796 1802 node = self.changelog.tip()
1797 1803 rev = self.changelog.rev(node)
1798 1804 return context.changectx(self, rev, node)
1799 1805
1800 1806 # dealing with arbitrary values
1801 1807 try:
1802 1808 if isinstance(changeid, int):
1803 1809 node = self.changelog.node(changeid)
1804 1810 rev = changeid
1805 1811 elif changeid == b'.':
1806 1812 # this is a hack to delay/avoid loading obsmarkers
1807 1813 # when we know that '.' won't be hidden
1808 1814 node = self.dirstate.p1()
1809 1815 rev = self.unfiltered().changelog.rev(node)
1810 1816 elif len(changeid) == 20:
1811 1817 try:
1812 1818 node = changeid
1813 1819 rev = self.changelog.rev(changeid)
1814 1820 except error.FilteredLookupError:
1815 1821 changeid = hex(changeid) # for the error message
1816 1822 raise
1817 1823 except LookupError:
1818 1824 # check if it might have come from damaged dirstate
1819 1825 #
1820 1826 # XXX we could avoid the unfiltered if we had a recognizable
1821 1827 # exception for filtered changeset access
1822 1828 if (
1823 1829 self.local()
1824 1830 and changeid in self.unfiltered().dirstate.parents()
1825 1831 ):
1826 1832 msg = _(b"working directory has unknown parent '%s'!")
1827 1833 raise error.Abort(msg % short(changeid))
1828 1834 changeid = hex(changeid) # for the error message
1829 1835 raise
1830 1836
1831 1837 elif len(changeid) == 40:
1832 1838 node = bin(changeid)
1833 1839 rev = self.changelog.rev(node)
1834 1840 else:
1835 1841 raise error.ProgrammingError(
1836 1842 b"unsupported changeid '%s' of type %s"
1837 1843 % (changeid, pycompat.bytestr(type(changeid)))
1838 1844 )
1839 1845
1840 1846 return context.changectx(self, rev, node)
1841 1847
1842 1848 except (error.FilteredIndexError, error.FilteredLookupError):
1843 1849 raise error.FilteredRepoLookupError(
1844 1850 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1845 1851 )
1846 1852 except (IndexError, LookupError):
1847 1853 raise error.RepoLookupError(
1848 1854 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1849 1855 )
1850 1856 except error.WdirUnsupported:
1851 1857 return context.workingctx(self)
1852 1858
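    # For illustration, the lookups handled above include ``repo[0]``
    # (revision number), ``repo[b'.']`` or ``repo[b'tip']`` (symbols),
    # ``repo[node]`` (20-byte binary node) and ``repo[b'<40 hex digits>']``,
    # each returning a ``context.changectx``.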
1853 1859 def __contains__(self, changeid):
1854 1860 """True if the given changeid exists"""
1855 1861 try:
1856 1862 self[changeid]
1857 1863 return True
1858 1864 except error.RepoLookupError:
1859 1865 return False
1860 1866
1861 1867 def __nonzero__(self):
1862 1868 return True
1863 1869
1864 1870 __bool__ = __nonzero__
1865 1871
1866 1872 def __len__(self):
1867 1873 # no need to pay the cost of repoview.changelog
1868 1874 unfi = self.unfiltered()
1869 1875 return len(unfi.changelog)
1870 1876
1871 1877 def __iter__(self):
1872 1878 return iter(self.changelog)
1873 1879
1874 1880 def revs(self, expr, *args):
1875 1881 """Find revisions matching a revset.
1876 1882
1877 1883 The revset is specified as a string ``expr`` that may contain
1878 1884 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1879 1885
1880 1886 Revset aliases from the configuration are not expanded. To expand
1881 1887 user aliases, consider calling ``scmutil.revrange()`` or
1882 1888 ``repo.anyrevs([expr], user=True)``.
1883 1889
1884 1890 Returns a smartset.abstractsmartset, which is a list-like interface
1885 1891 that contains integer revisions.
1886 1892 """
1887 1893 tree = revsetlang.spectree(expr, *args)
1888 1894 return revset.makematcher(tree)(self)
1889 1895
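    # Illustrative calls (the revsets themselves are just examples):
    # ``repo.revs(b'heads(%ld)', revs)`` or
    # ``repo.revs(b'branch(%s) and not public()', name)``; the %-specifiers
    # are expanded by ``revsetlang.formatspec``.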
1890 1896 def set(self, expr, *args):
1891 1897 """Find revisions matching a revset and emit changectx instances.
1892 1898
1893 1899 This is a convenience wrapper around ``revs()`` that iterates the
1894 1900 result and is a generator of changectx instances.
1895 1901
1896 1902 Revset aliases from the configuration are not expanded. To expand
1897 1903 user aliases, consider calling ``scmutil.revrange()``.
1898 1904 """
1899 1905 for r in self.revs(expr, *args):
1900 1906 yield self[r]
1901 1907
1902 1908 def anyrevs(self, specs, user=False, localalias=None):
1903 1909 """Find revisions matching one of the given revsets.
1904 1910
1905 1911 Revset aliases from the configuration are not expanded by default. To
1906 1912 expand user aliases, specify ``user=True``. To provide some local
1907 1913 definitions overriding user aliases, set ``localalias`` to
1908 1914 ``{name: definitionstring}``.
1909 1915 """
1910 1916 if specs == [b'null']:
1911 1917 return revset.baseset([nullrev])
1912 1918 if specs == [b'.']:
1913 1919 quick_data = self._quick_access_changeid.get(b'.')
1914 1920 if quick_data is not None:
1915 1921 return revset.baseset([quick_data[0]])
1916 1922 if user:
1917 1923 m = revset.matchany(
1918 1924 self.ui,
1919 1925 specs,
1920 1926 lookup=revset.lookupfn(self),
1921 1927 localalias=localalias,
1922 1928 )
1923 1929 else:
1924 1930 m = revset.matchany(None, specs, localalias=localalias)
1925 1931 return m(self)
1926 1932
1927 1933 def url(self):
1928 1934 return b'file:' + self.root
1929 1935
1930 1936 def hook(self, name, throw=False, **args):
1931 1937 """Call a hook, passing this repo instance.
1932 1938
1933 1939         This is a convenience method to aid invoking hooks. Extensions likely
1934 1940 won't call this unless they have registered a custom hook or are
1935 1941 replacing code that is expected to call a hook.
1936 1942 """
1937 1943 return hook.hook(self.ui, self, name, throw, **args)
1938 1944
1939 1945 @filteredpropertycache
1940 1946 def _tagscache(self):
1941 1947         """Returns a tagscache object that contains various tag-related
1942 1948 caches."""
1943 1949
1944 1950 # This simplifies its cache management by having one decorated
1945 1951 # function (this one) and the rest simply fetch things from it.
1946 1952 class tagscache(object):
1947 1953 def __init__(self):
1948 1954 # These two define the set of tags for this repository. tags
1949 1955 # maps tag name to node; tagtypes maps tag name to 'global' or
1950 1956 # 'local'. (Global tags are defined by .hgtags across all
1951 1957 # heads, and local tags are defined in .hg/localtags.)
1952 1958 # They constitute the in-memory cache of tags.
1953 1959 self.tags = self.tagtypes = None
1954 1960
1955 1961 self.nodetagscache = self.tagslist = None
1956 1962
1957 1963 cache = tagscache()
1958 1964 cache.tags, cache.tagtypes = self._findtags()
1959 1965
1960 1966 return cache
1961 1967
1962 1968 def tags(self):
1963 1969 '''return a mapping of tag to node'''
1964 1970 t = {}
1965 1971 if self.changelog.filteredrevs:
1966 1972 tags, tt = self._findtags()
1967 1973 else:
1968 1974 tags = self._tagscache.tags
1969 1975 rev = self.changelog.rev
1970 1976 for k, v in pycompat.iteritems(tags):
1971 1977 try:
1972 1978 # ignore tags to unknown nodes
1973 1979 rev(v)
1974 1980 t[k] = v
1975 1981 except (error.LookupError, ValueError):
1976 1982 pass
1977 1983 return t
1978 1984
1979 1985 def _findtags(self):
1980 1986 """Do the hard work of finding tags. Return a pair of dicts
1981 1987 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1982 1988 maps tag name to a string like 'global' or 'local'.
1983 1989 Subclasses or extensions are free to add their own tags, but
1984 1990 should be aware that the returned dicts will be retained for the
1985 1991 duration of the localrepo object."""
1986 1992
1987 1993 # XXX what tagtype should subclasses/extensions use? Currently
1988 1994 # mq and bookmarks add tags, but do not set the tagtype at all.
1989 1995 # Should each extension invent its own tag type? Should there
1990 1996 # be one tagtype for all such "virtual" tags? Or is the status
1991 1997 # quo fine?
1992 1998
1993 1999 # map tag name to (node, hist)
1994 2000 alltags = tagsmod.findglobaltags(self.ui, self)
1995 2001 # map tag name to tag type
1996 2002 tagtypes = {tag: b'global' for tag in alltags}
1997 2003
1998 2004 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1999 2005
2000 2006 # Build the return dicts. Have to re-encode tag names because
2001 2007 # the tags module always uses UTF-8 (in order not to lose info
2002 2008 # writing to the cache), but the rest of Mercurial wants them in
2003 2009 # local encoding.
2004 2010 tags = {}
2005 2011 for (name, (node, hist)) in pycompat.iteritems(alltags):
2006 2012 if node != nullid:
2007 2013 tags[encoding.tolocal(name)] = node
2008 2014 tags[b'tip'] = self.changelog.tip()
2009 2015 tagtypes = {
2010 2016 encoding.tolocal(name): value
2011 2017 for (name, value) in pycompat.iteritems(tagtypes)
2012 2018 }
2013 2019 return (tags, tagtypes)
2014 2020
2015 2021 def tagtype(self, tagname):
2016 2022 """
2017 2023 return the type of the given tag. result can be:
2018 2024
2019 2025 'local' : a local tag
2020 2026 'global' : a global tag
2021 2027 None : tag does not exist
2022 2028 """
2023 2029
2024 2030 return self._tagscache.tagtypes.get(tagname)
2025 2031
2026 2032 def tagslist(self):
2027 2033 '''return a list of tags ordered by revision'''
2028 2034 if not self._tagscache.tagslist:
2029 2035 l = []
2030 2036 for t, n in pycompat.iteritems(self.tags()):
2031 2037 l.append((self.changelog.rev(n), t, n))
2032 2038 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2033 2039
2034 2040 return self._tagscache.tagslist
2035 2041
2036 2042 def nodetags(self, node):
2037 2043 '''return the tags associated with a node'''
2038 2044 if not self._tagscache.nodetagscache:
2039 2045 nodetagscache = {}
2040 2046 for t, n in pycompat.iteritems(self._tagscache.tags):
2041 2047 nodetagscache.setdefault(n, []).append(t)
2042 2048 for tags in pycompat.itervalues(nodetagscache):
2043 2049 tags.sort()
2044 2050 self._tagscache.nodetagscache = nodetagscache
2045 2051 return self._tagscache.nodetagscache.get(node, [])
2046 2052
2047 2053 def nodebookmarks(self, node):
2048 2054 """return the list of bookmarks pointing to the specified node"""
2049 2055 return self._bookmarks.names(node)
2050 2056
2051 2057 def branchmap(self):
2052 2058 """returns a dictionary {branch: [branchheads]} with branchheads
2053 2059 ordered by increasing revision number"""
2054 2060 return self._branchcaches[self]
2055 2061
2056 2062 @unfilteredmethod
2057 2063 def revbranchcache(self):
2058 2064 if not self._revbranchcache:
2059 2065 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2060 2066 return self._revbranchcache
2061 2067
2062 2068 def register_changeset(self, rev, changelogrevision):
2063 2069 self.revbranchcache().setdata(rev, changelogrevision)
2064 2070
2065 2071 def branchtip(self, branch, ignoremissing=False):
2066 2072 """return the tip node for a given branch
2067 2073
2068 2074 If ignoremissing is True, then this method will not raise an error.
2069 2075 This is helpful for callers that only expect None for a missing branch
2070 2076 (e.g. namespace).
2071 2077
2072 2078 """
2073 2079 try:
2074 2080 return self.branchmap().branchtip(branch)
2075 2081 except KeyError:
2076 2082 if not ignoremissing:
2077 2083 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2078 2084 else:
2079 2085 pass
2080 2086
2081 2087 def lookup(self, key):
2082 2088 node = scmutil.revsymbol(self, key).node()
2083 2089 if node is None:
2084 2090 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2085 2091 return node
2086 2092
2087 2093 def lookupbranch(self, key):
2088 2094 if self.branchmap().hasbranch(key):
2089 2095 return key
2090 2096
2091 2097 return scmutil.revsymbol(self, key).branch()
2092 2098
2093 2099 def known(self, nodes):
2094 2100 cl = self.changelog
2095 2101 get_rev = cl.index.get_rev
2096 2102 filtered = cl.filteredrevs
2097 2103 result = []
2098 2104 for n in nodes:
2099 2105 r = get_rev(n)
2100 2106 resp = not (r is None or r in filtered)
2101 2107 result.append(resp)
2102 2108 return result
2103 2109
2104 2110 def local(self):
2105 2111 return self
2106 2112
2107 2113 def publishing(self):
2108 2114 # it's safe (and desirable) to trust the publish flag unconditionally
2109 2115 # so that we don't finalize changes shared between users via ssh or nfs
2110 2116 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2111 2117
2112 2118 def cancopy(self):
2113 2119 # so statichttprepo's override of local() works
2114 2120 if not self.local():
2115 2121 return False
2116 2122 if not self.publishing():
2117 2123 return True
2118 2124 # if publishing we can't copy if there is filtered content
2119 2125 return not self.filtered(b'visible').changelog.filteredrevs
2120 2126
2121 2127 def shared(self):
2122 2128 '''the type of shared repository (None if not shared)'''
2123 2129 if self.sharedpath != self.path:
2124 2130 return b'store'
2125 2131 return None
2126 2132
2127 2133 def wjoin(self, f, *insidef):
2128 2134 return self.vfs.reljoin(self.root, f, *insidef)
2129 2135
2130 2136 def setparents(self, p1, p2=nullid):
2131 2137 self[None].setparents(p1, p2)
2132 2138 self._quick_access_changeid_invalidate()
2133 2139
2134 2140 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2135 2141 """changeid must be a changeset revision, if specified.
2136 2142 fileid can be a file revision or node."""
2137 2143 return context.filectx(
2138 2144 self, path, changeid, fileid, changectx=changectx
2139 2145 )
2140 2146
2141 2147 def getcwd(self):
2142 2148 return self.dirstate.getcwd()
2143 2149
2144 2150 def pathto(self, f, cwd=None):
2145 2151 return self.dirstate.pathto(f, cwd)
2146 2152
2147 2153 def _loadfilter(self, filter):
2148 2154 if filter not in self._filterpats:
2149 2155 l = []
2150 2156 for pat, cmd in self.ui.configitems(filter):
2151 2157 if cmd == b'!':
2152 2158 continue
2153 2159 mf = matchmod.match(self.root, b'', [pat])
2154 2160 fn = None
2155 2161 params = cmd
2156 2162 for name, filterfn in pycompat.iteritems(self._datafilters):
2157 2163 if cmd.startswith(name):
2158 2164 fn = filterfn
2159 2165 params = cmd[len(name) :].lstrip()
2160 2166 break
2161 2167 if not fn:
2162 2168 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2163 2169 fn.__name__ = 'commandfilter'
2164 2170 # Wrap old filters not supporting keyword arguments
2165 2171 if not pycompat.getargspec(fn)[2]:
2166 2172 oldfn = fn
2167 2173 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2168 2174 fn.__name__ = 'compat-' + oldfn.__name__
2169 2175 l.append((mf, fn, params))
2170 2176 self._filterpats[filter] = l
2171 2177 return self._filterpats[filter]
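# Illustrative sketch (not specific to this change): the [encode]/[decode]
# config sections read here map file patterns to filter commands, e.g. an
# hgrc such as
#
#   [encode]
#   *.gz = pipe: gunzip
#   [decode]
#   *.gz = pipe: gzip
#
# would produce one (matcher, filterfn, params) entry per pattern in
# self._filterpats[b'encode'] / self._filterpats[b'decode'].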
2172 2178
2173 2179 def _filter(self, filterpats, filename, data):
2174 2180 for mf, fn, cmd in filterpats:
2175 2181 if mf(filename):
2176 2182 self.ui.debug(
2177 2183 b"filtering %s through %s\n"
2178 2184 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2179 2185 )
2180 2186 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2181 2187 break
2182 2188
2183 2189 return data
2184 2190
2185 2191 @unfilteredpropertycache
2186 2192 def _encodefilterpats(self):
2187 2193 return self._loadfilter(b'encode')
2188 2194
2189 2195 @unfilteredpropertycache
2190 2196 def _decodefilterpats(self):
2191 2197 return self._loadfilter(b'decode')
2192 2198
2193 2199 def adddatafilter(self, name, filter):
2194 2200 self._datafilters[name] = filter
2195 2201
2196 2202 def wread(self, filename):
2197 2203 if self.wvfs.islink(filename):
2198 2204 data = self.wvfs.readlink(filename)
2199 2205 else:
2200 2206 data = self.wvfs.read(filename)
2201 2207 return self._filter(self._encodefilterpats, filename, data)
2202 2208
2203 2209 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2204 2210 """write ``data`` into ``filename`` in the working directory
2205 2211
2206 2212 This returns the length of the written (maybe decoded) data.
2207 2213 """
2208 2214 data = self._filter(self._decodefilterpats, filename, data)
2209 2215 if b'l' in flags:
2210 2216 self.wvfs.symlink(data, filename)
2211 2217 else:
2212 2218 self.wvfs.write(
2213 2219 filename, data, backgroundclose=backgroundclose, **kwargs
2214 2220 )
2215 2221 if b'x' in flags:
2216 2222 self.wvfs.setflags(filename, False, True)
2217 2223 else:
2218 2224 self.wvfs.setflags(filename, False, False)
2219 2225 return len(data)
2220 2226
2221 2227 def wwritedata(self, filename, data):
2222 2228 return self._filter(self._decodefilterpats, filename, data)
2223 2229
2224 2230 def currenttransaction(self):
2225 2231 """return the current transaction or None if non exists"""
2226 2232 if self._transref:
2227 2233 tr = self._transref()
2228 2234 else:
2229 2235 tr = None
2230 2236
2231 2237 if tr and tr.running():
2232 2238 return tr
2233 2239 return None
2234 2240
2235 2241 def transaction(self, desc, report=None):
2236 2242 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2237 2243 b'devel', b'check-locks'
2238 2244 ):
2239 2245 if self._currentlock(self._lockref) is None:
2240 2246 raise error.ProgrammingError(b'transaction requires locking')
2241 2247 tr = self.currenttransaction()
2242 2248 if tr is not None:
2243 2249 return tr.nest(name=desc)
2244 2250
2245 2251 # abort here if the journal already exists
2246 2252 if self.svfs.exists(b"journal"):
2247 2253 raise error.RepoError(
2248 2254 _(b"abandoned transaction found"),
2249 2255 hint=_(b"run 'hg recover' to clean up transaction"),
2250 2256 )
2251 2257
2252 2258 idbase = b"%.40f#%f" % (random.random(), time.time())
2253 2259 ha = hex(hashutil.sha1(idbase).digest())
2254 2260 txnid = b'TXN:' + ha
2255 2261 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2256 2262
2257 2263 self._writejournal(desc)
2258 2264 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2259 2265 if report:
2260 2266 rp = report
2261 2267 else:
2262 2268 rp = self.ui.warn
2263 2269 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2264 2270 # we must avoid cyclic reference between repo and transaction.
2265 2271 reporef = weakref.ref(self)
2266 2272 # Code to track tag movement
2267 2273 #
2268 2274 # Since tags are all handled as file content, it is actually quite hard
2269 2275 # to track these movements from a code perspective. So we fall back to
2270 2276 # tracking at the repository level. One could envision tracking changes
2271 2277 # to the '.hgtags' file through changegroup application, but that fails to
2272 2278 # cope with cases where a transaction exposes new heads without a changegroup
2273 2279 # being involved (e.g. phase movement).
2274 2280 #
2275 2281 # For now, we gate the feature behind a flag since this likely comes
2276 2282 # with performance impacts. The current code runs more often than needed
2277 2283 # and does not use caches as much as it could. The current focus is on
2278 2284 # the behavior of the feature, so we disable it by default. The flag
2279 2285 # will be removed when we are happy with the performance impact.
2280 2286 #
2281 2287 # Once this feature is no longer experimental move the following
2282 2288 # documentation to the appropriate help section:
2283 2289 #
2284 2290 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2285 2291 # tags (new or changed or deleted tags). In addition the details of
2286 2292 # these changes are made available in a file at:
2287 2293 # ``REPOROOT/.hg/changes/tags.changes``.
2288 2294 # Make sure you check for HG_TAG_MOVED before reading that file as it
2289 2295 # might exist from a previous transaction even if no tags were touched
2290 2296 # in this one. Changes are recorded in a line-based format::
2291 2297 #
2292 2298 # <action> <hex-node> <tag-name>\n
2293 2299 #
2294 2300 # Actions are defined as follows (example below):
2295 2301 # "-R": tag is removed,
2296 2302 # "+A": tag is added,
2297 2303 # "-M": tag is moved (old value),
2298 2304 # "+M": tag is moved (new value),
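# For example (hypothetical nodes), moving tag "v1.0" would be recorded
# as two lines:
#
#   -M 9f3a1c2b5d6e7f8091a2b3c4d5e6f708192a3b4c v1.0
#   +M 0a1b2c3d4e5f60718293a4b5c6d7e8f901234567 v1.0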
2299 2305 tracktags = lambda x: None
2300 2306 # experimental config: experimental.hook-track-tags
2301 2307 shouldtracktags = self.ui.configbool(
2302 2308 b'experimental', b'hook-track-tags'
2303 2309 )
2304 2310 if desc != b'strip' and shouldtracktags:
2305 2311 oldheads = self.changelog.headrevs()
2306 2312
2307 2313 def tracktags(tr2):
2308 2314 repo = reporef()
2309 2315 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2310 2316 newheads = repo.changelog.headrevs()
2311 2317 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2312 2318 # notes: we compare lists here.
2313 2319 # As we do it only once, building a set would not be cheaper
2314 2320 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2315 2321 if changes:
2316 2322 tr2.hookargs[b'tag_moved'] = b'1'
2317 2323 with repo.vfs(
2318 2324 b'changes/tags.changes', b'w', atomictemp=True
2319 2325 ) as changesfile:
2320 2326 # note: we do not register the file with the transaction
2321 2327 # because we need it to still exist when the transaction
2322 2328 # is closed (for txnclose hooks)
2323 2329 tagsmod.writediff(changesfile, changes)
2324 2330
2325 2331 def validate(tr2):
2326 2332 """will run pre-closing hooks"""
2327 2333 # XXX the transaction API is a bit lacking here so we take a hacky
2328 2334 # path for now
2329 2335 #
2330 2336 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2331 2337 # dict is copied before these run. In addition we need the data
2332 2338 # available to in-memory hooks too.
2333 2339 #
2334 2340 # Moreover, we also need to make sure this runs before txnclose
2335 2341 # hooks and there is no "pending" mechanism that would execute
2336 2342 # logic only if hooks are about to run.
2337 2343 #
2338 2344 # Fixing this limitation of the transaction is also needed to track
2339 2345 # other families of changes (bookmarks, phases, obsolescence).
2340 2346 #
2341 2347 # This will have to be fixed before we remove the experimental
2342 2348 # gating.
2343 2349 tracktags(tr2)
2344 2350 repo = reporef()
2345 2351
2346 2352 singleheadopt = (b'experimental', b'single-head-per-branch')
2347 2353 singlehead = repo.ui.configbool(*singleheadopt)
2348 2354 if singlehead:
2349 2355 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2350 2356 accountclosed = singleheadsub.get(
2351 2357 b"account-closed-heads", False
2352 2358 )
2353 2359 if singleheadsub.get(b"public-changes-only", False):
2354 2360 filtername = b"immutable"
2355 2361 else:
2356 2362 filtername = b"visible"
2357 2363 scmutil.enforcesinglehead(
2358 2364 repo, tr2, desc, accountclosed, filtername
2359 2365 )
2360 2366 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2361 2367 for name, (old, new) in sorted(
2362 2368 tr.changes[b'bookmarks'].items()
2363 2369 ):
2364 2370 args = tr.hookargs.copy()
2365 2371 args.update(bookmarks.preparehookargs(name, old, new))
2366 2372 repo.hook(
2367 2373 b'pretxnclose-bookmark',
2368 2374 throw=True,
2369 2375 **pycompat.strkwargs(args)
2370 2376 )
2371 2377 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2372 2378 cl = repo.unfiltered().changelog
2373 2379 for revs, (old, new) in tr.changes[b'phases']:
2374 2380 for rev in revs:
2375 2381 args = tr.hookargs.copy()
2376 2382 node = hex(cl.node(rev))
2377 2383 args.update(phases.preparehookargs(node, old, new))
2378 2384 repo.hook(
2379 2385 b'pretxnclose-phase',
2380 2386 throw=True,
2381 2387 **pycompat.strkwargs(args)
2382 2388 )
2383 2389
2384 2390 repo.hook(
2385 2391 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2386 2392 )
2387 2393
2388 2394 def releasefn(tr, success):
2389 2395 repo = reporef()
2390 2396 if repo is None:
2391 2397 # If the repo has been GC'd (and this release function is being
2392 2398 # called from transaction.__del__), there's not much we can do,
2393 2399 # so just leave the unfinished transaction there and let the
2394 2400 # user run `hg recover`.
2395 2401 return
2396 2402 if success:
2397 2403 # this should be explicitly invoked here, because
2398 2404 # in-memory changes aren't written out when closing the
2399 2405 # transaction, if tr.addfilegenerator (via
2400 2406 # dirstate.write or so) isn't invoked while the
2401 2407 # transaction is running
2402 2408 repo.dirstate.write(None)
2403 2409 else:
2404 2410 # discard all changes (including ones already written
2405 2411 # out) in this transaction
2406 2412 narrowspec.restorebackup(self, b'journal.narrowspec')
2407 2413 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2408 2414 repo.dirstate.restorebackup(None, b'journal.dirstate')
2409 2415
2410 2416 repo.invalidate(clearfilecache=True)
2411 2417
2412 2418 tr = transaction.transaction(
2413 2419 rp,
2414 2420 self.svfs,
2415 2421 vfsmap,
2416 2422 b"journal",
2417 2423 b"undo",
2418 2424 aftertrans(renames),
2419 2425 self.store.createmode,
2420 2426 validator=validate,
2421 2427 releasefn=releasefn,
2422 2428 checkambigfiles=_cachedfiles,
2423 2429 name=desc,
2424 2430 )
2425 2431 tr.changes[b'origrepolen'] = len(self)
2426 2432 tr.changes[b'obsmarkers'] = set()
2427 2433 tr.changes[b'phases'] = []
2428 2434 tr.changes[b'bookmarks'] = {}
2429 2435
2430 2436 tr.hookargs[b'txnid'] = txnid
2431 2437 tr.hookargs[b'txnname'] = desc
2432 2438 tr.hookargs[b'changes'] = tr.changes
2433 2439 # note: writing the fncache only during finalize means that the file is
2434 2440 # outdated when running hooks. As fncache is used for streaming clone,
2435 2441 # this is not expected to break anything that happens during the hooks.
2436 2442 tr.addfinalize(b'flush-fncache', self.store.write)
2437 2443
2438 2444 def txnclosehook(tr2):
2439 2445 """To be run if the transaction is successful; will schedule a hook run"""
2440 2446 # Don't reference tr2 in hook() so we don't hold a reference.
2441 2447 # This reduces memory consumption when there are multiple
2442 2448 # transactions per lock. This can likely go away if issue5045
2443 2449 # fixes the function accumulation.
2444 2450 hookargs = tr2.hookargs
2445 2451
2446 2452 def hookfunc(unused_success):
2447 2453 repo = reporef()
2448 2454 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2449 2455 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2450 2456 for name, (old, new) in bmchanges:
2451 2457 args = tr.hookargs.copy()
2452 2458 args.update(bookmarks.preparehookargs(name, old, new))
2453 2459 repo.hook(
2454 2460 b'txnclose-bookmark',
2455 2461 throw=False,
2456 2462 **pycompat.strkwargs(args)
2457 2463 )
2458 2464
2459 2465 if hook.hashook(repo.ui, b'txnclose-phase'):
2460 2466 cl = repo.unfiltered().changelog
2461 2467 phasemv = sorted(
2462 2468 tr.changes[b'phases'], key=lambda r: r[0][0]
2463 2469 )
2464 2470 for revs, (old, new) in phasemv:
2465 2471 for rev in revs:
2466 2472 args = tr.hookargs.copy()
2467 2473 node = hex(cl.node(rev))
2468 2474 args.update(phases.preparehookargs(node, old, new))
2469 2475 repo.hook(
2470 2476 b'txnclose-phase',
2471 2477 throw=False,
2472 2478 **pycompat.strkwargs(args)
2473 2479 )
2474 2480
2475 2481 repo.hook(
2476 2482 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2477 2483 )
2478 2484
2479 2485 reporef()._afterlock(hookfunc)
2480 2486
2481 2487 tr.addfinalize(b'txnclose-hook', txnclosehook)
2482 2488 # Include a leading "-" to make it happen before the transaction summary
2483 2489 # reports registered via scmutil.registersummarycallback() whose names
2484 2490 # are 00-txnreport etc. That way, the caches will be warm when the
2485 2491 # callbacks run.
2486 2492 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2487 2493
2488 2494 def txnaborthook(tr2):
2489 2495 """To be run if transaction is aborted"""
2490 2496 reporef().hook(
2491 2497 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2492 2498 )
2493 2499
2494 2500 tr.addabort(b'txnabort-hook', txnaborthook)
2495 2501 # avoid eager cache invalidation. in-memory data should be identical
2496 2502 # to stored data if transaction has no error.
2497 2503 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2498 2504 self._transref = weakref.ref(tr)
2499 2505 scmutil.registersummarycallback(self, tr, desc)
2500 2506 return tr
2501 2507
2502 2508 def _journalfiles(self):
2503 2509 return (
2504 2510 (self.svfs, b'journal'),
2505 2511 (self.svfs, b'journal.narrowspec'),
2506 2512 (self.vfs, b'journal.narrowspec.dirstate'),
2507 2513 (self.vfs, b'journal.dirstate'),
2508 2514 (self.vfs, b'journal.branch'),
2509 2515 (self.vfs, b'journal.desc'),
2510 2516 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2511 2517 (self.svfs, b'journal.phaseroots'),
2512 2518 )
2513 2519
2514 2520 def undofiles(self):
2515 2521 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2516 2522
2517 2523 @unfilteredmethod
2518 2524 def _writejournal(self, desc):
2519 2525 self.dirstate.savebackup(None, b'journal.dirstate')
2520 2526 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2521 2527 narrowspec.savebackup(self, b'journal.narrowspec')
2522 2528 self.vfs.write(
2523 2529 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2524 2530 )
2525 2531 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2526 2532 bookmarksvfs = bookmarks.bookmarksvfs(self)
2527 2533 bookmarksvfs.write(
2528 2534 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2529 2535 )
2530 2536 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2531 2537
2532 2538 def recover(self):
2533 2539 with self.lock():
2534 2540 if self.svfs.exists(b"journal"):
2535 2541 self.ui.status(_(b"rolling back interrupted transaction\n"))
2536 2542 vfsmap = {
2537 2543 b'': self.svfs,
2538 2544 b'plain': self.vfs,
2539 2545 }
2540 2546 transaction.rollback(
2541 2547 self.svfs,
2542 2548 vfsmap,
2543 2549 b"journal",
2544 2550 self.ui.warn,
2545 2551 checkambigfiles=_cachedfiles,
2546 2552 )
2547 2553 self.invalidate()
2548 2554 return True
2549 2555 else:
2550 2556 self.ui.warn(_(b"no interrupted transaction available\n"))
2551 2557 return False
2552 2558
2553 2559 def rollback(self, dryrun=False, force=False):
2554 2560 wlock = lock = dsguard = None
2555 2561 try:
2556 2562 wlock = self.wlock()
2557 2563 lock = self.lock()
2558 2564 if self.svfs.exists(b"undo"):
2559 2565 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2560 2566
2561 2567 return self._rollback(dryrun, force, dsguard)
2562 2568 else:
2563 2569 self.ui.warn(_(b"no rollback information available\n"))
2564 2570 return 1
2565 2571 finally:
2566 2572 release(dsguard, lock, wlock)
2567 2573
2568 2574 @unfilteredmethod # Until we get smarter cache management
2569 2575 def _rollback(self, dryrun, force, dsguard):
2570 2576 ui = self.ui
2571 2577 try:
2572 2578 args = self.vfs.read(b'undo.desc').splitlines()
2573 2579 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2574 2580 if len(args) >= 3:
2575 2581 detail = args[2]
2576 2582 oldtip = oldlen - 1
2577 2583
2578 2584 if detail and ui.verbose:
2579 2585 msg = _(
2580 2586 b'repository tip rolled back to revision %d'
2581 2587 b' (undo %s: %s)\n'
2582 2588 ) % (oldtip, desc, detail)
2583 2589 else:
2584 2590 msg = _(
2585 2591 b'repository tip rolled back to revision %d (undo %s)\n'
2586 2592 ) % (oldtip, desc)
2587 2593 except IOError:
2588 2594 msg = _(b'rolling back unknown transaction\n')
2589 2595 desc = None
2590 2596
2591 2597 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2592 2598 raise error.Abort(
2593 2599 _(
2594 2600 b'rollback of last commit while not checked out '
2595 2601 b'may lose data'
2596 2602 ),
2597 2603 hint=_(b'use -f to force'),
2598 2604 )
2599 2605
2600 2606 ui.status(msg)
2601 2607 if dryrun:
2602 2608 return 0
2603 2609
2604 2610 parents = self.dirstate.parents()
2605 2611 self.destroying()
2606 2612 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2607 2613 transaction.rollback(
2608 2614 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2609 2615 )
2610 2616 bookmarksvfs = bookmarks.bookmarksvfs(self)
2611 2617 if bookmarksvfs.exists(b'undo.bookmarks'):
2612 2618 bookmarksvfs.rename(
2613 2619 b'undo.bookmarks', b'bookmarks', checkambig=True
2614 2620 )
2615 2621 if self.svfs.exists(b'undo.phaseroots'):
2616 2622 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2617 2623 self.invalidate()
2618 2624
2619 2625 has_node = self.changelog.index.has_node
2620 2626 parentgone = any(not has_node(p) for p in parents)
2621 2627 if parentgone:
2622 2628 # prevent dirstateguard from overwriting already restored one
2623 2629 dsguard.close()
2624 2630
2625 2631 narrowspec.restorebackup(self, b'undo.narrowspec')
2626 2632 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2627 2633 self.dirstate.restorebackup(None, b'undo.dirstate')
2628 2634 try:
2629 2635 branch = self.vfs.read(b'undo.branch')
2630 2636 self.dirstate.setbranch(encoding.tolocal(branch))
2631 2637 except IOError:
2632 2638 ui.warn(
2633 2639 _(
2634 2640 b'named branch could not be reset: '
2635 2641 b'current branch is still \'%s\'\n'
2636 2642 )
2637 2643 % self.dirstate.branch()
2638 2644 )
2639 2645
2640 2646 parents = tuple([p.rev() for p in self[None].parents()])
2641 2647 if len(parents) > 1:
2642 2648 ui.status(
2643 2649 _(
2644 2650 b'working directory now based on '
2645 2651 b'revisions %d and %d\n'
2646 2652 )
2647 2653 % parents
2648 2654 )
2649 2655 else:
2650 2656 ui.status(
2651 2657 _(b'working directory now based on revision %d\n') % parents
2652 2658 )
2653 2659 mergestatemod.mergestate.clean(self)
2654 2660
2655 2661 # TODO: if we know which new heads may result from this rollback, pass
2656 2662 # them to destroy(), which will prevent the branchhead cache from being
2657 2663 # invalidated.
2658 2664 self.destroyed()
2659 2665 return 0
2660 2666
2661 2667 def _buildcacheupdater(self, newtransaction):
2662 2668 """called during transaction to build the callback updating cache
2663 2669
2664 2670 Lives on the repository to help extensions that might want to augment
2665 2671 this logic. For this purpose, the created transaction is passed to the
2666 2672 method.
2667 2673 """
2668 2674 # we must avoid cyclic reference between repo and transaction.
2669 2675 reporef = weakref.ref(self)
2670 2676
2671 2677 def updater(tr):
2672 2678 repo = reporef()
2673 2679 repo.updatecaches(tr)
2674 2680
2675 2681 return updater
2676 2682
2677 2683 @unfilteredmethod
2678 2684 def updatecaches(self, tr=None, full=False):
2679 2685 """warm appropriate caches
2680 2686
2681 2687 If this function is called after a transaction has closed, the transaction
2682 2688 will be available in the 'tr' argument. This can be used to selectively
2683 2689 update caches relevant to the changes in that transaction.
2684 2690
2685 2691 If 'full' is set, make sure all caches the function knows about have
2686 2692 up-to-date data, even the ones usually loaded more lazily.
2687 2693 """
2688 2694 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2689 2695 # During strip, many caches are invalid but
2690 2696 # later call to `destroyed` will refresh them.
2691 2697 return
2692 2698
2693 2699 if tr is None or tr.changes[b'origrepolen'] < len(self):
2694 2700 # accessing the 'served' branchmap should refresh all the others,
2695 2701 self.ui.debug(b'updating the branch cache\n')
2696 2702 self.filtered(b'served').branchmap()
2697 2703 self.filtered(b'served.hidden').branchmap()
2698 2704
2699 2705 if full:
2700 2706 unfi = self.unfiltered()
2701 2707
2702 2708 self.changelog.update_caches(transaction=tr)
2703 2709 self.manifestlog.update_caches(transaction=tr)
2704 2710
2705 2711 rbc = unfi.revbranchcache()
2706 2712 for r in unfi.changelog:
2707 2713 rbc.branchinfo(r)
2708 2714 rbc.write()
2709 2715
2710 2716 # ensure the working copy parents are in the manifestfulltextcache
2711 2717 for ctx in self[b'.'].parents():
2712 2718 ctx.manifest() # accessing the manifest is enough
2713 2719
2714 2720 # accessing fnode cache warms the cache
2715 2721 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2716 2722 # accessing tags warms the cache
2717 2723 self.tags()
2718 2724 self.filtered(b'served').tags()
2719 2725
2720 2726 # The `full` arg is documented as updating even the lazily-loaded
2721 2727 # caches immediately, so we're forcing a write to cause these caches
2722 2728 # to be warmed up even if they haven't explicitly been requested
2723 2729 # yet (if they've never been used by hg, they won't ever have been
2724 2730 # written, even if they're a subset of another kind of cache that
2725 2731 # *has* been used).
2726 2732 for filt in repoview.filtertable.keys():
2727 2733 filtered = self.filtered(filt)
2728 2734 filtered.branchmap().write(filtered)
2729 2735
2730 2736 def invalidatecaches(self):
2731 2737
2732 2738 if '_tagscache' in vars(self):
2733 2739 # can't use delattr on proxy
2734 2740 del self.__dict__['_tagscache']
2735 2741
2736 2742 self._branchcaches.clear()
2737 2743 self.invalidatevolatilesets()
2738 2744 self._sparsesignaturecache.clear()
2739 2745
2740 2746 def invalidatevolatilesets(self):
2741 2747 self.filteredrevcache.clear()
2742 2748 obsolete.clearobscaches(self)
2743 2749 self._quick_access_changeid_invalidate()
2744 2750
2745 2751 def invalidatedirstate(self):
2746 2752 """Invalidates the dirstate, causing the next call to dirstate
2747 2753 to check if it was modified since the last time it was read,
2748 2754 rereading it if it has.
2749 2755
2750 2756 This is different from dirstate.invalidate() in that it doesn't always
2751 2757 reread the dirstate. Use dirstate.invalidate() if you want to
2752 2758 explicitly read the dirstate again (i.e. restoring it to a previous
2753 2759 known good state)."""
2754 2760 if hasunfilteredcache(self, 'dirstate'):
2755 2761 for k in self.dirstate._filecache:
2756 2762 try:
2757 2763 delattr(self.dirstate, k)
2758 2764 except AttributeError:
2759 2765 pass
2760 2766 delattr(self.unfiltered(), 'dirstate')
2761 2767
2762 2768 def invalidate(self, clearfilecache=False):
2763 2769 """Invalidates both store and non-store parts other than dirstate
2764 2770
2765 2771 If a transaction is running, invalidation of store is omitted,
2766 2772 because discarding in-memory changes might cause inconsistency
2767 2773 (e.g. incomplete fncache causes unintentional failure, but
2768 2774 a redundant one doesn't).
2769 2775 """
2770 2776 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2771 2777 for k in list(self._filecache.keys()):
2772 2778 # dirstate is invalidated separately in invalidatedirstate()
2773 2779 if k == b'dirstate':
2774 2780 continue
2775 2781 if (
2776 2782 k == b'changelog'
2777 2783 and self.currenttransaction()
2778 2784 and self.changelog._delayed
2779 2785 ):
2780 2786 # The changelog object may store unwritten revisions. We don't
2781 2787 # want to lose them.
2782 2788 # TODO: Solve the problem instead of working around it.
2783 2789 continue
2784 2790
2785 2791 if clearfilecache:
2786 2792 del self._filecache[k]
2787 2793 try:
2788 2794 delattr(unfiltered, k)
2789 2795 except AttributeError:
2790 2796 pass
2791 2797 self.invalidatecaches()
2792 2798 if not self.currenttransaction():
2793 2799 # TODO: Changing contents of store outside transaction
2794 2800 # causes inconsistency. We should make in-memory store
2795 2801 # changes detectable, and abort if changed.
2796 2802 self.store.invalidatecaches()
2797 2803
2798 2804 def invalidateall(self):
2799 2805 """Fully invalidates both store and non-store parts, causing the
2800 2806 subsequent operation to reread any outside changes."""
2801 2807 # extension should hook this to invalidate its caches
2802 2808 self.invalidate()
2803 2809 self.invalidatedirstate()
2804 2810
2805 2811 @unfilteredmethod
2806 2812 def _refreshfilecachestats(self, tr):
2807 2813 """Reload stats of cached files so that they are flagged as valid"""
2808 2814 for k, ce in self._filecache.items():
2809 2815 k = pycompat.sysstr(k)
2810 2816 if k == 'dirstate' or k not in self.__dict__:
2811 2817 continue
2812 2818 ce.refresh()
2813 2819
2814 2820 def _lock(
2815 2821 self,
2816 2822 vfs,
2817 2823 lockname,
2818 2824 wait,
2819 2825 releasefn,
2820 2826 acquirefn,
2821 2827 desc,
2822 2828 ):
2823 2829 timeout = 0
2824 2830 warntimeout = 0
2825 2831 if wait:
2826 2832 timeout = self.ui.configint(b"ui", b"timeout")
2827 2833 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2828 2834 # internal config: ui.signal-safe-lock
2829 2835 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2830 2836
2831 2837 l = lockmod.trylock(
2832 2838 self.ui,
2833 2839 vfs,
2834 2840 lockname,
2835 2841 timeout,
2836 2842 warntimeout,
2837 2843 releasefn=releasefn,
2838 2844 acquirefn=acquirefn,
2839 2845 desc=desc,
2840 2846 signalsafe=signalsafe,
2841 2847 )
2842 2848 return l
2843 2849
2844 2850 def _afterlock(self, callback):
2845 2851 """add a callback to be run when the repository is fully unlocked
2846 2852
2847 2853 The callback will be executed when the outermost lock is released
2848 2854 (with wlock being higher level than 'lock')."""
2849 2855 for ref in (self._wlockref, self._lockref):
2850 2856 l = ref and ref()
2851 2857 if l and l.held:
2852 2858 l.postrelease.append(callback)
2853 2859 break
2854 2860 else: # no lock has been found.
2855 2861 callback(True)
2856 2862
2857 2863 def lock(self, wait=True):
2858 2864 """Lock the repository store (.hg/store) and return a weak reference
2859 2865 to the lock. Use this before modifying the store (e.g. committing or
2860 2866 stripping). If you are opening a transaction, get a lock as well.
2861 2867
2862 2868 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2863 2869 'wlock' first to avoid a dead-lock hazard."""
2864 2870 l = self._currentlock(self._lockref)
2865 2871 if l is not None:
2866 2872 l.lock()
2867 2873 return l
2868 2874
2869 2875 l = self._lock(
2870 2876 vfs=self.svfs,
2871 2877 lockname=b"lock",
2872 2878 wait=wait,
2873 2879 releasefn=None,
2874 2880 acquirefn=self.invalidate,
2875 2881 desc=_(b'repository %s') % self.origroot,
2876 2882 )
2877 2883 self._lockref = weakref.ref(l)
2878 2884 return l
2879 2885
2880 2886 def wlock(self, wait=True):
2881 2887 """Lock the non-store parts of the repository (everything under
2882 2888 .hg except .hg/store) and return a weak reference to the lock.
2883 2889
2884 2890 Use this before modifying files in .hg.
2885 2891
2886 2892 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2887 2893 'wlock' first to avoid a dead-lock hazard."""
2888 2894 l = self._wlockref and self._wlockref()
2889 2895 if l is not None and l.held:
2890 2896 l.lock()
2891 2897 return l
2892 2898
2893 2899 # We do not need to check for non-waiting lock acquisition. Such
2894 2900 # acquisition would not cause a dead-lock, as it would just fail.
2895 2901 if wait and (
2896 2902 self.ui.configbool(b'devel', b'all-warnings')
2897 2903 or self.ui.configbool(b'devel', b'check-locks')
2898 2904 ):
2899 2905 if self._currentlock(self._lockref) is not None:
2900 2906 self.ui.develwarn(b'"wlock" acquired after "lock"')
2901 2907
2902 2908 def unlock():
2903 2909 if self.dirstate.pendingparentchange():
2904 2910 self.dirstate.invalidate()
2905 2911 else:
2906 2912 self.dirstate.write(None)
2907 2913
2908 2914 self._filecache[b'dirstate'].refresh()
2909 2915
2910 2916 l = self._lock(
2911 2917 self.vfs,
2912 2918 b"wlock",
2913 2919 wait,
2914 2920 unlock,
2915 2921 self.invalidatedirstate,
2916 2922 _(b'working directory of %s') % self.origroot,
2917 2923 )
2918 2924 self._wlockref = weakref.ref(l)
2919 2925 return l
2920 2926
2921 2927 def _currentlock(self, lockref):
2922 2928 """Returns the lock if it's held, or None if it's not."""
2923 2929 if lockref is None:
2924 2930 return None
2925 2931 l = lockref()
2926 2932 if l is None or not l.held:
2927 2933 return None
2928 2934 return l
2929 2935
2930 2936 def currentwlock(self):
2931 2937 """Returns the wlock if it's held, or None if it's not."""
2932 2938 return self._currentlock(self._wlockref)
2933 2939
2934 2940 def checkcommitpatterns(self, wctx, match, status, fail):
2935 2941 """check for commit arguments that aren't committable"""
2936 2942 if match.isexact() or match.prefix():
2937 2943 matched = set(status.modified + status.added + status.removed)
2938 2944
2939 2945 for f in match.files():
2940 2946 f = self.dirstate.normalize(f)
2941 2947 if f == b'.' or f in matched or f in wctx.substate:
2942 2948 continue
2943 2949 if f in status.deleted:
2944 2950 fail(f, _(b'file not found!'))
2945 2951 # Is it a directory that exists or used to exist?
2946 2952 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2947 2953 d = f + b'/'
2948 2954 for mf in matched:
2949 2955 if mf.startswith(d):
2950 2956 break
2951 2957 else:
2952 2958 fail(f, _(b"no match under directory!"))
2953 2959 elif f not in self.dirstate:
2954 2960 fail(f, _(b"file not tracked!"))
2955 2961
2956 2962 @unfilteredmethod
2957 2963 def commit(
2958 2964 self,
2959 2965 text=b"",
2960 2966 user=None,
2961 2967 date=None,
2962 2968 match=None,
2963 2969 force=False,
2964 2970 editor=None,
2965 2971 extra=None,
2966 2972 ):
2967 2973 """Add a new revision to current repository.
2968 2974
2969 2975 Revision information is gathered from the working directory,
2970 2976 match can be used to filter the committed files. If editor is
2971 2977 supplied, it is called to get a commit message.
2972 2978 """
2973 2979 if extra is None:
2974 2980 extra = {}
2975 2981
2976 2982 def fail(f, msg):
2977 2983 raise error.InputError(b'%s: %s' % (f, msg))
2978 2984
2979 2985 if not match:
2980 2986 match = matchmod.always()
2981 2987
2982 2988 if not force:
2983 2989 match.bad = fail
2984 2990
2985 2991 # lock() for recent changelog (see issue4368)
2986 2992 with self.wlock(), self.lock():
2987 2993 wctx = self[None]
2988 2994 merge = len(wctx.parents()) > 1
2989 2995
2990 2996 if not force and merge and not match.always():
2991 2997 raise error.Abort(
2992 2998 _(
2993 2999 b'cannot partially commit a merge '
2994 3000 b'(do not specify files or patterns)'
2995 3001 )
2996 3002 )
2997 3003
2998 3004 status = self.status(match=match, clean=force)
2999 3005 if force:
3000 3006 status.modified.extend(
3001 3007 status.clean
3002 3008 ) # mq may commit clean files
3003 3009
3004 3010 # check subrepos
3005 3011 subs, commitsubs, newstate = subrepoutil.precommit(
3006 3012 self.ui, wctx, status, match, force=force
3007 3013 )
3008 3014
3009 3015 # make sure all explicit patterns are matched
3010 3016 if not force:
3011 3017 self.checkcommitpatterns(wctx, match, status, fail)
3012 3018
3013 3019 cctx = context.workingcommitctx(
3014 3020 self, status, text, user, date, extra
3015 3021 )
3016 3022
3017 3023 ms = mergestatemod.mergestate.read(self)
3018 3024 mergeutil.checkunresolved(ms)
3019 3025
3020 3026 # internal config: ui.allowemptycommit
3021 3027 if cctx.isempty() and not self.ui.configbool(
3022 3028 b'ui', b'allowemptycommit'
3023 3029 ):
3024 3030 self.ui.debug(b'nothing to commit, clearing merge state\n')
3025 3031 ms.reset()
3026 3032 return None
3027 3033
3028 3034 if merge and cctx.deleted():
3029 3035 raise error.Abort(_(b"cannot commit merge with missing files"))
3030 3036
3031 3037 if editor:
3032 3038 cctx._text = editor(self, cctx, subs)
3033 3039 edited = text != cctx._text
3034 3040
3035 3041 # Save commit message in case this transaction gets rolled back
3036 3042 # (e.g. by a pretxncommit hook). Leave the content alone on
3037 3043 # the assumption that the user will use the same editor again.
3038 3044 msgfn = self.savecommitmessage(cctx._text)
3039 3045
3040 3046 # commit subs and write new state
3041 3047 if subs:
3042 3048 uipathfn = scmutil.getuipathfn(self)
3043 3049 for s in sorted(commitsubs):
3044 3050 sub = wctx.sub(s)
3045 3051 self.ui.status(
3046 3052 _(b'committing subrepository %s\n')
3047 3053 % uipathfn(subrepoutil.subrelpath(sub))
3048 3054 )
3049 3055 sr = sub.commit(cctx._text, user, date)
3050 3056 newstate[s] = (newstate[s][0], sr)
3051 3057 subrepoutil.writestate(self, newstate)
3052 3058
3053 3059 p1, p2 = self.dirstate.parents()
3054 3060 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3055 3061 try:
3056 3062 self.hook(
3057 3063 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3058 3064 )
3059 3065 with self.transaction(b'commit'):
3060 3066 ret = self.commitctx(cctx, True)
3061 3067 # update bookmarks, dirstate and mergestate
3062 3068 bookmarks.update(self, [p1, p2], ret)
3063 3069 cctx.markcommitted(ret)
3064 3070 ms.reset()
3065 3071 except: # re-raises
3066 3072 if edited:
3067 3073 self.ui.write(
3068 3074 _(b'note: commit message saved in %s\n') % msgfn
3069 3075 )
3070 3076 self.ui.write(
3071 3077 _(
3072 3078 b"note: use 'hg commit --logfile "
3073 3079 b".hg/last-message.txt --edit' to reuse it\n"
3074 3080 )
3075 3081 )
3076 3082 raise
3077 3083
3078 3084 def commithook(unused_success):
3079 3085 # hack for commands that use a temporary commit (e.g. histedit):
3080 3086 # the temporary commit may have been stripped before hook release
3081 3087 if self.changelog.hasnode(ret):
3082 3088 self.hook(
3083 3089 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3084 3090 )
3085 3091
3086 3092 self._afterlock(commithook)
3087 3093 return ret
3088 3094
3089 3095 @unfilteredmethod
3090 3096 def commitctx(self, ctx, error=False, origctx=None):
3091 3097 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3092 3098
3093 3099 @unfilteredmethod
3094 3100 def destroying(self):
3095 3101 """Inform the repository that nodes are about to be destroyed.
3096 3102 Intended for use by strip and rollback, so there's a common
3097 3103 place for anything that has to be done before destroying history.
3098 3104
3099 3105 This is mostly useful for saving state that is in memory and waiting
3100 3106 to be flushed when the current lock is released. Because a call to
3101 3107 destroyed is imminent, the repo will be invalidated causing those
3102 3108 changes to stay in memory (waiting for the next unlock), or vanish
3103 3109 completely.
3104 3110 """
3105 3111 # When using the same lock to commit and strip, the phasecache is left
3106 3112 # dirty after committing. Then when we strip, the repo is invalidated,
3107 3113 # causing those changes to disappear.
3108 3114 if '_phasecache' in vars(self):
3109 3115 self._phasecache.write()
3110 3116
3111 3117 @unfilteredmethod
3112 3118 def destroyed(self):
3113 3119 """Inform the repository that nodes have been destroyed.
3114 3120 Intended for use by strip and rollback, so there's a common
3115 3121 place for anything that has to be done after destroying history.
3116 3122 """
3117 3123 # When one tries to:
3118 3124 # 1) destroy nodes thus calling this method (e.g. strip)
3119 3125 # 2) use phasecache somewhere (e.g. commit)
3120 3126 #
3121 3127 # then 2) will fail because the phasecache contains nodes that were
3122 3128 # removed. We can either remove phasecache from the filecache,
3123 3129 # causing it to reload next time it is accessed, or simply filter
3124 3130 # the removed nodes now and write the updated cache.
3125 3131 self._phasecache.filterunknown(self)
3126 3132 self._phasecache.write()
3127 3133
3128 3134 # refresh all repository caches
3129 3135 self.updatecaches()
3130 3136
3131 3137 # Ensure the persistent tag cache is updated. Doing it now
3132 3138 # means that the tag cache only has to worry about destroyed
3133 3139 # heads immediately after a strip/rollback. That in turn
3134 3140 # guarantees that "cachetip == currenttip" (comparing both rev
3135 3141 # and node) always means no nodes have been added or destroyed.
3136 3142
3137 3143 # XXX this is suboptimal when qrefresh'ing: we strip the current
3138 3144 # head, refresh the tag cache, then immediately add a new head.
3139 3145 # But I think doing it this way is necessary for the "instant
3140 3146 # tag cache retrieval" case to work.
3141 3147 self.invalidate()
3142 3148
3143 3149 def status(
3144 3150 self,
3145 3151 node1=b'.',
3146 3152 node2=None,
3147 3153 match=None,
3148 3154 ignored=False,
3149 3155 clean=False,
3150 3156 unknown=False,
3151 3157 listsubrepos=False,
3152 3158 ):
3153 3159 '''a convenience method that calls node1.status(node2)'''
3154 3160 return self[node1].status(
3155 3161 node2, match, ignored, clean, unknown, listsubrepos
3156 3162 )
3157 3163
3158 3164 def addpostdsstatus(self, ps):
3159 3165 """Add a callback to run within the wlock, at the point at which status
3160 3166 fixups happen.
3161 3167
3162 3168 On status completion, callback(wctx, status) will be called with the
3163 3169 wlock held, unless the dirstate has changed from underneath or the wlock
3164 3170 couldn't be grabbed.
3165 3171
3166 3172 Callbacks should not capture and use a cached copy of the dirstate --
3167 3173 it might change in the meantime. Instead, they should access the
3168 3174 dirstate via wctx.repo().dirstate.
3169 3175
3170 3176 This list is emptied out after each status run -- extensions should
3171 3177 make sure they add to this list each time dirstate.status is called.
3172 3178 Extensions should also make sure they don't call this for statuses
3173 3179 that don't involve the dirstate.
3174 3180 """
3175 3181
3176 3182 # The list is located here for uniqueness reasons -- it is actually
3177 3183 # managed by the workingctx, but that isn't unique per-repo.
3178 3184 self._postdsstatus.append(ps)
3179 3185
3180 3186 def postdsstatus(self):
3181 3187 """Used by workingctx to get the list of post-dirstate-status hooks."""
3182 3188 return self._postdsstatus
3183 3189
3184 3190 def clearpostdsstatus(self):
3185 3191 """Used by workingctx to clear post-dirstate-status hooks."""
3186 3192 del self._postdsstatus[:]
3187 3193
3188 3194 def heads(self, start=None):
3189 3195 if start is None:
3190 3196 cl = self.changelog
3191 3197 headrevs = reversed(cl.headrevs())
3192 3198 return [cl.node(rev) for rev in headrevs]
3193 3199
3194 3200 heads = self.changelog.heads(start)
3195 3201 # sort the output in rev descending order
3196 3202 return sorted(heads, key=self.changelog.rev, reverse=True)
3197 3203
3198 3204 def branchheads(self, branch=None, start=None, closed=False):
3199 3205 """return a (possibly filtered) list of heads for the given branch
3200 3206
3201 3207 Heads are returned in topological order, from newest to oldest.
3202 3208 If branch is None, use the dirstate branch.
3203 3209 If start is not None, return only heads reachable from start.
3204 3210 If closed is True, return heads that are marked as closed as well.
3205 3211 """
3206 3212 if branch is None:
3207 3213 branch = self[None].branch()
3208 3214 branches = self.branchmap()
3209 3215 if not branches.hasbranch(branch):
3210 3216 return []
3211 3217 # the cache returns heads ordered lowest to highest
3212 3218 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3213 3219 if start is not None:
3214 3220 # filter out the heads that cannot be reached from startrev
3215 3221 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3216 3222 bheads = [h for h in bheads if h in fbheads]
3217 3223 return bheads
3218 3224
3219 3225 def branches(self, nodes):
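# For each requested node, follow first parents until a merge or the root
# is reached, and record (start node, stop node, p1, p2) for that linear
# segment (another legacy wire-protocol helper).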
3220 3226 if not nodes:
3221 3227 nodes = [self.changelog.tip()]
3222 3228 b = []
3223 3229 for n in nodes:
3224 3230 t = n
3225 3231 while True:
3226 3232 p = self.changelog.parents(n)
3227 3233 if p[1] != nullid or p[0] == nullid:
3228 3234 b.append((t, n, p[0], p[1]))
3229 3235 break
3230 3236 n = p[0]
3231 3237 return b
3232 3238
3233 3239 def between(self, pairs):
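# For each (top, bottom) pair, walk first parents from 'top' towards
# 'bottom', collecting the nodes seen at exponentially growing distances
# (1, 2, 4, ...) and stopping at 'bottom' or the null node (historically
# this backs the legacy 'between' wire-protocol command).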
3234 3240 r = []
3235 3241
3236 3242 for top, bottom in pairs:
3237 3243 n, l, i = top, [], 0
3238 3244 f = 1
3239 3245
3240 3246 while n != bottom and n != nullid:
3241 3247 p = self.changelog.parents(n)[0]
3242 3248 if i == f:
3243 3249 l.append(n)
3244 3250 f = f * 2
3245 3251 n = p
3246 3252 i += 1
3247 3253
3248 3254 r.append(l)
3249 3255
3250 3256 return r
3251 3257
3252 3258 def checkpush(self, pushop):
3253 3259 """Extensions can override this function if additional checks have
3254 3260 to be performed before pushing, or call it if they override push
3255 3261 command.
3256 3262 """
3257 3263
3258 3264 @unfilteredpropertycache
3259 3265 def prepushoutgoinghooks(self):
3260 3266 """Return a util.hooks object whose hooks are called before pushing
3261 3267 changesets; each hook is passed a pushop carrying repo, remote and outgoing.
3262 3268 """
3263 3269 return util.hooks()
3264 3270
3265 3271 def pushkey(self, namespace, key, old, new):
3266 3272 try:
3267 3273 tr = self.currenttransaction()
3268 3274 hookargs = {}
3269 3275 if tr is not None:
3270 3276 hookargs.update(tr.hookargs)
3271 3277 hookargs = pycompat.strkwargs(hookargs)
3272 3278 hookargs['namespace'] = namespace
3273 3279 hookargs['key'] = key
3274 3280 hookargs['old'] = old
3275 3281 hookargs['new'] = new
3276 3282 self.hook(b'prepushkey', throw=True, **hookargs)
3277 3283 except error.HookAbort as exc:
3278 3284 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3279 3285 if exc.hint:
3280 3286 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3281 3287 return False
3282 3288 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3283 3289 ret = pushkey.push(self, namespace, key, old, new)
3284 3290
3285 3291 def runhook(unused_success):
3286 3292 self.hook(
3287 3293 b'pushkey',
3288 3294 namespace=namespace,
3289 3295 key=key,
3290 3296 old=old,
3291 3297 new=new,
3292 3298 ret=ret,
3293 3299 )
3294 3300
3295 3301 self._afterlock(runhook)
3296 3302 return ret
3297 3303
3298 3304 def listkeys(self, namespace):
3299 3305 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3300 3306 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3301 3307 values = pushkey.list(self, namespace)
3302 3308 self.hook(b'listkeys', namespace=namespace, values=values)
3303 3309 return values
3304 3310
3305 3311 def debugwireargs(self, one, two, three=None, four=None, five=None):
3306 3312 '''used to test argument passing over the wire'''
3307 3313 return b"%s %s %s %s %s" % (
3308 3314 one,
3309 3315 two,
3310 3316 pycompat.bytestr(three),
3311 3317 pycompat.bytestr(four),
3312 3318 pycompat.bytestr(five),
3313 3319 )
3314 3320
3315 3321 def savecommitmessage(self, text):
3316 3322 fp = self.vfs(b'last-message.txt', b'wb')
3317 3323 try:
3318 3324 fp.write(text)
3319 3325 finally:
3320 3326 fp.close()
3321 3327 return self.pathto(fp.name[len(self.root) + 1 :])
3322 3328
3323 3329
3324 3330 # used to avoid circular references so destructors work
3325 3331 def aftertrans(files):
3326 3332 renamefiles = [tuple(t) for t in files]
3327 3333
3328 3334 def a():
3329 3335 for vfs, src, dest in renamefiles:
3330 3336 # if src and dest refer to the same file, vfs.rename is a no-op,
3331 3337 # leaving both src and dest on disk. Delete dest to make sure
3332 3338 # the rename can't be such a no-op.
3333 3339 vfs.tryunlink(dest)
3334 3340 try:
3335 3341 vfs.rename(src, dest)
3336 3342 except OSError: # journal file does not yet exist
3337 3343 pass
3338 3344
3339 3345 return a
3340 3346
3341 3347
3342 3348 def undoname(fn):
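# maps a journal file name to its undo counterpart,
# e.g. b'journal.dirstate' -> b'undo.dirstate'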
3343 3349 base, name = os.path.split(fn)
3344 3350 assert name.startswith(b'journal')
3345 3351 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3346 3352
3347 3353
3348 3354 def instance(ui, path, create, intents=None, createopts=None):
3349 3355 localpath = util.urllocalpath(path)
3350 3356 if create:
3351 3357 createrepository(ui, localpath, createopts=createopts)
3352 3358
3353 3359 return makelocalrepository(ui, localpath, intents=intents)
3354 3360
3355 3361
3356 3362 def islocal(path):
3357 3363 return True
3358 3364
3359 3365
3360 3366 def defaultcreateopts(ui, createopts=None):
3361 3367 """Populate the default creation options for a repository.
3362 3368
3363 3369 A dictionary of explicitly requested creation options can be passed
3364 3370 in. Missing keys will be populated.
3365 3371 """
3366 3372 createopts = dict(createopts or {})
3367 3373
3368 3374 if b'backend' not in createopts:
3369 3375 # experimental config: storage.new-repo-backend
3370 3376 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3371 3377
3372 3378 return createopts
3373 3379
3374 3380
3375 3381 def newreporequirements(ui, createopts):
3376 3382 """Determine the set of requirements for a new local repository.
3377 3383
3378 3384 Extensions can wrap this function to specify custom requirements for
3379 3385 new repositories.
3380 3386 """
3381 3387 # If the repo is being created from a shared repository, we copy
3382 3388 # its requirements.
3383 3389 if b'sharedrepo' in createopts:
3384 3390 requirements = set(createopts[b'sharedrepo'].requirements)
3385 3391 if createopts.get(b'sharedrelative'):
3386 3392 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3387 3393 else:
3388 3394 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3389 3395
3390 3396 return requirements
3391 3397
3392 3398 if b'backend' not in createopts:
3393 3399 raise error.ProgrammingError(
3394 3400 b'backend key not present in createopts; '
3395 3401 b'was defaultcreateopts() called?'
3396 3402 )
3397 3403
3398 3404 if createopts[b'backend'] != b'revlogv1':
3399 3405 raise error.Abort(
3400 3406 _(
3401 3407 b'unable to determine repository requirements for '
3402 3408 b'storage backend: %s'
3403 3409 )
3404 3410 % createopts[b'backend']
3405 3411 )
3406 3412
3407 3413 requirements = {b'revlogv1'}
3408 3414 if ui.configbool(b'format', b'usestore'):
3409 3415 requirements.add(b'store')
3410 3416 if ui.configbool(b'format', b'usefncache'):
3411 3417 requirements.add(b'fncache')
3412 3418 if ui.configbool(b'format', b'dotencode'):
3413 3419 requirements.add(b'dotencode')
3414 3420
3415 3421 compengines = ui.configlist(b'format', b'revlog-compression')
3416 3422 for compengine in compengines:
3417 3423 if compengine in util.compengines:
3418 3424 break
3419 3425 else:
3420 3426 raise error.Abort(
3421 3427 _(
3422 3428 b'compression engines %s defined by '
3423 3429 b'format.revlog-compression not available'
3424 3430 )
3425 3431 % b', '.join(b'"%s"' % e for e in compengines),
3426 3432 hint=_(
3427 3433 b'run "hg debuginstall" to list available '
3428 3434 b'compression engines'
3429 3435 ),
3430 3436 )
3431 3437
3432 3438 # zlib is the historical default and doesn't need an explicit requirement.
3433 3439 if compengine == b'zstd':
3434 3440 requirements.add(b'revlog-compression-zstd')
3435 3441 elif compengine != b'zlib':
3436 3442 requirements.add(b'exp-compression-%s' % compengine)
3437 3443
3438 3444 if scmutil.gdinitconfig(ui):
3439 3445 requirements.add(b'generaldelta')
3440 3446 if ui.configbool(b'format', b'sparse-revlog'):
3441 3447 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3442 3448
3443 3449 # experimental config: format.exp-use-side-data
3444 3450 if ui.configbool(b'format', b'exp-use-side-data'):
3445 3451 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3446 3452 # experimental config: format.exp-use-copies-side-data-changeset
3447 3453 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3448 3454 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3449 3455 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3450 3456 if ui.configbool(b'experimental', b'treemanifest'):
3451 3457 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3452 3458
3453 3459 revlogv2 = ui.config(b'experimental', b'revlogv2')
3454 3460 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3455 3461 requirements.remove(b'revlogv1')
3456 3462 # generaldelta is implied by revlogv2.
3457 3463 requirements.discard(b'generaldelta')
3458 3464 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3459 3465 # experimental config: format.internal-phase
3460 3466 if ui.configbool(b'format', b'internal-phase'):
3461 3467 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3462 3468
3463 3469 if createopts.get(b'narrowfiles'):
3464 3470 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3465 3471
3466 3472 if createopts.get(b'lfs'):
3467 3473 requirements.add(b'lfs')
3468 3474
3469 3475 if ui.configbool(b'format', b'bookmarks-in-store'):
3470 3476 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3471 3477
3472 3478 if ui.configbool(b'format', b'use-persistent-nodemap'):
3473 3479 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3474 3480
3475 3481 # if share-safe is enabled, let's create the new repository with the new
3476 3482 # requirement
3477 3483 if ui.configbool(b'format', b'use-share-safe'):
3478 3484 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3479 3485
3480 3486 return requirements
3481 3487
3482 3488
3483 3489 def checkrequirementscompat(ui, requirements):
3484 3490 """Checks compatibility of repository requirements enabled and disabled.
3485 3491
3486 3492 Returns a set of requirements which need to be dropped because dependent
3487 3493 requirements are not enabled. Also warns users about it."""
3488 3494
3489 3495 dropped = set()
3490 3496
3491 3497 if b'store' not in requirements:
3492 3498 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3493 3499 ui.warn(
3494 3500 _(
3495 3501 b'ignoring enabled \'format.bookmarks-in-store\' config '
3496 3502 b'because it is incompatible with disabled '
3497 3503 b'\'format.usestore\' config\n'
3498 3504 )
3499 3505 )
3500 3506 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3501 3507
3502 3508 if (
3503 3509 requirementsmod.SHARED_REQUIREMENT in requirements
3504 3510 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3505 3511 ):
3506 3512 raise error.Abort(
3507 3513 _(
3508 3514 b"cannot create shared repository as source was created"
3509 3515 b" with 'format.usestore' config disabled"
3510 3516 )
3511 3517 )
3512 3518
3513 3519 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3514 3520 ui.warn(
3515 3521 _(
3516 3522 b"ignoring enabled 'format.use-share-safe' config because "
3517 3523 b"it is incompatible with disabled 'format.usestore'"
3518 3524 b" config\n"
3519 3525 )
3520 3526 )
3521 3527 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3522 3528
3523 3529 return dropped
3524 3530
3525 3531
3526 3532 def filterknowncreateopts(ui, createopts):
3527 3533 """Filters a dict of repo creation options against options that are known.
3528 3534
3529 3535 Receives a dict of repo creation options and returns a dict of those
3530 3536 options that we don't know how to handle.
3531 3537
3532 3538 This function is called as part of repository creation. If the
3533 3539 returned dict contains any items, repository creation will not
3534 3540 be allowed, as it means there was a request to create a repository
3535 3541 with options not recognized by loaded code.
3536 3542
3537 3543 Extensions can wrap this function to filter out creation options
3538 3544 they know how to handle.
3539 3545 """
3540 3546 known = {
3541 3547 b'backend',
3542 3548 b'lfs',
3543 3549 b'narrowfiles',
3544 3550 b'sharedrepo',
3545 3551 b'sharedrelative',
3546 3552 b'shareditems',
3547 3553 b'shallowfilestore',
3548 3554 }
3549 3555
3550 3556 return {k: v for k, v in createopts.items() if k not in known}
3551 3557
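As noted in the docstring above, extensions can wrap this function to claim creation options they know how to handle. A hedged sketch of such a wrapper (the b'myfeature' option and the extension itself are hypothetical, not part of this change):

    # Hypothetical extension code.
    from mercurial import extensions, localrepo

    def _filterknowncreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        # Claim b'myfeature' so repository creation is not rejected for it.
        unknown.pop(b'myfeature', None)
        return unknown

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo, 'filterknowncreateopts', _filterknowncreateopts
        )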
3552 3558
3553 3559 def createrepository(ui, path, createopts=None):
3554 3560 """Create a new repository in a vfs.
3555 3561
3556 3562 ``path`` path to the new repo's working directory.
3557 3563 ``createopts`` options for the new repository.
3558 3564
3559 3565 The following keys for ``createopts`` are recognized:
3560 3566
3561 3567 backend
3562 3568 The storage backend to use.
3563 3569 lfs
3564 3570 Repository will be created with ``lfs`` requirement. The lfs extension
3565 3571 will automatically be loaded when the repository is accessed.
3566 3572 narrowfiles
3567 3573 Set up repository to support narrow file storage.
3568 3574 sharedrepo
3569 3575 Repository object from which storage should be shared.
3570 3576 sharedrelative
3571 3577 Boolean indicating if the path to the shared repo should be
3572 3578 stored as relative. By default, the pointer to the "parent" repo
3573 3579 is stored as an absolute path.
3574 3580 shareditems
3575 3581 Set of items to share to the new repository (in addition to storage).
3576 3582 shallowfilestore
3577 3583 Indicates that storage for files should be shallow (not all ancestor
3578 3584 revisions are known).
3579 3585 """
3580 3586 createopts = defaultcreateopts(ui, createopts=createopts)
3581 3587
3582 3588 unknownopts = filterknowncreateopts(ui, createopts)
3583 3589
3584 3590 if not isinstance(unknownopts, dict):
3585 3591 raise error.ProgrammingError(
3586 3592 b'filterknowncreateopts() did not return a dict'
3587 3593 )
3588 3594
3589 3595 if unknownopts:
3590 3596 raise error.Abort(
3591 3597 _(
3592 3598 b'unable to create repository because of unknown '
3593 3599 b'creation option: %s'
3594 3600 )
3595 3601 % b', '.join(sorted(unknownopts)),
3596 3602 hint=_(b'is a required extension not loaded?'),
3597 3603 )
3598 3604
3599 3605 requirements = newreporequirements(ui, createopts=createopts)
3600 3606 requirements -= checkrequirementscompat(ui, requirements)
3601 3607
3602 3608 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3603 3609
3604 3610 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3605 3611 if hgvfs.exists():
3606 3612 raise error.RepoError(_(b'repository %s already exists') % path)
3607 3613
3608 3614 if b'sharedrepo' in createopts:
3609 3615 sharedpath = createopts[b'sharedrepo'].sharedpath
3610 3616
3611 3617 if createopts.get(b'sharedrelative'):
3612 3618 try:
3613 3619 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3614 3620 except (IOError, ValueError) as e:
3615 3621 # ValueError is raised on Windows if the drive letters differ
3616 3622 # on each path.
3617 3623 raise error.Abort(
3618 3624 _(b'cannot calculate relative path'),
3619 3625 hint=stringutil.forcebytestr(e),
3620 3626 )
3621 3627
3622 3628 if not wdirvfs.exists():
3623 3629 wdirvfs.makedirs()
3624 3630
3625 3631 hgvfs.makedir(notindexed=True)
3626 3632 if b'sharedrepo' not in createopts:
3627 3633 hgvfs.mkdir(b'cache')
3628 3634 hgvfs.mkdir(b'wcache')
3629 3635
3630 3636 if b'store' in requirements and b'sharedrepo' not in createopts:
3631 3637 hgvfs.mkdir(b'store')
3632 3638
3633 3639 # We create an invalid changelog outside the store so very old
3634 3640 # Mercurial versions (which didn't know about the requirements
3635 3641 # file) encounter an error on reading the changelog. This
3636 3642 # effectively locks out old clients and prevents them from
3637 3643 # mucking with a repo in an unknown format.
3638 3644 #
3639 3645 # The revlog header has version 65535, which won't be recognized by
3640 3646 # such old clients.
3641 3647 hgvfs.append(
3642 3648 b'00changelog.i',
3643 3649 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3644 3650 b'layout',
3645 3651 )
3646 3652
3647 3653 # Filter the requirements into working copy and store ones
3648 3654 wcreq, storereq = scmutil.filterrequirements(requirements)
3649 3655 # write working copy ones
3650 3656 scmutil.writerequires(hgvfs, wcreq)
3651 3657 # If there are store requirements and the current repository
3652 3658 # is not a shared one, write stored requirements
3653 3659 # For a new shared repository, we don't need to write the store
3654 3660 # requirements as they are already present in the store's requires file
3655 3661 if storereq and b'sharedrepo' not in createopts:
3656 3662 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3657 3663 scmutil.writerequires(storevfs, storereq)
3658 3664
3659 3665 # Write out file telling readers where to find the shared store.
3660 3666 if b'sharedrepo' in createopts:
3661 3667 hgvfs.write(b'sharedpath', sharedpath)
3662 3668
3663 3669 if createopts.get(b'shareditems'):
3664 3670 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3665 3671 hgvfs.write(b'shared', shared)
3666 3672
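A minimal call sketch (the destination path and chosen option are assumptions): creating a repository with narrow file storage amounts to passing the corresponding createopts key; instance() above performs the same call when asked to create.

    # Sketch only: path and options are illustrative.
    createrepository(ui, b'/path/to/newrepo', createopts={b'narrowfiles': True})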
3667 3673
3668 3674 def poisonrepository(repo):
3669 3675 """Poison a repository instance so it can no longer be used."""
3670 3676 # Perform any cleanup on the instance.
3671 3677 repo.close()
3672 3678
3673 3679 # Our strategy is to replace the type of the object with one that
3674 3680 # has all attribute lookups result in error.
3675 3681 #
3676 3682 # But we have to allow the close() method because some constructors
3677 3683 # of repos call close() on repo references.
3678 3684 class poisonedrepository(object):
3679 3685 def __getattribute__(self, item):
3680 3686 if item == 'close':
3681 3687 return object.__getattribute__(self, item)
3682 3688
3683 3689 raise error.ProgrammingError(
3684 3690 b'repo instances should not be used after unshare'
3685 3691 )
3686 3692
3687 3693 def close(self):
3688 3694 pass
3689 3695
3690 3696 # We may have a repoview, which intercepts __setattr__. So be sure
3691 3697 # we operate at the lowest level possible.
3692 3698 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,3087 +1,3110 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullhex,
29 29 nullid,
30 30 nullrev,
31 31 short,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .pycompat import getattr
39 39 from .revlogutils.constants import (
40 40 FLAG_GENERALDELTA,
41 41 FLAG_INLINE_DATA,
42 42 REVLOGV0,
43 43 REVLOGV1,
44 44 REVLOGV1_FLAGS,
45 45 REVLOGV2,
46 46 REVLOGV2_FLAGS,
47 47 REVLOG_DEFAULT_FLAGS,
48 48 REVLOG_DEFAULT_FORMAT,
49 49 REVLOG_DEFAULT_VERSION,
50 50 )
51 51 from .revlogutils.flagutil import (
52 52 REVIDX_DEFAULT_FLAGS,
53 53 REVIDX_ELLIPSIS,
54 54 REVIDX_EXTSTORED,
55 55 REVIDX_FLAGS_ORDER,
56 56 REVIDX_HASCOPIESINFO,
57 57 REVIDX_ISCENSORED,
58 58 REVIDX_RAWTEXT_CHANGING_FLAGS,
59 59 REVIDX_SIDEDATA,
60 60 )
61 61 from .thirdparty import attr
62 62 from . import (
63 63 ancestor,
64 64 dagop,
65 65 error,
66 66 mdiff,
67 67 policy,
68 68 pycompat,
69 69 templatefilters,
70 70 util,
71 71 )
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76 from .revlogutils import (
77 77 deltas as deltautil,
78 78 flagutil,
79 79 nodemap as nodemaputil,
80 80 sidedata as sidedatautil,
81 81 )
82 82 from .utils import (
83 83 storageutil,
84 84 stringutil,
85 85 )
86 86
87 87 # blanked usage of all the names to prevent pyflakes complaints
88 88 # We need these names available in the module for extensions.
89 89 REVLOGV0
90 90 REVLOGV1
91 91 REVLOGV2
92 92 FLAG_INLINE_DATA
93 93 FLAG_GENERALDELTA
94 94 REVLOG_DEFAULT_FLAGS
95 95 REVLOG_DEFAULT_FORMAT
96 96 REVLOG_DEFAULT_VERSION
97 97 REVLOGV1_FLAGS
98 98 REVLOGV2_FLAGS
99 99 REVIDX_ISCENSORED
100 100 REVIDX_ELLIPSIS
101 101 REVIDX_SIDEDATA
102 102 REVIDX_HASCOPIESINFO
103 103 REVIDX_EXTSTORED
104 104 REVIDX_DEFAULT_FLAGS
105 105 REVIDX_FLAGS_ORDER
106 106 REVIDX_RAWTEXT_CHANGING_FLAGS
107 107
108 108 parsers = policy.importmod('parsers')
109 109 rustancestor = policy.importrust('ancestor')
110 110 rustdagop = policy.importrust('dagop')
111 111 rustrevlog = policy.importrust('revlog')
112 112
113 113 # Aliased for performance.
114 114 _zlibdecompress = zlib.decompress
115 115
116 116 # max size of revlog with inline data
117 117 _maxinline = 131072
118 118 _chunksize = 1048576
119 119
120 120 # Flag processors for REVIDX_ELLIPSIS.
121 121 def ellipsisreadprocessor(rl, text):
122 122 return text, False, {}
123 123
124 124
125 125 def ellipsiswriteprocessor(rl, text, sidedata):
126 126 return text, False
127 127
128 128
129 129 def ellipsisrawprocessor(rl, text):
130 130 return False
131 131
132 132
133 133 ellipsisprocessor = (
134 134 ellipsisreadprocessor,
135 135 ellipsiswriteprocessor,
136 136 ellipsisrawprocessor,
137 137 )
138 138
139 139
140 140 def getoffset(q):
141 141 return int(q >> 16)
142 142
143 143
144 144 def gettype(q):
145 145 return int(q & 0xFFFF)
146 146
147 147
148 148 def offset_type(offset, type):
149 149 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
150 150 raise ValueError(b'unknown revlog index flags')
151 151 return int(int(offset) << 16 | type)
152 152
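A quick worked example of the packing these three helpers implement: the data offset lives in the upper bits and the 16 flag bits in the lower bits (flag value 0 is used so the known-flags check passes).

    packed = offset_type(1048576, 0)      # (1048576 << 16) | 0
    assert getoffset(packed) == 1048576   # upper bits hold the data offset
    assert gettype(packed) == 0           # lower 16 bits hold the flags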
153 153
154 154 def _verify_revision(rl, skipflags, state, node):
155 155 """Verify the integrity of the given revlog ``node`` while providing a hook
156 156 point for extensions to influence the operation."""
157 157 if skipflags:
158 158 state[b'skipread'].add(node)
159 159 else:
160 160 # Side-effect: read content and verify hash.
161 161 rl.revision(node)
162 162
163 163
164 164 # True if a fast implementation for persistent-nodemap is available
165 165 #
166 166 # We also consider the "pure" python implementation "fast" because
167 167 # people using pure don't really have performance considerations (and a
168 168 # wheelbarrow of other slowness sources)
169 169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
170 170 parsers, 'BaseIndexObject'
171 171 )
172 172
173 173
174 174 @attr.s(slots=True, frozen=True)
175 175 class _revisioninfo(object):
176 176 """Information about a revision that allows building its fulltext
177 177 node: expected hash of the revision
178 178 p1, p2: parent revs of the revision
179 179 btext: built text cache consisting of a one-element list
180 180 cachedelta: (baserev, uncompressed_delta) or None
181 181 flags: flags associated to the revision storage
182 182
183 183 One of btext[0] or cachedelta must be set.
184 184 """
185 185
186 186 node = attr.ib()
187 187 p1 = attr.ib()
188 188 p2 = attr.ib()
189 189 btext = attr.ib()
190 190 textlen = attr.ib()
191 191 cachedelta = attr.ib()
192 192 flags = attr.ib()
193 193
194 194
195 195 @interfaceutil.implementer(repository.irevisiondelta)
196 196 @attr.s(slots=True)
197 197 class revlogrevisiondelta(object):
198 198 node = attr.ib()
199 199 p1node = attr.ib()
200 200 p2node = attr.ib()
201 201 basenode = attr.ib()
202 202 flags = attr.ib()
203 203 baserevisionsize = attr.ib()
204 204 revision = attr.ib()
205 205 delta = attr.ib()
206 206 linknode = attr.ib(default=None)
207 207
208 208
209 209 @interfaceutil.implementer(repository.iverifyproblem)
210 210 @attr.s(frozen=True)
211 211 class revlogproblem(object):
212 212 warning = attr.ib(default=None)
213 213 error = attr.ib(default=None)
214 214 node = attr.ib(default=None)
215 215
216 216
217 217 # index v0:
218 218 # 4 bytes: offset
219 219 # 4 bytes: compressed length
220 220 # 4 bytes: base rev
221 221 # 4 bytes: link rev
222 222 # 20 bytes: parent 1 nodeid
223 223 # 20 bytes: parent 2 nodeid
224 224 # 20 bytes: nodeid
225 225 indexformatv0 = struct.Struct(b">4l20s20s20s")
226 226 indexformatv0_pack = indexformatv0.pack
227 227 indexformatv0_unpack = indexformatv0.unpack
228 228
229 229
230 230 class revlogoldindex(list):
231 231 @property
232 232 def nodemap(self):
233 233 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
234 234 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
235 235 return self._nodemap
236 236
237 237 @util.propertycache
238 238 def _nodemap(self):
239 239 nodemap = nodemaputil.NodeMap({nullid: nullrev})
240 240 for r in range(0, len(self)):
241 241 n = self[r][7]
242 242 nodemap[n] = r
243 243 return nodemap
244 244
245 245 def has_node(self, node):
246 246 """return True if the node exist in the index"""
247 247 return node in self._nodemap
248 248
249 249 def rev(self, node):
250 250 """return a revision for a node
251 251
252 252 If the node is unknown, raise a RevlogError"""
253 253 return self._nodemap[node]
254 254
255 255 def get_rev(self, node):
256 256 """return a revision for a node
257 257
258 258 If the node is unknown, return None"""
259 259 return self._nodemap.get(node)
260 260
261 261 def append(self, tup):
262 262 self._nodemap[tup[7]] = len(self)
263 263 super(revlogoldindex, self).append(tup)
264 264
265 265 def __delitem__(self, i):
266 266 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
267 267 raise ValueError(b"deleting slices only supports a:-1 with step 1")
268 268 for r in pycompat.xrange(i.start, len(self)):
269 269 del self._nodemap[self[r][7]]
270 270 super(revlogoldindex, self).__delitem__(i)
271 271
272 272 def clearcaches(self):
273 273 self.__dict__.pop('_nodemap', None)
274 274
275 275 def __getitem__(self, i):
276 276 if i == -1:
277 277 return (0, 0, 0, -1, -1, -1, -1, nullid)
278 278 return list.__getitem__(self, i)
279 279
280 280
281 281 class revlogoldio(object):
282 282 def __init__(self):
283 283 self.size = indexformatv0.size
284 284
285 285 def parseindex(self, data, inline):
286 286 s = self.size
287 287 index = []
288 288 nodemap = nodemaputil.NodeMap({nullid: nullrev})
289 289 n = off = 0
290 290 l = len(data)
291 291 while off + s <= l:
292 292 cur = data[off : off + s]
293 293 off += s
294 294 e = indexformatv0_unpack(cur)
295 295 # transform to revlogv1 format
296 296 e2 = (
297 297 offset_type(e[0], 0),
298 298 e[1],
299 299 -1,
300 300 e[2],
301 301 e[3],
302 302 nodemap.get(e[4], nullrev),
303 303 nodemap.get(e[5], nullrev),
304 304 e[6],
305 305 )
306 306 index.append(e2)
307 307 nodemap[e[6]] = n
308 308 n += 1
309 309
310 310 index = revlogoldindex(index)
311 311 return index, None
312 312
313 313 def packentry(self, entry, node, version, rev):
314 314 if gettype(entry[0]):
315 315 raise error.RevlogError(
316 316 _(b'index entry flags need revlog version 1')
317 317 )
318 318 e2 = (
319 319 getoffset(entry[0]),
320 320 entry[1],
321 321 entry[3],
322 322 entry[4],
323 323 node(entry[5]),
324 324 node(entry[6]),
325 325 entry[7],
326 326 )
327 327 return indexformatv0_pack(*e2)
328 328
329 329
330 330 # index ng:
331 331 # 6 bytes: offset
332 332 # 2 bytes: flags
333 333 # 4 bytes: compressed length
334 334 # 4 bytes: uncompressed length
335 335 # 4 bytes: base rev
336 336 # 4 bytes: link rev
337 337 # 4 bytes: parent 1 rev
338 338 # 4 bytes: parent 2 rev
339 339 # 32 bytes: nodeid
340 340 indexformatng = struct.Struct(b">Qiiiiii20s12x")
341 341 indexformatng_pack = indexformatng.pack
342 342 versionformat = struct.Struct(b">I")
343 343 versionformat_pack = versionformat.pack
344 344 versionformat_unpack = versionformat.unpack
345 345
346 346 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
347 347 # signed integer)
348 348 _maxentrysize = 0x7FFFFFFF
349 349
350 350
351 351 class revlogio(object):
352 352 def __init__(self):
353 353 self.size = indexformatng.size
354 354
355 355 def parseindex(self, data, inline):
356 356 # call the C implementation to parse the index data
357 357 index, cache = parsers.parse_index2(data, inline)
358 358 return index, cache
359 359
360 360 def packentry(self, entry, node, version, rev):
361 361 p = indexformatng_pack(*entry)
362 362 if rev == 0:
363 363 p = versionformat_pack(version) + p[4:]
364 364 return p
365 365
366 366
367 367 NodemapRevlogIO = None
368 368
369 369 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
370 370
371 371 class NodemapRevlogIO(revlogio):
372 372 """A debug oriented IO class that return a PersistentNodeMapIndexObject
373 373
374 374 The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
375 375 """
376 376
377 377 def parseindex(self, data, inline):
378 378 index, cache = parsers.parse_index_devel_nodemap(data, inline)
379 379 return index, cache
380 380
381 381
382 382 class rustrevlogio(revlogio):
383 383 def parseindex(self, data, inline):
384 384 index, cache = super(rustrevlogio, self).parseindex(data, inline)
385 385 return rustrevlog.MixedIndex(index), cache
386 386
387 387
388 388 class revlog(object):
389 389 """
390 390 the underlying revision storage object
391 391
392 392 A revlog consists of two parts, an index and the revision data.
393 393
394 394 The index is a file with a fixed record size containing
395 395 information on each revision, including its nodeid (hash), the
396 396 nodeids of its parents, the position and offset of its data within
397 397 the data file, and the revision it's based on. Finally, each entry
398 398 contains a linkrev entry that can serve as a pointer to external
399 399 data.
400 400
401 401 The revision data itself is a linear collection of data chunks.
402 402 Each chunk represents a revision and is usually represented as a
403 403 delta against the previous chunk. To bound lookup time, runs of
404 404 deltas are limited to about 2 times the length of the original
405 405 version data. This makes retrieval of a version proportional to
406 406 its size, or O(1) relative to the number of revisions.
407 407
408 408 Both pieces of the revlog are written to in an append-only
409 409 fashion, which means we never need to rewrite a file to insert or
410 410 remove data, and can use some simple techniques to avoid the need
411 411 for locking while reading.
412 412
413 413 If checkambig, indexfile is opened with checkambig=True at
414 414 writing, to avoid file stat ambiguity.
415 415
416 416 If mmaplargeindex is True, and an mmapindexthreshold is set, the
417 417 index will be mmapped rather than read if it is larger than the
418 418 configured threshold.
419 419
420 420 If censorable is True, the revlog can have censored revisions.
421 421
422 422 If `upperboundcomp` is not None, this is the expected maximal gain from
423 423 compression for the data content.
424
425 `concurrencychecker` is an optional function that receives 3 arguments: a
426 file handle, a filename, and an expected position. It should check whether
427 the current position in the file handle is valid, and log, warn, or fail
428 (by raising) if it is not.
424 429 """
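To make the new hook concrete, here is a hedged sketch of a checker matching the signature described above; this helper is an illustration only, not the implementation added by this change. Such a callable would be passed as `concurrencychecker=` when constructing the revlog.

    # Illustration only: a strict checker that fails whenever the handle is
    # not where the caller expected it to be.
    def _position_checker(fh, fn, expected):
        actual = fh.tell()
        if actual != expected:
            raise error.RevlogError(
                b'%s: unexpected file position %d (expected %d)'
                % (fn, actual, expected)
            )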
425 430
426 431 _flagserrorclass = error.RevlogError
427 432
428 433 def __init__(
429 434 self,
430 435 opener,
431 436 indexfile,
432 437 datafile=None,
433 438 checkambig=False,
434 439 mmaplargeindex=False,
435 440 censorable=False,
436 441 upperboundcomp=None,
437 442 persistentnodemap=False,
443 concurrencychecker=None,
438 444 ):
439 445 """
440 446 create a revlog object
441 447
442 448 opener is a function that abstracts the file opening operation
443 449 and can be used to implement COW semantics or the like.
444 450
445 451 """
446 452 self.upperboundcomp = upperboundcomp
447 453 self.indexfile = indexfile
448 454 self.datafile = datafile or (indexfile[:-2] + b".d")
449 455 self.nodemap_file = None
450 456 if persistentnodemap:
451 457 self.nodemap_file = nodemaputil.get_nodemap_file(
452 458 opener, self.indexfile
453 459 )
454 460
455 461 self.opener = opener
456 462 # When True, indexfile is opened with checkambig=True at writing, to
457 463 # avoid file stat ambiguity.
458 464 self._checkambig = checkambig
459 465 self._mmaplargeindex = mmaplargeindex
460 466 self._censorable = censorable
461 467 # 3-tuple of (node, rev, text) for a raw revision.
462 468 self._revisioncache = None
463 469 # Maps rev to chain base rev.
464 470 self._chainbasecache = util.lrucachedict(100)
465 471 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
466 472 self._chunkcache = (0, b'')
467 473 # How much data to read and cache into the raw revlog data cache.
468 474 self._chunkcachesize = 65536
469 475 self._maxchainlen = None
470 476 self._deltabothparents = True
471 477 self.index = None
472 478 self._nodemap_docket = None
473 479 # Mapping of partial identifiers to full nodes.
474 480 self._pcache = {}
475 481 # Mapping of revision integer to full node.
476 482 self._compengine = b'zlib'
477 483 self._compengineopts = {}
478 484 self._maxdeltachainspan = -1
479 485 self._withsparseread = False
480 486 self._sparserevlog = False
481 487 self._srdensitythreshold = 0.50
482 488 self._srmingapsize = 262144
483 489
484 490 # Make copy of flag processors so each revlog instance can support
485 491 # custom flags.
486 492 self._flagprocessors = dict(flagutil.flagprocessors)
487 493
488 494 # 2-tuple of file handles being used for active writing.
489 495 self._writinghandles = None
490 496
491 497 self._loadindex()
492 498
499 self._concurrencychecker = concurrencychecker
500
493 501 def _loadindex(self):
494 502 mmapindexthreshold = None
495 503 opts = self.opener.options
496 504
497 505 if b'revlogv2' in opts:
498 506 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
499 507 elif b'revlogv1' in opts:
500 508 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
501 509 if b'generaldelta' in opts:
502 510 newversionflags |= FLAG_GENERALDELTA
503 511 elif b'revlogv0' in self.opener.options:
504 512 newversionflags = REVLOGV0
505 513 else:
506 514 newversionflags = REVLOG_DEFAULT_VERSION
507 515
508 516 if b'chunkcachesize' in opts:
509 517 self._chunkcachesize = opts[b'chunkcachesize']
510 518 if b'maxchainlen' in opts:
511 519 self._maxchainlen = opts[b'maxchainlen']
512 520 if b'deltabothparents' in opts:
513 521 self._deltabothparents = opts[b'deltabothparents']
514 522 self._lazydelta = bool(opts.get(b'lazydelta', True))
515 523 self._lazydeltabase = False
516 524 if self._lazydelta:
517 525 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
518 526 if b'compengine' in opts:
519 527 self._compengine = opts[b'compengine']
520 528 if b'zlib.level' in opts:
521 529 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
522 530 if b'zstd.level' in opts:
523 531 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
524 532 if b'maxdeltachainspan' in opts:
525 533 self._maxdeltachainspan = opts[b'maxdeltachainspan']
526 534 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
527 535 mmapindexthreshold = opts[b'mmapindexthreshold']
528 536 self.hassidedata = bool(opts.get(b'side-data', False))
529 537 if self.hassidedata:
530 538 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
531 539 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
532 540 withsparseread = bool(opts.get(b'with-sparse-read', False))
533 541 # sparse-revlog forces sparse-read
534 542 self._withsparseread = self._sparserevlog or withsparseread
535 543 if b'sparse-read-density-threshold' in opts:
536 544 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
537 545 if b'sparse-read-min-gap-size' in opts:
538 546 self._srmingapsize = opts[b'sparse-read-min-gap-size']
539 547 if opts.get(b'enableellipsis'):
540 548 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
541 549
542 550 # revlog v0 doesn't have flag processors
543 551 for flag, processor in pycompat.iteritems(
544 552 opts.get(b'flagprocessors', {})
545 553 ):
546 554 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
547 555
548 556 if self._chunkcachesize <= 0:
549 557 raise error.RevlogError(
550 558 _(b'revlog chunk cache size %r is not greater than 0')
551 559 % self._chunkcachesize
552 560 )
553 561 elif self._chunkcachesize & (self._chunkcachesize - 1):
554 562 raise error.RevlogError(
555 563 _(b'revlog chunk cache size %r is not a power of 2')
556 564 % self._chunkcachesize
557 565 )
558 566
559 567 indexdata = b''
560 568 self._initempty = True
561 569 try:
562 570 with self._indexfp() as f:
563 571 if (
564 572 mmapindexthreshold is not None
565 573 and self.opener.fstat(f).st_size >= mmapindexthreshold
566 574 ):
567 575 # TODO: should .close() to release resources without
568 576 # relying on Python GC
569 577 indexdata = util.buffer(util.mmapread(f))
570 578 else:
571 579 indexdata = f.read()
572 580 if len(indexdata) > 0:
573 581 versionflags = versionformat_unpack(indexdata[:4])[0]
574 582 self._initempty = False
575 583 else:
576 584 versionflags = newversionflags
577 585 except IOError as inst:
578 586 if inst.errno != errno.ENOENT:
579 587 raise
580 588
581 589 versionflags = newversionflags
582 590
583 591 self.version = versionflags
584 592
585 593 flags = versionflags & ~0xFFFF
586 594 fmt = versionflags & 0xFFFF
587 595
588 596 if fmt == REVLOGV0:
589 597 if flags:
590 598 raise error.RevlogError(
591 599 _(b'unknown flags (%#04x) in version %d revlog %s')
592 600 % (flags >> 16, fmt, self.indexfile)
593 601 )
594 602
595 603 self._inline = False
596 604 self._generaldelta = False
597 605
598 606 elif fmt == REVLOGV1:
599 607 if flags & ~REVLOGV1_FLAGS:
600 608 raise error.RevlogError(
601 609 _(b'unknown flags (%#04x) in version %d revlog %s')
602 610 % (flags >> 16, fmt, self.indexfile)
603 611 )
604 612
605 613 self._inline = versionflags & FLAG_INLINE_DATA
606 614 self._generaldelta = versionflags & FLAG_GENERALDELTA
607 615
608 616 elif fmt == REVLOGV2:
609 617 if flags & ~REVLOGV2_FLAGS:
610 618 raise error.RevlogError(
611 619 _(b'unknown flags (%#04x) in version %d revlog %s')
612 620 % (flags >> 16, fmt, self.indexfile)
613 621 )
614 622
615 623 self._inline = versionflags & FLAG_INLINE_DATA
616 624 # generaldelta implied by version 2 revlogs.
617 625 self._generaldelta = True
618 626
619 627 else:
620 628 raise error.RevlogError(
621 629 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
622 630 )
623 631 # sparse-revlog can't be on without general-delta (issue6056)
624 632 if not self._generaldelta:
625 633 self._sparserevlog = False
626 634
627 635 self._storedeltachains = True
628 636
629 637 devel_nodemap = (
630 638 self.nodemap_file
631 639 and opts.get(b'devel-force-nodemap', False)
632 640 and NodemapRevlogIO is not None
633 641 )
634 642
635 643 use_rust_index = False
636 644 if rustrevlog is not None:
637 645 if self.nodemap_file is not None:
638 646 use_rust_index = True
639 647 else:
640 648 use_rust_index = self.opener.options.get(b'rust.index')
641 649
642 650 self._io = revlogio()
643 651 if self.version == REVLOGV0:
644 652 self._io = revlogoldio()
645 653 elif devel_nodemap:
646 654 self._io = NodemapRevlogIO()
647 655 elif use_rust_index:
648 656 self._io = rustrevlogio()
649 657 try:
650 658 d = self._io.parseindex(indexdata, self._inline)
651 659 index, _chunkcache = d
652 660 use_nodemap = (
653 661 not self._inline
654 662 and self.nodemap_file is not None
655 663 and util.safehasattr(index, 'update_nodemap_data')
656 664 )
657 665 if use_nodemap:
658 666 nodemap_data = nodemaputil.persisted_data(self)
659 667 if nodemap_data is not None:
660 668 docket = nodemap_data[0]
661 669 if (
662 670 len(d[0]) > docket.tip_rev
663 671 and d[0][docket.tip_rev][7] == docket.tip_node
664 672 ):
665 673 # no changelog tampering
666 674 self._nodemap_docket = docket
667 675 index.update_nodemap_data(*nodemap_data)
668 676 except (ValueError, IndexError):
669 677 raise error.RevlogError(
670 678 _(b"index %s is corrupted") % self.indexfile
671 679 )
672 680 self.index, self._chunkcache = d
673 681 if not self._chunkcache:
674 682 self._chunkclear()
675 683 # revnum -> (chain-length, sum-delta-length)
676 684 self._chaininfocache = util.lrucachedict(500)
677 685 # revlog header -> revlog compressor
678 686 self._decompressors = {}
679 687
680 688 @util.propertycache
681 689 def _compressor(self):
682 690 engine = util.compengines[self._compengine]
683 691 return engine.revlogcompressor(self._compengineopts)
684 692
685 693 def _indexfp(self, mode=b'r'):
686 694 """file object for the revlog's index file"""
687 695 args = {'mode': mode}
688 696 if mode != b'r':
689 697 args['checkambig'] = self._checkambig
690 698 if mode == b'w':
691 699 args['atomictemp'] = True
692 700 return self.opener(self.indexfile, **args)
693 701
694 702 def _datafp(self, mode=b'r'):
695 703 """file object for the revlog's data file"""
696 704 return self.opener(self.datafile, mode=mode)
697 705
698 706 @contextlib.contextmanager
699 707 def _datareadfp(self, existingfp=None):
700 708 """file object suitable to read data"""
701 709 # Use explicit file handle, if given.
702 710 if existingfp is not None:
703 711 yield existingfp
704 712
705 713 # Use a file handle being actively used for writes, if available.
706 714 # There is some danger to doing this because reads will seek the
707 715 # file. However, _writeentry() performs a SEEK_END before all writes,
708 716 # so we should be safe.
709 717 elif self._writinghandles:
710 718 if self._inline:
711 719 yield self._writinghandles[0]
712 720 else:
713 721 yield self._writinghandles[1]
714 722
715 723 # Otherwise open a new file handle.
716 724 else:
717 725 if self._inline:
718 726 func = self._indexfp
719 727 else:
720 728 func = self._datafp
721 729 with func() as fp:
722 730 yield fp
723 731
724 732 def tiprev(self):
725 733 return len(self.index) - 1
726 734
727 735 def tip(self):
728 736 return self.node(self.tiprev())
729 737
730 738 def __contains__(self, rev):
731 739 return 0 <= rev < len(self)
732 740
733 741 def __len__(self):
734 742 return len(self.index)
735 743
736 744 def __iter__(self):
737 745 return iter(pycompat.xrange(len(self)))
738 746
739 747 def revs(self, start=0, stop=None):
740 748 """iterate over all rev in this revlog (from start to stop)"""
741 749 return storageutil.iterrevs(len(self), start=start, stop=stop)
742 750
743 751 @property
744 752 def nodemap(self):
745 753 msg = (
746 754 b"revlog.nodemap is deprecated, "
747 755 b"use revlog.index.[has_node|rev|get_rev]"
748 756 )
749 757 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
750 758 return self.index.nodemap
751 759
752 760 @property
753 761 def _nodecache(self):
754 762 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
755 763 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
756 764 return self.index.nodemap
757 765
758 766 def hasnode(self, node):
759 767 try:
760 768 self.rev(node)
761 769 return True
762 770 except KeyError:
763 771 return False
764 772
765 773 def candelta(self, baserev, rev):
766 774 """whether two revisions (baserev, rev) can be delta-ed or not"""
767 775 # Disable delta if either rev requires a content-changing flag
768 776 # processor (ex. LFS). This is because such flag processor can alter
769 777 # the rawtext content that the delta will be based on, and two clients
770 778 # could have a same revlog node with different flags (i.e. different
771 779 # rawtext contents) and the delta could be incompatible.
772 780 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
773 781 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
774 782 ):
775 783 return False
776 784 return True
777 785
778 786 def update_caches(self, transaction):
779 787 if self.nodemap_file is not None:
780 788 if transaction is None:
781 789 nodemaputil.update_persistent_nodemap(self)
782 790 else:
783 791 nodemaputil.setup_persistent_nodemap(transaction, self)
784 792
785 793 def clearcaches(self):
786 794 self._revisioncache = None
787 795 self._chainbasecache.clear()
788 796 self._chunkcache = (0, b'')
789 797 self._pcache = {}
790 798 self._nodemap_docket = None
791 799 self.index.clearcaches()
792 800 # The python code is the one responsible for validating the docket, we
793 801 # end up having to refresh it here.
794 802 use_nodemap = (
795 803 not self._inline
796 804 and self.nodemap_file is not None
797 805 and util.safehasattr(self.index, 'update_nodemap_data')
798 806 )
799 807 if use_nodemap:
800 808 nodemap_data = nodemaputil.persisted_data(self)
801 809 if nodemap_data is not None:
802 810 self._nodemap_docket = nodemap_data[0]
803 811 self.index.update_nodemap_data(*nodemap_data)
804 812
805 813 def rev(self, node):
806 814 try:
807 815 return self.index.rev(node)
808 816 except TypeError:
809 817 raise
810 818 except error.RevlogError:
811 819 # parsers.c radix tree lookup failed
812 820 if node == wdirid or node in wdirfilenodeids:
813 821 raise error.WdirUnsupported
814 822 raise error.LookupError(node, self.indexfile, _(b'no node'))
815 823
816 824 # Accessors for index entries.
817 825
818 826 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
819 827 # are flags.
820 828 def start(self, rev):
821 829 return int(self.index[rev][0] >> 16)
822 830
823 831 def flags(self, rev):
824 832 return self.index[rev][0] & 0xFFFF
825 833
826 834 def length(self, rev):
827 835 return self.index[rev][1]
828 836
829 837 def rawsize(self, rev):
830 838 """return the length of the uncompressed text for a given revision"""
831 839 l = self.index[rev][2]
832 840 if l >= 0:
833 841 return l
834 842
835 843 t = self.rawdata(rev)
836 844 return len(t)
837 845
838 846 def size(self, rev):
839 847 """length of non-raw text (processed by a "read" flag processor)"""
840 848 # fast path: if no "read" flag processor could change the content,
841 849 # size is rawsize. note: ELLIPSIS is known to not change the content.
842 850 flags = self.flags(rev)
843 851 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
844 852 return self.rawsize(rev)
845 853
846 854 return len(self.revision(rev, raw=False))
847 855
848 856 def chainbase(self, rev):
849 857 base = self._chainbasecache.get(rev)
850 858 if base is not None:
851 859 return base
852 860
853 861 index = self.index
854 862 iterrev = rev
855 863 base = index[iterrev][3]
856 864 while base != iterrev:
857 865 iterrev = base
858 866 base = index[iterrev][3]
859 867
860 868 self._chainbasecache[rev] = base
861 869 return base
862 870
863 871 def linkrev(self, rev):
864 872 return self.index[rev][4]
865 873
866 874 def parentrevs(self, rev):
867 875 try:
868 876 entry = self.index[rev]
869 877 except IndexError:
870 878 if rev == wdirrev:
871 879 raise error.WdirUnsupported
872 880 raise
873 881
874 882 return entry[5], entry[6]
875 883
876 884 # fast parentrevs(rev) where rev isn't filtered
877 885 _uncheckedparentrevs = parentrevs
878 886
879 887 def node(self, rev):
880 888 try:
881 889 return self.index[rev][7]
882 890 except IndexError:
883 891 if rev == wdirrev:
884 892 raise error.WdirUnsupported
885 893 raise
886 894
887 895 # Derived from index values.
888 896
889 897 def end(self, rev):
890 898 return self.start(rev) + self.length(rev)
891 899
892 900 def parents(self, node):
893 901 i = self.index
894 902 d = i[self.rev(node)]
895 903 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
896 904
897 905 def chainlen(self, rev):
898 906 return self._chaininfo(rev)[0]
899 907
900 908 def _chaininfo(self, rev):
901 909 chaininfocache = self._chaininfocache
902 910 if rev in chaininfocache:
903 911 return chaininfocache[rev]
904 912 index = self.index
905 913 generaldelta = self._generaldelta
906 914 iterrev = rev
907 915 e = index[iterrev]
908 916 clen = 0
909 917 compresseddeltalen = 0
910 918 while iterrev != e[3]:
911 919 clen += 1
912 920 compresseddeltalen += e[1]
913 921 if generaldelta:
914 922 iterrev = e[3]
915 923 else:
916 924 iterrev -= 1
917 925 if iterrev in chaininfocache:
918 926 t = chaininfocache[iterrev]
919 927 clen += t[0]
920 928 compresseddeltalen += t[1]
921 929 break
922 930 e = index[iterrev]
923 931 else:
924 932 # Add text length of base since decompressing that also takes
925 933 # work. For cache hits the length is already included.
926 934 compresseddeltalen += e[1]
927 935 r = (clen, compresseddeltalen)
928 936 chaininfocache[rev] = r
929 937 return r
930 938
931 939 def _deltachain(self, rev, stoprev=None):
932 940 """Obtain the delta chain for a revision.
933 941
934 942 ``stoprev`` specifies a revision to stop at. If not specified, we
935 943 stop at the base of the chain.
936 944
937 945 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
938 946 revs in ascending order and ``stopped`` is a bool indicating whether
939 947 ``stoprev`` was hit.
940 948 """
941 949 # Try C implementation.
942 950 try:
943 951 return self.index.deltachain(rev, stoprev, self._generaldelta)
944 952 except AttributeError:
945 953 pass
946 954
947 955 chain = []
948 956
949 957 # Alias to prevent attribute lookup in tight loop.
950 958 index = self.index
951 959 generaldelta = self._generaldelta
952 960
953 961 iterrev = rev
954 962 e = index[iterrev]
955 963 while iterrev != e[3] and iterrev != stoprev:
956 964 chain.append(iterrev)
957 965 if generaldelta:
958 966 iterrev = e[3]
959 967 else:
960 968 iterrev -= 1
961 969 e = index[iterrev]
962 970
963 971 if iterrev == stoprev:
964 972 stopped = True
965 973 else:
966 974 chain.append(iterrev)
967 975 stopped = False
968 976
969 977 chain.reverse()
970 978 return chain, stopped
971 979
972 980 def ancestors(self, revs, stoprev=0, inclusive=False):
973 981 """Generate the ancestors of 'revs' in reverse revision order.
974 982 Does not generate revs lower than stoprev.
975 983
976 984 See the documentation for ancestor.lazyancestors for more details."""
977 985
978 986 # first, make sure start revisions aren't filtered
979 987 revs = list(revs)
980 988 checkrev = self.node
981 989 for r in revs:
982 990 checkrev(r)
983 991 # and we're sure ancestors aren't filtered as well
984 992
985 993 if rustancestor is not None:
986 994 lazyancestors = rustancestor.LazyAncestors
987 995 arg = self.index
988 996 else:
989 997 lazyancestors = ancestor.lazyancestors
990 998 arg = self._uncheckedparentrevs
991 999 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
992 1000
993 1001 def descendants(self, revs):
994 1002 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
995 1003
996 1004 def findcommonmissing(self, common=None, heads=None):
997 1005 """Return a tuple of the ancestors of common and the ancestors of heads
998 1006 that are not ancestors of common. In revset terminology, we return the
999 1007 tuple:
1000 1008
1001 1009 ::common, (::heads) - (::common)
1002 1010
1003 1011 The list is sorted by revision number, meaning it is
1004 1012 topologically sorted.
1005 1013
1006 1014 'heads' and 'common' are both lists of node IDs. If heads is
1007 1015 not supplied, uses all of the revlog's heads. If common is not
1008 1016 supplied, uses nullid."""
1009 1017 if common is None:
1010 1018 common = [nullid]
1011 1019 if heads is None:
1012 1020 heads = self.heads()
1013 1021
1014 1022 common = [self.rev(n) for n in common]
1015 1023 heads = [self.rev(n) for n in heads]
1016 1024
1017 1025 # we want the ancestors, but inclusive
1018 1026 class lazyset(object):
1019 1027 def __init__(self, lazyvalues):
1020 1028 self.addedvalues = set()
1021 1029 self.lazyvalues = lazyvalues
1022 1030
1023 1031 def __contains__(self, value):
1024 1032 return value in self.addedvalues or value in self.lazyvalues
1025 1033
1026 1034 def __iter__(self):
1027 1035 added = self.addedvalues
1028 1036 for r in added:
1029 1037 yield r
1030 1038 for r in self.lazyvalues:
1031 1039 if not r in added:
1032 1040 yield r
1033 1041
1034 1042 def add(self, value):
1035 1043 self.addedvalues.add(value)
1036 1044
1037 1045 def update(self, values):
1038 1046 self.addedvalues.update(values)
1039 1047
1040 1048 has = lazyset(self.ancestors(common))
1041 1049 has.add(nullrev)
1042 1050 has.update(common)
1043 1051
1044 1052 # take all ancestors from heads that aren't in has
1045 1053 missing = set()
1046 1054 visit = collections.deque(r for r in heads if r not in has)
1047 1055 while visit:
1048 1056 r = visit.popleft()
1049 1057 if r in missing:
1050 1058 continue
1051 1059 else:
1052 1060 missing.add(r)
1053 1061 for p in self.parentrevs(r):
1054 1062 if p not in has:
1055 1063 visit.append(p)
1056 1064 missing = list(missing)
1057 1065 missing.sort()
1058 1066 return has, [self.node(miss) for miss in missing]
1059 1067
1060 1068 def incrementalmissingrevs(self, common=None):
1061 1069 """Return an object that can be used to incrementally compute the
1062 1070 revision numbers of the ancestors of arbitrary sets that are not
1063 1071 ancestors of common. This is an ancestor.incrementalmissingancestors
1064 1072 object.
1065 1073
1066 1074 'common' is a list of revision numbers. If common is not supplied, uses
1067 1075 nullrev.
1068 1076 """
1069 1077 if common is None:
1070 1078 common = [nullrev]
1071 1079
1072 1080 if rustancestor is not None:
1073 1081 return rustancestor.MissingAncestors(self.index, common)
1074 1082 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1075 1083
1076 1084 def findmissingrevs(self, common=None, heads=None):
1077 1085 """Return the revision numbers of the ancestors of heads that
1078 1086 are not ancestors of common.
1079 1087
1080 1088 More specifically, return a list of revision numbers corresponding to
1081 1089 nodes N such that every N satisfies the following constraints:
1082 1090
1083 1091 1. N is an ancestor of some node in 'heads'
1084 1092 2. N is not an ancestor of any node in 'common'
1085 1093
1086 1094 The list is sorted by revision number, meaning it is
1087 1095 topologically sorted.
1088 1096
1089 1097 'heads' and 'common' are both lists of revision numbers. If heads is
1090 1098 not supplied, uses all of the revlog's heads. If common is not
1091 1099 supplied, uses nullid."""
1092 1100 if common is None:
1093 1101 common = [nullrev]
1094 1102 if heads is None:
1095 1103 heads = self.headrevs()
1096 1104
1097 1105 inc = self.incrementalmissingrevs(common=common)
1098 1106 return inc.missingancestors(heads)
1099 1107
1100 1108 def findmissing(self, common=None, heads=None):
1101 1109 """Return the ancestors of heads that are not ancestors of common.
1102 1110
1103 1111 More specifically, return a list of nodes N such that every N
1104 1112 satisfies the following constraints:
1105 1113
1106 1114 1. N is an ancestor of some node in 'heads'
1107 1115 2. N is not an ancestor of any node in 'common'
1108 1116
1109 1117 The list is sorted by revision number, meaning it is
1110 1118 topologically sorted.
1111 1119
1112 1120 'heads' and 'common' are both lists of node IDs. If heads is
1113 1121 not supplied, uses all of the revlog's heads. If common is not
1114 1122 supplied, uses nullid."""
1115 1123 if common is None:
1116 1124 common = [nullid]
1117 1125 if heads is None:
1118 1126 heads = self.heads()
1119 1127
1120 1128 common = [self.rev(n) for n in common]
1121 1129 heads = [self.rev(n) for n in heads]
1122 1130
1123 1131 inc = self.incrementalmissingrevs(common=common)
1124 1132 return [self.node(r) for r in inc.missingancestors(heads)]
1125 1133
1126 1134 def nodesbetween(self, roots=None, heads=None):
1127 1135 """Return a topological path from 'roots' to 'heads'.
1128 1136
1129 1137 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1130 1138 topologically sorted list of all nodes N that satisfy both of
1131 1139 these constraints:
1132 1140
1133 1141 1. N is a descendant of some node in 'roots'
1134 1142 2. N is an ancestor of some node in 'heads'
1135 1143
1136 1144 Every node is considered to be both a descendant and an ancestor
1137 1145 of itself, so every reachable node in 'roots' and 'heads' will be
1138 1146 included in 'nodes'.
1139 1147
1140 1148 'outroots' is the list of reachable nodes in 'roots', i.e., the
1141 1149 subset of 'roots' that is returned in 'nodes'. Likewise,
1142 1150 'outheads' is the subset of 'heads' that is also in 'nodes'.
1143 1151
1144 1152 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1145 1153 unspecified, uses nullid as the only root. If 'heads' is
1146 1154 unspecified, uses list of all of the revlog's heads."""
1147 1155 nonodes = ([], [], [])
1148 1156 if roots is not None:
1149 1157 roots = list(roots)
1150 1158 if not roots:
1151 1159 return nonodes
1152 1160 lowestrev = min([self.rev(n) for n in roots])
1153 1161 else:
1154 1162 roots = [nullid] # Everybody's a descendant of nullid
1155 1163 lowestrev = nullrev
1156 1164 if (lowestrev == nullrev) and (heads is None):
1157 1165 # We want _all_ the nodes!
1158 1166 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1159 1167 if heads is None:
1160 1168 # All nodes are ancestors, so the latest ancestor is the last
1161 1169 # node.
1162 1170 highestrev = len(self) - 1
1163 1171 # Set ancestors to None to signal that every node is an ancestor.
1164 1172 ancestors = None
1165 1173 # Set heads to an empty dictionary for later discovery of heads
1166 1174 heads = {}
1167 1175 else:
1168 1176 heads = list(heads)
1169 1177 if not heads:
1170 1178 return nonodes
1171 1179 ancestors = set()
1172 1180 # Turn heads into a dictionary so we can remove 'fake' heads.
1173 1181 # Also, later we will be using it to filter out the heads we can't
1174 1182 # find from roots.
1175 1183 heads = dict.fromkeys(heads, False)
1176 1184 # Start at the top and keep marking parents until we're done.
1177 1185 nodestotag = set(heads)
1178 1186 # Remember where the top was so we can use it as a limit later.
1179 1187 highestrev = max([self.rev(n) for n in nodestotag])
1180 1188 while nodestotag:
1181 1189 # grab a node to tag
1182 1190 n = nodestotag.pop()
1183 1191 # Never tag nullid
1184 1192 if n == nullid:
1185 1193 continue
1186 1194 # A node's revision number represents its place in a
1187 1195 # topologically sorted list of nodes.
1188 1196 r = self.rev(n)
1189 1197 if r >= lowestrev:
1190 1198 if n not in ancestors:
1191 1199 # If we are possibly a descendant of one of the roots
1192 1200 # and we haven't already been marked as an ancestor
1193 1201 ancestors.add(n) # Mark as ancestor
1194 1202 # Add non-nullid parents to list of nodes to tag.
1195 1203 nodestotag.update(
1196 1204 [p for p in self.parents(n) if p != nullid]
1197 1205 )
1198 1206 elif n in heads: # We've seen it before, is it a fake head?
1199 1207 # So it is, real heads should not be the ancestors of
1200 1208 # any other heads.
1201 1209 heads.pop(n)
1202 1210 if not ancestors:
1203 1211 return nonodes
1204 1212 # Now that we have our set of ancestors, we want to remove any
1205 1213 # roots that are not ancestors.
1206 1214
1207 1215 # If one of the roots was nullid, everything is included anyway.
1208 1216 if lowestrev > nullrev:
1209 1217 # But, since we weren't, let's recompute the lowest rev to not
1210 1218 # include roots that aren't ancestors.
1211 1219
1212 1220 # Filter out roots that aren't ancestors of heads
1213 1221 roots = [root for root in roots if root in ancestors]
1214 1222 # Recompute the lowest revision
1215 1223 if roots:
1216 1224 lowestrev = min([self.rev(root) for root in roots])
1217 1225 else:
1218 1226 # No more roots? Return empty list
1219 1227 return nonodes
1220 1228 else:
1221 1229 # We are descending from nullid, and don't need to care about
1222 1230 # any other roots.
1223 1231 lowestrev = nullrev
1224 1232 roots = [nullid]
1225 1233 # Transform our roots list into a set.
1226 1234 descendants = set(roots)
1227 1235 # Also, keep the original roots so we can filter out roots that aren't
1228 1236 # 'real' roots (i.e. are descended from other roots).
1229 1237 roots = descendants.copy()
1230 1238 # Our topologically sorted list of output nodes.
1231 1239 orderedout = []
1232 1240 # Don't start at nullid since we don't want nullid in our output list,
1233 1241 # and if nullid shows up in descendants, empty parents will look like
1234 1242 # they're descendants.
1235 1243 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1236 1244 n = self.node(r)
1237 1245 isdescendant = False
1238 1246 if lowestrev == nullrev: # Everybody is a descendant of nullid
1239 1247 isdescendant = True
1240 1248 elif n in descendants:
1241 1249 # n is already a descendant
1242 1250 isdescendant = True
1243 1251 # This check only needs to be done here because all the roots
1244 1252 # will start being marked as descendants before the loop.
1245 1253 if n in roots:
1246 1254 # If n was a root, check if it's a 'real' root.
1247 1255 p = tuple(self.parents(n))
1248 1256 # If any of its parents are descendants, it's not a root.
1249 1257 if (p[0] in descendants) or (p[1] in descendants):
1250 1258 roots.remove(n)
1251 1259 else:
1252 1260 p = tuple(self.parents(n))
1253 1261 # A node is a descendant if either of its parents are
1254 1262 # descendants. (We seeded the descendants set with the roots
1255 1263 # up there, remember?)
1256 1264 if (p[0] in descendants) or (p[1] in descendants):
1257 1265 descendants.add(n)
1258 1266 isdescendant = True
1259 1267 if isdescendant and ((ancestors is None) or (n in ancestors)):
1260 1268 # Only include nodes that are both descendants and ancestors.
1261 1269 orderedout.append(n)
1262 1270 if (ancestors is not None) and (n in heads):
1263 1271 # We're trying to figure out which heads are reachable
1264 1272 # from roots.
1265 1273 # Mark this head as having been reached
1266 1274 heads[n] = True
1267 1275 elif ancestors is None:
1268 1276 # Otherwise, we're trying to discover the heads.
1269 1277 # Assume this is a head because if it isn't, the next step
1270 1278 # will eventually remove it.
1271 1279 heads[n] = True
1272 1280 # But, obviously its parents aren't.
1273 1281 for p in self.parents(n):
1274 1282 heads.pop(p, None)
1275 1283 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1276 1284 roots = list(roots)
1277 1285 assert orderedout
1278 1286 assert roots
1279 1287 assert heads
1280 1288 return (orderedout, roots, heads)
1281 1289
1282 1290 def headrevs(self, revs=None):
1283 1291 if revs is None:
1284 1292 try:
1285 1293 return self.index.headrevs()
1286 1294 except AttributeError:
1287 1295 return self._headrevs()
1288 1296 if rustdagop is not None:
1289 1297 return rustdagop.headrevs(self.index, revs)
1290 1298 return dagop.headrevs(revs, self._uncheckedparentrevs)
1291 1299
1292 1300 def computephases(self, roots):
1293 1301 return self.index.computephasesmapsets(roots)
1294 1302
1295 1303 def _headrevs(self):
1296 1304 count = len(self)
1297 1305 if not count:
1298 1306 return [nullrev]
1299 1307 # we won't iter over filtered rev so nobody is a head at start
1300 1308 ishead = [0] * (count + 1)
1301 1309 index = self.index
1302 1310 for r in self:
1303 1311 ishead[r] = 1 # I may be a head
1304 1312 e = index[r]
1305 1313 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1306 1314 return [r for r, val in enumerate(ishead) if val]
1307 1315
1308 1316 def heads(self, start=None, stop=None):
1309 1317 """return the list of all nodes that have no children
1310 1318
1311 1319 if start is specified, only heads that are descendants of
1312 1320 start will be returned
1313 1321 if stop is specified, it will consider all the revs from stop
1314 1322 as if they had no children
1315 1323 """
1316 1324 if start is None and stop is None:
1317 1325 if not len(self):
1318 1326 return [nullid]
1319 1327 return [self.node(r) for r in self.headrevs()]
1320 1328
1321 1329 if start is None:
1322 1330 start = nullrev
1323 1331 else:
1324 1332 start = self.rev(start)
1325 1333
1326 1334 stoprevs = {self.rev(n) for n in stop or []}
1327 1335
1328 1336 revs = dagop.headrevssubset(
1329 1337 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1330 1338 )
1331 1339
1332 1340 return [self.node(rev) for rev in revs]
1333 1341
1334 1342 def children(self, node):
1335 1343 """find the children of a given node"""
1336 1344 c = []
1337 1345 p = self.rev(node)
1338 1346 for r in self.revs(start=p + 1):
1339 1347 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1340 1348 if prevs:
1341 1349 for pr in prevs:
1342 1350 if pr == p:
1343 1351 c.append(self.node(r))
1344 1352 elif p == nullrev:
1345 1353 c.append(self.node(r))
1346 1354 return c
1347 1355
1348 1356 def commonancestorsheads(self, a, b):
1349 1357 """calculate all the heads of the common ancestors of nodes a and b"""
1350 1358 a, b = self.rev(a), self.rev(b)
1351 1359 ancs = self._commonancestorsheads(a, b)
1352 1360 return pycompat.maplist(self.node, ancs)
1353 1361
1354 1362 def _commonancestorsheads(self, *revs):
1355 1363 """calculate all the heads of the common ancestors of revs"""
1356 1364 try:
1357 1365 ancs = self.index.commonancestorsheads(*revs)
1358 1366 except (AttributeError, OverflowError): # C implementation failed
1359 1367 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1360 1368 return ancs
1361 1369
1362 1370 def isancestor(self, a, b):
1363 1371 """return True if node a is an ancestor of node b
1364 1372
1365 1373 A revision is considered an ancestor of itself."""
1366 1374 a, b = self.rev(a), self.rev(b)
1367 1375 return self.isancestorrev(a, b)
1368 1376
1369 1377 def isancestorrev(self, a, b):
1370 1378 """return True if revision a is an ancestor of revision b
1371 1379
1372 1380 A revision is considered an ancestor of itself.
1373 1381
1374 1382 The implementation of this is trivial but the use of
1375 1383 reachableroots is not."""
1376 1384 if a == nullrev:
1377 1385 return True
1378 1386 elif a == b:
1379 1387 return True
1380 1388 elif a > b:
1381 1389 return False
1382 1390 return bool(self.reachableroots(a, [b], [a], includepath=False))
1383 1391
1384 1392 def reachableroots(self, minroot, heads, roots, includepath=False):
1385 1393 """return (heads(::(<roots> and <roots>::<heads>)))
1386 1394
1387 1395 If includepath is True, return (<roots>::<heads>)."""
1388 1396 try:
1389 1397 return self.index.reachableroots2(
1390 1398 minroot, heads, roots, includepath
1391 1399 )
1392 1400 except AttributeError:
1393 1401 return dagop._reachablerootspure(
1394 1402 self.parentrevs, minroot, roots, heads, includepath
1395 1403 )
1396 1404
1397 1405 def ancestor(self, a, b):
1398 1406 """calculate the "best" common ancestor of nodes a and b"""
1399 1407
1400 1408 a, b = self.rev(a), self.rev(b)
1401 1409 try:
1402 1410 ancs = self.index.ancestors(a, b)
1403 1411 except (AttributeError, OverflowError):
1404 1412 ancs = ancestor.ancestors(self.parentrevs, a, b)
1405 1413 if ancs:
1406 1414 # choose a consistent winner when there's a tie
1407 1415 return min(map(self.node, ancs))
1408 1416 return nullid
1409 1417
1410 1418 def _match(self, id):
1411 1419 if isinstance(id, int):
1412 1420 # rev
1413 1421 return self.node(id)
1414 1422 if len(id) == 20:
1415 1423 # possibly a binary node
1416 1424 # odds of a binary node being all hex in ASCII are 1 in 10**25
1417 1425 try:
1418 1426 node = id
1419 1427 self.rev(node) # quick search the index
1420 1428 return node
1421 1429 except error.LookupError:
1422 1430 pass # may be partial hex id
1423 1431 try:
1424 1432 # str(rev)
1425 1433 rev = int(id)
1426 1434 if b"%d" % rev != id:
1427 1435 raise ValueError
1428 1436 if rev < 0:
1429 1437 rev = len(self) + rev
1430 1438 if rev < 0 or rev >= len(self):
1431 1439 raise ValueError
1432 1440 return self.node(rev)
1433 1441 except (ValueError, OverflowError):
1434 1442 pass
1435 1443 if len(id) == 40:
1436 1444 try:
1437 1445 # a full hex nodeid?
1438 1446 node = bin(id)
1439 1447 self.rev(node)
1440 1448 return node
1441 1449 except (TypeError, error.LookupError):
1442 1450 pass
1443 1451
1444 1452 def _partialmatch(self, id):
1445 1453 # we don't care wdirfilenodeids as they should be always full hash
1446 1454 maybewdir = wdirhex.startswith(id)
1447 1455 try:
1448 1456 partial = self.index.partialmatch(id)
1449 1457 if partial and self.hasnode(partial):
1450 1458 if maybewdir:
1451 1459 # single 'ff...' match in radix tree, ambiguous with wdir
1452 1460 raise error.RevlogError
1453 1461 return partial
1454 1462 if maybewdir:
1455 1463 # no 'ff...' match in radix tree, wdir identified
1456 1464 raise error.WdirUnsupported
1457 1465 return None
1458 1466 except error.RevlogError:
1459 1467 # parsers.c radix tree lookup gave multiple matches
1460 1468 # fast path: for unfiltered changelog, radix tree is accurate
1461 1469 if not getattr(self, 'filteredrevs', None):
1462 1470 raise error.AmbiguousPrefixLookupError(
1463 1471 id, self.indexfile, _(b'ambiguous identifier')
1464 1472 )
1465 1473 # fall through to slow path that filters hidden revisions
1466 1474 except (AttributeError, ValueError):
1467 1475 # we are pure python, or key was too short to search radix tree
1468 1476 pass
1469 1477
1470 1478 if id in self._pcache:
1471 1479 return self._pcache[id]
1472 1480
1473 1481 if len(id) <= 40:
1474 1482 try:
1475 1483 # hex(node)[:...]
1476 1484 l = len(id) // 2 # grab an even number of digits
1477 1485 prefix = bin(id[: l * 2])
1478 1486 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1479 1487 nl = [
1480 1488 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1481 1489 ]
1482 1490 if nullhex.startswith(id):
1483 1491 nl.append(nullid)
1484 1492 if len(nl) > 0:
1485 1493 if len(nl) == 1 and not maybewdir:
1486 1494 self._pcache[id] = nl[0]
1487 1495 return nl[0]
1488 1496 raise error.AmbiguousPrefixLookupError(
1489 1497 id, self.indexfile, _(b'ambiguous identifier')
1490 1498 )
1491 1499 if maybewdir:
1492 1500 raise error.WdirUnsupported
1493 1501 return None
1494 1502 except TypeError:
1495 1503 pass
1496 1504
1497 1505 def lookup(self, id):
1498 1506 """locate a node based on:
1499 1507 - revision number or str(revision number)
1500 1508 - nodeid or subset of hex nodeid
1501 1509 """
1502 1510 n = self._match(id)
1503 1511 if n is not None:
1504 1512 return n
1505 1513 n = self._partialmatch(id)
1506 1514 if n:
1507 1515 return n
1508 1516
1509 1517 raise error.LookupError(id, self.indexfile, _(b'no match found'))
1510 1518
1511 1519 def shortest(self, node, minlength=1):
1512 1520 """Find the shortest unambiguous prefix that matches node."""
1513 1521
1514 1522 def isvalid(prefix):
1515 1523 try:
1516 1524 matchednode = self._partialmatch(prefix)
1517 1525 except error.AmbiguousPrefixLookupError:
1518 1526 return False
1519 1527 except error.WdirUnsupported:
1520 1528 # single 'ff...' match
1521 1529 return True
1522 1530 if matchednode is None:
1523 1531 raise error.LookupError(node, self.indexfile, _(b'no node'))
1524 1532 return True
1525 1533
1526 1534 def maybewdir(prefix):
1527 1535 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1528 1536
1529 1537 hexnode = hex(node)
1530 1538
1531 1539 def disambiguate(hexnode, minlength):
1532 1540 """Disambiguate against wdirid."""
1533 1541 for length in range(minlength, len(hexnode) + 1):
1534 1542 prefix = hexnode[:length]
1535 1543 if not maybewdir(prefix):
1536 1544 return prefix
1537 1545
1538 1546 if not getattr(self, 'filteredrevs', None):
1539 1547 try:
1540 1548 length = max(self.index.shortest(node), minlength)
1541 1549 return disambiguate(hexnode, length)
1542 1550 except error.RevlogError:
1543 1551 if node != wdirid:
1544 1552 raise error.LookupError(node, self.indexfile, _(b'no node'))
1545 1553 except AttributeError:
1546 1554 # Fall through to pure code
1547 1555 pass
1548 1556
1549 1557 if node == wdirid:
1550 1558 for length in range(minlength, len(hexnode) + 1):
1551 1559 prefix = hexnode[:length]
1552 1560 if isvalid(prefix):
1553 1561 return prefix
1554 1562
1555 1563 for length in range(minlength, len(hexnode) + 1):
1556 1564 prefix = hexnode[:length]
1557 1565 if isvalid(prefix):
1558 1566 return disambiguate(hexnode, length)
1559 1567
1560 1568 def cmp(self, node, text):
1561 1569 """compare text with a given file revision
1562 1570
1563 1571 returns True if text is different than what is stored.
1564 1572 """
1565 1573 p1, p2 = self.parents(node)
1566 1574 return storageutil.hashrevisionsha1(text, p1, p2) != node
1567 1575
1568 1576 def _cachesegment(self, offset, data):
1569 1577 """Add a segment to the revlog cache.
1570 1578
1571 1579 Accepts an absolute offset and the data that is at that location.
1572 1580 """
1573 1581 o, d = self._chunkcache
1574 1582 # try to add to existing cache
1575 1583 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1576 1584 self._chunkcache = o, d + data
1577 1585 else:
1578 1586 self._chunkcache = offset, data
1579 1587
1580 1588 def _readsegment(self, offset, length, df=None):
1581 1589 """Load a segment of raw data from the revlog.
1582 1590
1583 1591 Accepts an absolute offset, length to read, and an optional existing
1584 1592 file handle to read from.
1585 1593
1586 1594 If an existing file handle is passed, it will be seeked and the
1587 1595 original seek position will NOT be restored.
1588 1596
1589 1597 Returns a str or buffer of raw byte data.
1590 1598
1591 1599 Raises if the requested number of bytes could not be read.
1592 1600 """
1593 1601 # Cache data both forward and backward around the requested
1594 1602 # data, in a fixed size window. This helps speed up operations
1595 1603 # involving reading the revlog backwards.
1596 1604 cachesize = self._chunkcachesize
1597 1605 realoffset = offset & ~(cachesize - 1)
1598 1606 reallength = (
1599 1607 (offset + length + cachesize) & ~(cachesize - 1)
1600 1608 ) - realoffset
1601 1609 with self._datareadfp(df) as df:
1602 1610 df.seek(realoffset)
1603 1611 d = df.read(reallength)
1604 1612
1605 1613 self._cachesegment(realoffset, d)
1606 1614 if offset != realoffset or reallength != length:
1607 1615 startoffset = offset - realoffset
1608 1616 if len(d) - startoffset < length:
1609 1617 raise error.RevlogError(
1610 1618 _(
1611 1619 b'partial read of revlog %s; expected %d bytes from '
1612 1620 b'offset %d, got %d'
1613 1621 )
1614 1622 % (
1615 1623 self.indexfile if self._inline else self.datafile,
1616 1624 length,
1617 1625 realoffset,
1618 1626 len(d) - startoffset,
1619 1627 )
1620 1628 )
1621 1629
1622 1630 return util.buffer(d, startoffset, length)
1623 1631
1624 1632 if len(d) < length:
1625 1633 raise error.RevlogError(
1626 1634 _(
1627 1635 b'partial read of revlog %s; expected %d bytes from offset '
1628 1636 b'%d, got %d'
1629 1637 )
1630 1638 % (
1631 1639 self.indexfile if self._inline else self.datafile,
1632 1640 length,
1633 1641 offset,
1634 1642 len(d),
1635 1643 )
1636 1644 )
1637 1645
1638 1646 return d
1639 1647
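# Editor's sketch (not part of revlog.py): the window arithmetic used by
# _readsegment, pulled out as a standalone helper.  It rounds the requested
# byte range outwards to multiples of the cache size, which Mercurial requires
# to be a power of two; 65536 is used here as a plausible default.
def sketch_cache_window(offset, length, cachesize=65536):
    realoffset = offset & ~(cachesize - 1)
    reallength = ((offset + length + cachesize) & ~(cachesize - 1)) - realoffset
    return realoffset, reallength

# A 100-byte read at offset 70000 turns into one aligned 64KiB read:
# sketch_cache_window(70000, 100) == (65536, 65536)
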
1640 1648 def _getsegment(self, offset, length, df=None):
1641 1649 """Obtain a segment of raw data from the revlog.
1642 1650
1643 1651 Accepts an absolute offset, length of bytes to obtain, and an
1644 1652 optional file handle to the already-opened revlog. If the file
1645 1653 handle is used, its original seek position will not be preserved.
1646 1654
1647 1655 Requests for data may be returned from a cache.
1648 1656
1649 1657 Returns a str or a buffer instance of raw byte data.
1650 1658 """
1651 1659 o, d = self._chunkcache
1652 1660 l = len(d)
1653 1661
1654 1662 # is it in the cache?
1655 1663 cachestart = offset - o
1656 1664 cacheend = cachestart + length
1657 1665 if cachestart >= 0 and cacheend <= l:
1658 1666 if cachestart == 0 and cacheend == l:
1659 1667 return d # avoid a copy
1660 1668 return util.buffer(d, cachestart, cacheend - cachestart)
1661 1669
1662 1670 return self._readsegment(offset, length, df=df)
1663 1671
1664 1672 def _getsegmentforrevs(self, startrev, endrev, df=None):
1665 1673 """Obtain a segment of raw data corresponding to a range of revisions.
1666 1674
1667 1675 Accepts the start and end revisions and an optional already-open
1668 1676 file handle to be used for reading. If the file handle is read, its
1669 1677 seek position will not be preserved.
1670 1678
1671 1679 Requests for data may be satisfied by a cache.
1672 1680
1673 1681 Returns a 2-tuple of (offset, data) for the requested range of
1674 1682 revisions. Offset is the integer offset from the beginning of the
1675 1683 revlog and data is a str or buffer of the raw byte data.
1676 1684
1677 1685 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1678 1686 to determine where each revision's data begins and ends.
1679 1687 """
1680 1688 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1681 1689 # (functions are expensive).
1682 1690 index = self.index
1683 1691 istart = index[startrev]
1684 1692 start = int(istart[0] >> 16)
1685 1693 if startrev == endrev:
1686 1694 end = start + istart[1]
1687 1695 else:
1688 1696 iend = index[endrev]
1689 1697 end = int(iend[0] >> 16) + iend[1]
1690 1698
1691 1699 if self._inline:
1692 1700 start += (startrev + 1) * self._io.size
1693 1701 end += (endrev + 1) * self._io.size
1694 1702 length = end - start
1695 1703
1696 1704 return start, self._getsegment(start, length, df=df)
1697 1705
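# Editor's sketch (not part of revlog.py): for inline revlogs the index
# entries and the data chunks are interleaved in the .i file, so the data of
# revision `rev` is preceded by rev + 1 index entries on top of its logical
# offset.  The 64-byte entry size matches revlog version 1 and is an
# assumption of this example.
def sketch_inline_offset(logical_start, rev, entry_size=64):
    return logical_start + (rev + 1) * entry_size

# Data of revision 2, whose chunks logically start at offset 300:
# sketch_inline_offset(300, 2) == 492
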
1698 1706 def _chunk(self, rev, df=None):
1699 1707 """Obtain a single decompressed chunk for a revision.
1700 1708
1701 1709 Accepts an integer revision and an optional already-open file handle
1702 1710 to be used for reading. If used, the seek position of the file will not
1703 1711 be preserved.
1704 1712
1705 1713 Returns a str holding uncompressed data for the requested revision.
1706 1714 """
1707 1715 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1708 1716
1709 1717 def _chunks(self, revs, df=None, targetsize=None):
1710 1718 """Obtain decompressed chunks for the specified revisions.
1711 1719
1712 1720 Accepts an iterable of numeric revisions that are assumed to be in
1713 1721 ascending order. Also accepts an optional already-open file handle
1714 1722 to be used for reading. If used, the seek position of the file will
1715 1723 not be preserved.
1716 1724
1717 1725 This function is similar to calling ``self._chunk()`` multiple times,
1718 1726 but is faster.
1719 1727
1720 1728 Returns a list with decompressed data for each requested revision.
1721 1729 """
1722 1730 if not revs:
1723 1731 return []
1724 1732 start = self.start
1725 1733 length = self.length
1726 1734 inline = self._inline
1727 1735 iosize = self._io.size
1728 1736 buffer = util.buffer
1729 1737
1730 1738 l = []
1731 1739 ladd = l.append
1732 1740
1733 1741 if not self._withsparseread:
1734 1742 slicedchunks = (revs,)
1735 1743 else:
1736 1744 slicedchunks = deltautil.slicechunk(
1737 1745 self, revs, targetsize=targetsize
1738 1746 )
1739 1747
1740 1748 for revschunk in slicedchunks:
1741 1749 firstrev = revschunk[0]
1742 1750 # Skip trailing revisions with empty diff
1743 1751 for lastrev in revschunk[::-1]:
1744 1752 if length(lastrev) != 0:
1745 1753 break
1746 1754
1747 1755 try:
1748 1756 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1749 1757 except OverflowError:
1750 1758 # issue4215 - we can't cache a run of chunks greater than
1751 1759 # 2G on Windows
1752 1760 return [self._chunk(rev, df=df) for rev in revschunk]
1753 1761
1754 1762 decomp = self.decompress
1755 1763 for rev in revschunk:
1756 1764 chunkstart = start(rev)
1757 1765 if inline:
1758 1766 chunkstart += (rev + 1) * iosize
1759 1767 chunklength = length(rev)
1760 1768 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1761 1769
1762 1770 return l
1763 1771
1764 1772 def _chunkclear(self):
1765 1773 """Clear the raw chunk cache."""
1766 1774 self._chunkcache = (0, b'')
1767 1775
1768 1776 def deltaparent(self, rev):
1769 1777 """return deltaparent of the given revision"""
1770 1778 base = self.index[rev][3]
1771 1779 if base == rev:
1772 1780 return nullrev
1773 1781 elif self._generaldelta:
1774 1782 return base
1775 1783 else:
1776 1784 return rev - 1
1777 1785
1778 1786 def issnapshot(self, rev):
1779 1787 """tells whether rev is a snapshot"""
1780 1788 if not self._sparserevlog:
1781 1789 return self.deltaparent(rev) == nullrev
1782 1790 elif util.safehasattr(self.index, b'issnapshot'):
1783 1791 # directly assign the method to cache the testing and access
1784 1792 self.issnapshot = self.index.issnapshot
1785 1793 return self.issnapshot(rev)
1786 1794 if rev == nullrev:
1787 1795 return True
1788 1796 entry = self.index[rev]
1789 1797 base = entry[3]
1790 1798 if base == rev:
1791 1799 return True
1792 1800 if base == nullrev:
1793 1801 return True
1794 1802 p1 = entry[5]
1795 1803 p2 = entry[6]
1796 1804 if base == p1 or base == p2:
1797 1805 return False
1798 1806 return self.issnapshot(base)
1799 1807
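# Editor's sketch (not part of revlog.py): the pure-Python snapshot test
# above, replayed over a simple list of (deltabase, p1, p2) tuples so the
# recursion is easy to follow.  The sample table is invented; -1 stands in
# for the null revision.
def sketch_issnapshot(entries, rev):
    if rev == -1:
        return True
    base, p1, p2 = entries[rev]
    if base == rev or base == -1:
        return True  # stores a full text: the deepest kind of snapshot
    if base in (p1, p2):
        return False  # an ordinary delta against one of its parents
    return sketch_issnapshot(entries, base)  # possible intermediate snapshot

# Revision 2 deltas against revision 0, which stores a full text but is not
# one of revision 2's parents, so revision 2 is an intermediate snapshot:
# sketch_issnapshot([(0, -1, -1), (0, 0, -1), (0, 1, -1)], 2) == True
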
1800 1808 def snapshotdepth(self, rev):
1801 1809 """number of snapshots in the chain before this one"""
1802 1810 if not self.issnapshot(rev):
1803 1811 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1804 1812 return len(self._deltachain(rev)[0]) - 1
1805 1813
1806 1814 def revdiff(self, rev1, rev2):
1807 1815 """return or calculate a delta between two revisions
1808 1816
1809 1817 The delta calculated is in binary form and is intended to be written to
1810 1818 revlog data directly. So this function needs raw revision data.
1811 1819 """
1812 1820 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1813 1821 return bytes(self._chunk(rev2))
1814 1822
1815 1823 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1816 1824
1817 1825 def _processflags(self, text, flags, operation, raw=False):
1818 1826 """deprecated entry point to access flag processors"""
1819 1827 msg = b'_processflag(...) use the specialized variant'
1820 1828 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1821 1829 if raw:
1822 1830 return text, flagutil.processflagsraw(self, text, flags)
1823 1831 elif operation == b'read':
1824 1832 return flagutil.processflagsread(self, text, flags)
1825 1833 else: # write operation
1826 1834 return flagutil.processflagswrite(self, text, flags, None)
1827 1835
1828 1836 def revision(self, nodeorrev, _df=None, raw=False):
1829 1837 """return an uncompressed revision of a given node or revision
1830 1838 number.
1831 1839
1832 1840 _df - an existing file handle to read from. (internal-only)
1833 1841 raw - an optional argument specifying if the revision data is to be
1834 1842 treated as raw data when applying flag transforms. 'raw' should be set
1835 1843 to True when generating changegroups or in debug commands.
1836 1844 """
1837 1845 if raw:
1838 1846 msg = (
1839 1847 b'revlog.revision(..., raw=True) is deprecated, '
1840 1848 b'use revlog.rawdata(...)'
1841 1849 )
1842 1850 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1843 1851 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1844 1852
1845 1853 def sidedata(self, nodeorrev, _df=None):
1846 1854 """a map of extra data related to the changeset but not part of the hash
1847 1855
1848 1856 This function currently returns a dictionary. However, a more advanced
1849 1857 mapping object will likely be used in the future for more
1850 1858 efficient/lazy code.
1851 1859 """
1852 1860 return self._revisiondata(nodeorrev, _df)[1]
1853 1861
1854 1862 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1855 1863 # deal with <nodeorrev> argument type
1856 1864 if isinstance(nodeorrev, int):
1857 1865 rev = nodeorrev
1858 1866 node = self.node(rev)
1859 1867 else:
1860 1868 node = nodeorrev
1861 1869 rev = None
1862 1870
1863 1871 # fast path the special `nullid` rev
1864 1872 if node == nullid:
1865 1873 return b"", {}
1866 1874
1867 1875 # ``rawtext`` is the text as stored inside the revlog. Might be the
1868 1876 # revision or might need to be processed to retrieve the revision.
1869 1877 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1870 1878
1871 1879 if raw and validated:
1872 1880 # if we don't want to process the raw text and that raw
1873 1881 # text is cached, we can exit early.
1874 1882 return rawtext, {}
1875 1883 if rev is None:
1876 1884 rev = self.rev(node)
1877 1885 # the revlog's flag for this revision
1878 1886 # (usually alter its state or content)
1879 1887 flags = self.flags(rev)
1880 1888
1881 1889 if validated and flags == REVIDX_DEFAULT_FLAGS:
1882 1890 # no extra flags set, no flag processor runs, text = rawtext
1883 1891 return rawtext, {}
1884 1892
1885 1893 sidedata = {}
1886 1894 if raw:
1887 1895 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1888 1896 text = rawtext
1889 1897 else:
1890 1898 try:
1891 1899 r = flagutil.processflagsread(self, rawtext, flags)
1892 1900 except error.SidedataHashError as exc:
1893 1901 msg = _(b"integrity check failed on %s:%s sidedata key %d")
1894 1902 msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
1895 1903 raise error.RevlogError(msg)
1896 1904 text, validatehash, sidedata = r
1897 1905 if validatehash:
1898 1906 self.checkhash(text, node, rev=rev)
1899 1907 if not validated:
1900 1908 self._revisioncache = (node, rev, rawtext)
1901 1909
1902 1910 return text, sidedata
1903 1911
1904 1912 def _rawtext(self, node, rev, _df=None):
1905 1913 """return the possibly unvalidated rawtext for a revision
1906 1914
1907 1915 returns (rev, rawtext, validated)
1908 1916 """
1909 1917
1910 1918 # revision in the cache (could be useful to apply delta)
1911 1919 cachedrev = None
1912 1920 # An intermediate text to apply deltas to
1913 1921 basetext = None
1914 1922
1915 1923 # Check if we have the entry in cache
1916 1924 # The cache entry looks like (node, rev, rawtext)
1917 1925 if self._revisioncache:
1918 1926 if self._revisioncache[0] == node:
1919 1927 return (rev, self._revisioncache[2], True)
1920 1928 cachedrev = self._revisioncache[1]
1921 1929
1922 1930 if rev is None:
1923 1931 rev = self.rev(node)
1924 1932
1925 1933 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1926 1934 if stopped:
1927 1935 basetext = self._revisioncache[2]
1928 1936
1929 1937 # drop cache to save memory, the caller is expected to
1930 1938 # update self._revisioncache after validating the text
1931 1939 self._revisioncache = None
1932 1940
1933 1941 targetsize = None
1934 1942 rawsize = self.index[rev][2]
1935 1943 if 0 <= rawsize:
1936 1944 targetsize = 4 * rawsize
1937 1945
1938 1946 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1939 1947 if basetext is None:
1940 1948 basetext = bytes(bins[0])
1941 1949 bins = bins[1:]
1942 1950
1943 1951 rawtext = mdiff.patches(basetext, bins)
1944 1952 del basetext # let us have a chance to free memory early
1945 1953 return (rev, rawtext, False)
1946 1954
1947 1955 def rawdata(self, nodeorrev, _df=None):
1948 1956 """return an uncompressed raw data of a given node or revision number.
1949 1957
1950 1958 _df - an existing file handle to read from. (internal-only)
1951 1959 """
1952 1960 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1953 1961
1954 1962 def hash(self, text, p1, p2):
1955 1963 """Compute a node hash.
1956 1964
1957 1965 Available as a function so that subclasses can replace the hash
1958 1966 as needed.
1959 1967 """
1960 1968 return storageutil.hashrevisionsha1(text, p1, p2)
1961 1969
1962 1970 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1963 1971 """Check node hash integrity.
1964 1972
1965 1973 Available as a function so that subclasses can extend hash mismatch
1966 1974 behaviors as needed.
1967 1975 """
1968 1976 try:
1969 1977 if p1 is None and p2 is None:
1970 1978 p1, p2 = self.parents(node)
1971 1979 if node != self.hash(text, p1, p2):
1972 1980 # Clear the revision cache on hash failure. The revision cache
1973 1981 # only stores the raw revision and clearing the cache does have
1974 1982 # the side-effect that we won't have a cache hit when the raw
1975 1983 # revision data is accessed. But this case should be rare and
1976 1984 # it is extra work to teach the cache about the hash
1977 1985 # verification state.
1978 1986 if self._revisioncache and self._revisioncache[0] == node:
1979 1987 self._revisioncache = None
1980 1988
1981 1989 revornode = rev
1982 1990 if revornode is None:
1983 1991 revornode = templatefilters.short(hex(node))
1984 1992 raise error.RevlogError(
1985 1993 _(b"integrity check failed on %s:%s")
1986 1994 % (self.indexfile, pycompat.bytestr(revornode))
1987 1995 )
1988 1996 except error.RevlogError:
1989 1997 if self._censorable and storageutil.iscensoredtext(text):
1990 1998 raise error.CensoredNodeError(self.indexfile, node, text)
1991 1999 raise
1992 2000
1993 2001 def _enforceinlinesize(self, tr, fp=None):
1994 2002 """Check if the revlog is too big for inline and convert if so.
1995 2003
1996 2004 This should be called after revisions are added to the revlog. If the
1997 2005 revlog has grown too large to be an inline revlog, it will convert it
1998 2006 to use multiple index and data files.
1999 2007 """
2000 2008 tiprev = len(self) - 1
2001 2009 if (
2002 2010 not self._inline
2003 2011 or (self.start(tiprev) + self.length(tiprev)) < _maxinline
2004 2012 ):
2005 2013 return
2006 2014
2007 2015 troffset = tr.findoffset(self.indexfile)
2008 2016 if troffset is None:
2009 2017 raise error.RevlogError(
2010 2018 _(b"%s not found in the transaction") % self.indexfile
2011 2019 )
2012 2020 trindex = 0
2013 2021 tr.add(self.datafile, 0)
2014 2022
2015 2023 if fp:
2016 2024 fp.flush()
2017 2025 fp.close()
2018 2026 # We can't use the cached file handle after close(). So prevent
2019 2027 # its usage.
2020 2028 self._writinghandles = None
2021 2029
2022 2030 with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
2023 2031 for r in self:
2024 2032 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
2025 2033 if troffset <= self.start(r):
2026 2034 trindex = r
2027 2035
2028 2036 with self._indexfp(b'w') as fp:
2029 2037 self.version &= ~FLAG_INLINE_DATA
2030 2038 self._inline = False
2031 2039 io = self._io
2032 2040 for i in self:
2033 2041 e = io.packentry(self.index[i], self.node, self.version, i)
2034 2042 fp.write(e)
2035 2043
2036 2044 # the temp file replaces the real index when we exit the context
2037 2045 # manager
2038 2046
2039 2047 tr.replace(self.indexfile, trindex * self._io.size)
2040 2048 nodemaputil.setup_persistent_nodemap(tr, self)
2041 2049 self._chunkclear()
2042 2050
2043 2051 def _nodeduplicatecallback(self, transaction, node):
2044 2052 """called when trying to add a node already stored."""
2045 2053
2046 2054 def addrevision(
2047 2055 self,
2048 2056 text,
2049 2057 transaction,
2050 2058 link,
2051 2059 p1,
2052 2060 p2,
2053 2061 cachedelta=None,
2054 2062 node=None,
2055 2063 flags=REVIDX_DEFAULT_FLAGS,
2056 2064 deltacomputer=None,
2057 2065 sidedata=None,
2058 2066 ):
2059 2067 """add a revision to the log
2060 2068
2061 2069 text - the revision data to add
2062 2070 transaction - the transaction object used for rollback
2063 2071 link - the linkrev data to add
2064 2072 p1, p2 - the parent nodeids of the revision
2065 2073 cachedelta - an optional precomputed delta
2066 2074 node - nodeid of revision; typically node is not specified, and it is
2067 2075 computed by default as hash(text, p1, p2), however subclasses might
2068 2076 use a different hashing method (and override checkhash() in that case)
2069 2077 flags - the known flags to set on the revision
2070 2078 deltacomputer - an optional deltacomputer instance shared between
2071 2079 multiple calls
2072 2080 """
2073 2081 if link == nullrev:
2074 2082 raise error.RevlogError(
2075 2083 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2076 2084 )
2077 2085
2078 2086 if sidedata is None:
2079 2087 sidedata = {}
2080 2088 flags = flags & ~REVIDX_SIDEDATA
2081 2089 elif not self.hassidedata:
2082 2090 raise error.ProgrammingError(
2083 2091 _(b"trying to add sidedata to a revlog that does not support them")
2084 2092 )
2085 2093 else:
2086 2094 flags |= REVIDX_SIDEDATA
2087 2095
2088 2096 if flags:
2089 2097 node = node or self.hash(text, p1, p2)
2090 2098
2091 2099 rawtext, validatehash = flagutil.processflagswrite(
2092 2100 self, text, flags, sidedata=sidedata
2093 2101 )
2094 2102
2095 2103 # If the flag processor modifies the revision data, ignore any provided
2096 2104 # cachedelta.
2097 2105 if rawtext != text:
2098 2106 cachedelta = None
2099 2107
2100 2108 if len(rawtext) > _maxentrysize:
2101 2109 raise error.RevlogError(
2102 2110 _(
2103 2111 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2104 2112 )
2105 2113 % (self.indexfile, len(rawtext))
2106 2114 )
2107 2115
2108 2116 node = node or self.hash(rawtext, p1, p2)
2109 2117 rev = self.index.get_rev(node)
2110 2118 if rev is not None:
2111 2119 return rev
2112 2120
2113 2121 if validatehash:
2114 2122 self.checkhash(rawtext, node, p1=p1, p2=p2)
2115 2123
2116 2124 return self.addrawrevision(
2117 2125 rawtext,
2118 2126 transaction,
2119 2127 link,
2120 2128 p1,
2121 2129 p2,
2122 2130 node,
2123 2131 flags,
2124 2132 cachedelta=cachedelta,
2125 2133 deltacomputer=deltacomputer,
2126 2134 )
2127 2135
2128 2136 def addrawrevision(
2129 2137 self,
2130 2138 rawtext,
2131 2139 transaction,
2132 2140 link,
2133 2141 p1,
2134 2142 p2,
2135 2143 node,
2136 2144 flags,
2137 2145 cachedelta=None,
2138 2146 deltacomputer=None,
2139 2147 ):
2140 2148 """add a raw revision with known flags, node and parents
2141 2149 useful when reusing a revision not stored in this revlog (ex: received
2142 2150 over the wire, or read from an external bundle).
2143 2151 """
2144 2152 dfh = None
2145 2153 if not self._inline:
2146 2154 dfh = self._datafp(b"a+")
2147 2155 ifh = self._indexfp(b"a+")
2148 2156 try:
2149 2157 return self._addrevision(
2150 2158 node,
2151 2159 rawtext,
2152 2160 transaction,
2153 2161 link,
2154 2162 p1,
2155 2163 p2,
2156 2164 flags,
2157 2165 cachedelta,
2158 2166 ifh,
2159 2167 dfh,
2160 2168 deltacomputer=deltacomputer,
2161 2169 )
2162 2170 finally:
2163 2171 if dfh:
2164 2172 dfh.close()
2165 2173 ifh.close()
2166 2174
2167 2175 def compress(self, data):
2168 2176 """Generate a possibly-compressed representation of data."""
2169 2177 if not data:
2170 2178 return b'', data
2171 2179
2172 2180 compressed = self._compressor.compress(data)
2173 2181
2174 2182 if compressed:
2175 2183 # The revlog compressor added the header in the returned data.
2176 2184 return b'', compressed
2177 2185
2178 2186 if data[0:1] == b'\0':
2179 2187 return b'', data
2180 2188 return b'u', data
2181 2189
2182 2190 def decompress(self, data):
2183 2191 """Decompress a revlog chunk.
2184 2192
2185 2193 The chunk is expected to begin with a header identifying the
2186 2194 format type so it can be routed to an appropriate decompressor.
2187 2195 """
2188 2196 if not data:
2189 2197 return data
2190 2198
2191 2199 # Revlogs are read much more frequently than they are written and many
2192 2200 # chunks only take microseconds to decompress, so performance is
2193 2201 # important here.
2194 2202 #
2195 2203 # We can make a few assumptions about revlogs:
2196 2204 #
2197 2205 # 1) the majority of chunks will be compressed (as opposed to inline
2198 2206 # raw data).
2199 2207 # 2) decompressing *any* data will likely be at least 10x slower than
2200 2208 # returning raw inline data.
2201 2209 # 3) we want to prioritize common and officially supported compression
2202 2210 # engines
2203 2211 #
2204 2212 # It follows that we want to optimize for "decompress compressed data
2205 2213 # when encoded with common and officially supported compression engines"
2206 2214 # case over "raw data" and "data encoded by less common or non-official
2207 2215 # compression engines." That is why we have the inline lookup first
2208 2216 # followed by the compengines lookup.
2209 2217 #
2210 2218 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2211 2219 # compressed chunks. And this matters for changelog and manifest reads.
2212 2220 t = data[0:1]
2213 2221
2214 2222 if t == b'x':
2215 2223 try:
2216 2224 return _zlibdecompress(data)
2217 2225 except zlib.error as e:
2218 2226 raise error.RevlogError(
2219 2227 _(b'revlog decompress error: %s')
2220 2228 % stringutil.forcebytestr(e)
2221 2229 )
2222 2230 # '\0' is more common than 'u' so it goes first.
2223 2231 elif t == b'\0':
2224 2232 return data
2225 2233 elif t == b'u':
2226 2234 return util.buffer(data, 1)
2227 2235
2228 2236 try:
2229 2237 compressor = self._decompressors[t]
2230 2238 except KeyError:
2231 2239 try:
2232 2240 engine = util.compengines.forrevlogheader(t)
2233 2241 compressor = engine.revlogcompressor(self._compengineopts)
2234 2242 self._decompressors[t] = compressor
2235 2243 except KeyError:
2236 2244 raise error.RevlogError(_(b'unknown compression type %r') % t)
2237 2245
2238 2246 return compressor.decompress(data)
2239 2247
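# Editor's sketch (not part of revlog.py): a minimal version of the header
# dispatch above, covering only the three classic markers ('x' for zlib,
# '\0' for data stored verbatim, 'u' for an explicit "stored uncompressed"
# prefix).  Engines registered through compengines (e.g. zstd) are omitted.
import zlib

def sketch_decompress(data):
    if not data:
        return data
    t = data[0:1]
    if t == b'x':
        return zlib.decompress(data)  # zlib streams begin with 0x78, i.e. 'x'
    if t == b'\0':
        return data  # raw chunk that simply starts with a NUL byte
    if t == b'u':
        return data[1:]  # strip the explicit "uncompressed" marker
    raise ValueError('unknown compression header %r' % t)

# sketch_decompress(zlib.compress(b'hello')) == b'hello'
# sketch_decompress(b'uhello') == b'hello'
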
2240 2248 def _addrevision(
2241 2249 self,
2242 2250 node,
2243 2251 rawtext,
2244 2252 transaction,
2245 2253 link,
2246 2254 p1,
2247 2255 p2,
2248 2256 flags,
2249 2257 cachedelta,
2250 2258 ifh,
2251 2259 dfh,
2252 2260 alwayscache=False,
2253 2261 deltacomputer=None,
2254 2262 ):
2255 2263 """internal function to add revisions to the log
2256 2264
2257 2265 see addrevision for argument descriptions.
2258 2266
2259 2267 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2260 2268
2261 2269 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2262 2270 be used.
2263 2271
2264 2272 invariants:
2265 2273 - rawtext is optional (can be None); if not set, cachedelta must be set.
2266 2274 if both are set, they must correspond to each other.
2267 2275 """
2268 2276 if node == nullid:
2269 2277 raise error.RevlogError(
2270 2278 _(b"%s: attempt to add null revision") % self.indexfile
2271 2279 )
2272 2280 if node == wdirid or node in wdirfilenodeids:
2273 2281 raise error.RevlogError(
2274 2282 _(b"%s: attempt to add wdir revision") % self.indexfile
2275 2283 )
2276 2284
2277 2285 if self._inline:
2278 2286 fh = ifh
2279 2287 else:
2280 2288 fh = dfh
2281 2289
2282 2290 btext = [rawtext]
2283 2291
2284 2292 curr = len(self)
2285 2293 prev = curr - 1
2286 2294 offset = self.end(prev)
2295
2296 if self._concurrencychecker:
2297 if self._inline:
2298 # offset is "as if" it were in the .d file, so we need to add on
2299 # the size of the entry metadata.
2300 self._concurrencychecker(
2301 ifh, self.indexfile, offset + curr * self._io.size
2302 )
2303 else:
2304 # Entries in the .i are a consistent size.
2305 self._concurrencychecker(
2306 ifh, self.indexfile, curr * self._io.size
2307 )
2308 self._concurrencychecker(dfh, self.datafile, offset)
2309
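# Editor's sketch (not part of revlog.py): the expected append positions
# handed to the concurrency checker above, computed from values the revlog
# already tracks: `curr` (number of existing revisions), `offset` (logical
# end of the data) and the index entry size.  The 64-byte entry size and the
# dictionary shape are assumptions of this example.
def sketch_expected_positions(inline, curr, offset, entry_size=64):
    if inline:
        # index entries and data share a single file, so account for both
        return {'index': offset + curr * entry_size}
    return {'index': curr * entry_size, 'data': offset}
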
2287 2310 p1r, p2r = self.rev(p1), self.rev(p2)
2288 2311
2289 2312 # full versions are inserted when the needed deltas
2290 2313 # become comparable to the uncompressed text
2291 2314 if rawtext is None:
2292 2315 # need rawtext size, before it is changed by flag processors, which is
2293 2316 # the non-raw size. use revlog explicitly to avoid filelog's extra
2294 2317 # logic that might remove metadata size.
2295 2318 textlen = mdiff.patchedsize(
2296 2319 revlog.size(self, cachedelta[0]), cachedelta[1]
2297 2320 )
2298 2321 else:
2299 2322 textlen = len(rawtext)
2300 2323
2301 2324 if deltacomputer is None:
2302 2325 deltacomputer = deltautil.deltacomputer(self)
2303 2326
2304 2327 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2305 2328
2306 2329 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2307 2330
2308 2331 e = (
2309 2332 offset_type(offset, flags),
2310 2333 deltainfo.deltalen,
2311 2334 textlen,
2312 2335 deltainfo.base,
2313 2336 link,
2314 2337 p1r,
2315 2338 p2r,
2316 2339 node,
2317 2340 )
2318 2341 self.index.append(e)
2319 2342
2320 2343 entry = self._io.packentry(e, self.node, self.version, curr)
2321 2344 self._writeentry(
2322 2345 transaction, ifh, dfh, entry, deltainfo.data, link, offset
2323 2346 )
2324 2347
2325 2348 rawtext = btext[0]
2326 2349
2327 2350 if alwayscache and rawtext is None:
2328 2351 rawtext = deltacomputer.buildtext(revinfo, fh)
2329 2352
2330 2353 if type(rawtext) == bytes: # only accept immutable objects
2331 2354 self._revisioncache = (node, curr, rawtext)
2332 2355 self._chainbasecache[curr] = deltainfo.chainbase
2333 2356 return curr
2334 2357
2335 2358 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2336 2359 # Files opened in a+ mode have inconsistent behavior on various
2337 2360 # platforms. Windows requires that a file positioning call be made
2338 2361 # when the file handle transitions between reads and writes. See
2339 2362 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2340 2363 # platforms, Python or the platform itself can be buggy. Some versions
2341 2364 # of Solaris have been observed to not append at the end of the file
2342 2365 # if the file was seeked to before the end. See issue4943 for more.
2343 2366 #
2344 2367 # We work around this issue by inserting a seek() before writing.
2345 2368 # Note: This is likely not necessary on Python 3. However, because
2346 2369 # the file handle is reused for reads and may be seeked there, we need
2347 2370 # to be careful before changing this.
2348 2371 ifh.seek(0, os.SEEK_END)
2349 2372 if dfh:
2350 2373 dfh.seek(0, os.SEEK_END)
2351 2374
2352 2375 curr = len(self) - 1
2353 2376 if not self._inline:
2354 2377 transaction.add(self.datafile, offset)
2355 2378 transaction.add(self.indexfile, curr * len(entry))
2356 2379 if data[0]:
2357 2380 dfh.write(data[0])
2358 2381 dfh.write(data[1])
2359 2382 ifh.write(entry)
2360 2383 else:
2361 2384 offset += curr * self._io.size
2362 2385 transaction.add(self.indexfile, offset)
2363 2386 ifh.write(entry)
2364 2387 ifh.write(data[0])
2365 2388 ifh.write(data[1])
2366 2389 self._enforceinlinesize(transaction, ifh)
2367 2390 nodemaputil.setup_persistent_nodemap(transaction, self)
2368 2391
2369 2392 def addgroup(
2370 2393 self,
2371 2394 deltas,
2372 2395 linkmapper,
2373 2396 transaction,
2374 2397 alwayscache=False,
2375 2398 addrevisioncb=None,
2376 2399 duplicaterevisioncb=None,
2377 2400 ):
2378 2401 """
2379 2402 add a delta group
2380 2403
2381 2404 given a set of deltas, add them to the revision log. The
2382 2405 first delta is against its parent, which should be in our
2383 2406 log; the rest are against the previous delta.
2384 2407
2385 2408 If ``addrevisioncb`` is defined, it will be called with arguments of
2386 2409 this revlog and the revision number that was added.
2387 2410 """
2388 2411
2389 2412 if self._writinghandles:
2390 2413 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2391 2414
2392 2415 r = len(self)
2393 2416 end = 0
2394 2417 if r:
2395 2418 end = self.end(r - 1)
2396 2419 ifh = self._indexfp(b"a+")
2397 2420 isize = r * self._io.size
2398 2421 if self._inline:
2399 2422 transaction.add(self.indexfile, end + isize)
2400 2423 dfh = None
2401 2424 else:
2402 2425 transaction.add(self.indexfile, isize)
2403 2426 transaction.add(self.datafile, end)
2404 2427 dfh = self._datafp(b"a+")
2405 2428
2406 2429 def flush():
2407 2430 if dfh:
2408 2431 dfh.flush()
2409 2432 ifh.flush()
2410 2433
2411 2434 self._writinghandles = (ifh, dfh)
2412 2435 empty = True
2413 2436
2414 2437 try:
2415 2438 deltacomputer = deltautil.deltacomputer(self)
2416 2439 # loop through our set of deltas
2417 2440 for data in deltas:
2418 2441 node, p1, p2, linknode, deltabase, delta, flags = data
2419 2442 link = linkmapper(linknode)
2420 2443 flags = flags or REVIDX_DEFAULT_FLAGS
2421 2444
2422 2445 rev = self.index.get_rev(node)
2423 2446 if rev is not None:
2424 2447 # this can happen if two branches make the same change
2425 2448 self._nodeduplicatecallback(transaction, rev)
2426 2449 if duplicaterevisioncb:
2427 2450 duplicaterevisioncb(self, rev)
2428 2451 empty = False
2429 2452 continue
2430 2453
2431 2454 for p in (p1, p2):
2432 2455 if not self.index.has_node(p):
2433 2456 raise error.LookupError(
2434 2457 p, self.indexfile, _(b'unknown parent')
2435 2458 )
2436 2459
2437 2460 if not self.index.has_node(deltabase):
2438 2461 raise error.LookupError(
2439 2462 deltabase, self.indexfile, _(b'unknown delta base')
2440 2463 )
2441 2464
2442 2465 baserev = self.rev(deltabase)
2443 2466
2444 2467 if baserev != nullrev and self.iscensored(baserev):
2445 2468 # if base is censored, delta must be full replacement in a
2446 2469 # single patch operation
2447 2470 hlen = struct.calcsize(b">lll")
2448 2471 oldlen = self.rawsize(baserev)
2449 2472 newlen = len(delta) - hlen
2450 2473 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2451 2474 raise error.CensoredBaseError(
2452 2475 self.indexfile, self.node(baserev)
2453 2476 )
2454 2477
2455 2478 if not flags and self._peek_iscensored(baserev, delta, flush):
2456 2479 flags |= REVIDX_ISCENSORED
2457 2480
2458 2481 # We assume consumers of addrevisioncb will want to retrieve
2459 2482 # the added revision, which will require a call to
2460 2483 # revision(). revision() will fast path if there is a cache
2461 2484 # hit. So, we tell _addrevision() to always cache in this case.
2462 2485 # We're only using addgroup() in the context of changegroup
2463 2486 # generation so the revision data can always be handled as raw
2464 2487 # by the flagprocessor.
2465 2488 rev = self._addrevision(
2466 2489 node,
2467 2490 None,
2468 2491 transaction,
2469 2492 link,
2470 2493 p1,
2471 2494 p2,
2472 2495 flags,
2473 2496 (baserev, delta),
2474 2497 ifh,
2475 2498 dfh,
2476 2499 alwayscache=alwayscache,
2477 2500 deltacomputer=deltacomputer,
2478 2501 )
2479 2502
2480 2503 if addrevisioncb:
2481 2504 addrevisioncb(self, rev)
2482 2505 empty = False
2483 2506
2484 2507 if not dfh and not self._inline:
2485 2508 # addrevision switched from inline to conventional
2486 2509 # reopen the index
2487 2510 ifh.close()
2488 2511 dfh = self._datafp(b"a+")
2489 2512 ifh = self._indexfp(b"a+")
2490 2513 self._writinghandles = (ifh, dfh)
2491 2514 finally:
2492 2515 self._writinghandles = None
2493 2516
2494 2517 if dfh:
2495 2518 dfh.close()
2496 2519 ifh.close()
2497 2520 return not empty
2498 2521
2499 2522 def iscensored(self, rev):
2500 2523 """Check if a file revision is censored."""
2501 2524 if not self._censorable:
2502 2525 return False
2503 2526
2504 2527 return self.flags(rev) & REVIDX_ISCENSORED
2505 2528
2506 2529 def _peek_iscensored(self, baserev, delta, flush):
2507 2530 """Quickly check if a delta produces a censored revision."""
2508 2531 if not self._censorable:
2509 2532 return False
2510 2533
2511 2534 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2512 2535
2513 2536 def getstrippoint(self, minlink):
2514 2537 """find the minimum rev that must be stripped to strip the linkrev
2515 2538
2516 2539 Returns a tuple containing the minimum rev and a set of all revs that
2517 2540 have linkrevs that will be broken by this strip.
2518 2541 """
2519 2542 return storageutil.resolvestripinfo(
2520 2543 minlink,
2521 2544 len(self) - 1,
2522 2545 self.headrevs(),
2523 2546 self.linkrev,
2524 2547 self.parentrevs,
2525 2548 )
2526 2549
2527 2550 def strip(self, minlink, transaction):
2528 2551 """truncate the revlog on the first revision with a linkrev >= minlink
2529 2552
2530 2553 This function is called when we're stripping revision minlink and
2531 2554 its descendants from the repository.
2532 2555
2533 2556 We have to remove all revisions with linkrev >= minlink, because
2534 2557 the equivalent changelog revisions will be renumbered after the
2535 2558 strip.
2536 2559
2537 2560 So we truncate the revlog on the first of these revisions, and
2538 2561 trust that the caller has saved the revisions that shouldn't be
2539 2562 removed and that it'll re-add them after this truncation.
2540 2563 """
2541 2564 if len(self) == 0:
2542 2565 return
2543 2566
2544 2567 rev, _ = self.getstrippoint(minlink)
2545 2568 if rev == len(self):
2546 2569 return
2547 2570
2548 2571 # first truncate the files on disk
2549 2572 end = self.start(rev)
2550 2573 if not self._inline:
2551 2574 transaction.add(self.datafile, end)
2552 2575 end = rev * self._io.size
2553 2576 else:
2554 2577 end += rev * self._io.size
2555 2578
2556 2579 transaction.add(self.indexfile, end)
2557 2580
2558 2581 # then reset internal state in memory to forget those revisions
2559 2582 self._revisioncache = None
2560 2583 self._chaininfocache = util.lrucachedict(500)
2561 2584 self._chunkclear()
2562 2585
2563 2586 del self.index[rev:-1]
2564 2587
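# Editor's sketch (not part of revlog.py): the truncation points that strip()
# registers with the transaction, as a standalone computation.  `data_start`
# plays the role of self.start(rev); the 64-byte entry size is the revlog
# version 1 value and is an assumption of this example.
def sketch_strip_offsets(inline, rev, data_start, entry_size=64):
    if inline:
        # a single .i file holds both the entries and the data kept so far
        return {'index': data_start + rev * entry_size}
    return {'index': rev * entry_size, 'data': data_start}
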
2565 2588 def checksize(self):
2566 2589 """Check size of index and data files
2567 2590
2568 2591 return a (dd, di) tuple.
2569 2592 - dd: extra bytes for the "data" file
2570 2593 - di: extra bytes for the "index" file
2571 2594
2572 2595 A healthy revlog will return (0, 0).
2573 2596 """
2574 2597 expected = 0
2575 2598 if len(self):
2576 2599 expected = max(0, self.end(len(self) - 1))
2577 2600
2578 2601 try:
2579 2602 with self._datafp() as f:
2580 2603 f.seek(0, io.SEEK_END)
2581 2604 actual = f.tell()
2582 2605 dd = actual - expected
2583 2606 except IOError as inst:
2584 2607 if inst.errno != errno.ENOENT:
2585 2608 raise
2586 2609 dd = 0
2587 2610
2588 2611 try:
2589 2612 f = self.opener(self.indexfile)
2590 2613 f.seek(0, io.SEEK_END)
2591 2614 actual = f.tell()
2592 2615 f.close()
2593 2616 s = self._io.size
2594 2617 i = max(0, actual // s)
2595 2618 di = actual - (i * s)
2596 2619 if self._inline:
2597 2620 databytes = 0
2598 2621 for r in self:
2599 2622 databytes += max(0, self.length(r))
2600 2623 dd = 0
2601 2624 di = actual - len(self) * s - databytes
2602 2625 except IOError as inst:
2603 2626 if inst.errno != errno.ENOENT:
2604 2627 raise
2605 2628 di = 0
2606 2629
2607 2630 return (dd, di)
2608 2631
2609 2632 def files(self):
2610 2633 res = [self.indexfile]
2611 2634 if not self._inline:
2612 2635 res.append(self.datafile)
2613 2636 return res
2614 2637
2615 2638 def emitrevisions(
2616 2639 self,
2617 2640 nodes,
2618 2641 nodesorder=None,
2619 2642 revisiondata=False,
2620 2643 assumehaveparentrevisions=False,
2621 2644 deltamode=repository.CG_DELTAMODE_STD,
2622 2645 ):
2623 2646 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2624 2647 raise error.ProgrammingError(
2625 2648 b'unhandled value for nodesorder: %s' % nodesorder
2626 2649 )
2627 2650
2628 2651 if nodesorder is None and not self._generaldelta:
2629 2652 nodesorder = b'storage'
2630 2653
2631 2654 if (
2632 2655 not self._storedeltachains
2633 2656 and deltamode != repository.CG_DELTAMODE_PREV
2634 2657 ):
2635 2658 deltamode = repository.CG_DELTAMODE_FULL
2636 2659
2637 2660 return storageutil.emitrevisions(
2638 2661 self,
2639 2662 nodes,
2640 2663 nodesorder,
2641 2664 revlogrevisiondelta,
2642 2665 deltaparentfn=self.deltaparent,
2643 2666 candeltafn=self.candelta,
2644 2667 rawsizefn=self.rawsize,
2645 2668 revdifffn=self.revdiff,
2646 2669 flagsfn=self.flags,
2647 2670 deltamode=deltamode,
2648 2671 revisiondata=revisiondata,
2649 2672 assumehaveparentrevisions=assumehaveparentrevisions,
2650 2673 )
2651 2674
2652 2675 DELTAREUSEALWAYS = b'always'
2653 2676 DELTAREUSESAMEREVS = b'samerevs'
2654 2677 DELTAREUSENEVER = b'never'
2655 2678
2656 2679 DELTAREUSEFULLADD = b'fulladd'
2657 2680
2658 2681 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2659 2682
2660 2683 def clone(
2661 2684 self,
2662 2685 tr,
2663 2686 destrevlog,
2664 2687 addrevisioncb=None,
2665 2688 deltareuse=DELTAREUSESAMEREVS,
2666 2689 forcedeltabothparents=None,
2667 2690 sidedatacompanion=None,
2668 2691 ):
2669 2692 """Copy this revlog to another, possibly with format changes.
2670 2693
2671 2694 The destination revlog will contain the same revisions and nodes.
2672 2695 However, it may not be bit-for-bit identical due to e.g. delta encoding
2673 2696 differences.
2674 2697
2675 2698 The ``deltareuse`` argument controls how deltas from the existing revlog
2676 2699 are preserved in the destination revlog. The argument can have the
2677 2700 following values:
2678 2701
2679 2702 DELTAREUSEALWAYS
2680 2703 Deltas will always be reused (if possible), even if the destination
2681 2704 revlog would not select the same revisions for the delta. This is the
2682 2705 fastest mode of operation.
2683 2706 DELTAREUSESAMEREVS
2684 2707 Deltas will be reused if the destination revlog would pick the same
2685 2708 revisions for the delta. This mode strikes a balance between speed
2686 2709 and optimization.
2687 2710 DELTAREUSENEVER
2688 2711 Deltas will never be reused. This is the slowest mode of execution.
2689 2712 This mode can be used to recompute deltas (e.g. if the diff/delta
2690 2713 algorithm changes).
2691 2714 DELTAREUSEFULLADD
2692 2715 Revisions will be re-added as if they were new content. This is
2693 2716 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2694 2717 e.g. large file detection and handling.
2695 2718
2696 2719 Delta computation can be slow, so the choice of delta reuse policy can
2697 2720 significantly affect run time.
2698 2721
2699 2722 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2700 2723 two extremes. Deltas will be reused if they are appropriate. But if the
2701 2724 delta could choose a better revision, it will do so. This means if you
2702 2725 are converting a non-generaldelta revlog to a generaldelta revlog,
2703 2726 deltas will be recomputed if the delta's parent isn't a parent of the
2704 2727 revision.
2705 2728
2706 2729 In addition to the delta policy, the ``forcedeltabothparents``
2707 2730 argument controls whether to force computing deltas against both parents
2708 2731 for merges. If None, the current default behaviour is kept.
2709 2732
2710 2733 If not None, `sidedatacompanion` is a callable that accepts two
2711 2734 arguments:
2712 2735
2713 2736 (srcrevlog, rev)
2714 2737
2715 2738 and returns a quintet that controls changes to sidedata content from the
2716 2739 old revision to the new clone result:
2717 2740
2718 2741 (dropall, filterout, update, new_flags, dropped_flags)
2719 2742
2720 2743 * if `dropall` is True, all sidedata should be dropped
2721 2744 * `filterout` is a set of sidedata keys that should be dropped
2722 2745 * `update` is a mapping of additional/new key -> value
2723 2746 * `new_flags` is a bitfield of new flags that the revision should get
2724 2747 * `dropped_flags` is a bitfield of flags that the revision should no longer have
2725 2748 """
2726 2749 if deltareuse not in self.DELTAREUSEALL:
2727 2750 raise ValueError(
2728 2751 _(b'value for deltareuse invalid: %s') % deltareuse
2729 2752 )
2730 2753
2731 2754 if len(destrevlog):
2732 2755 raise ValueError(_(b'destination revlog is not empty'))
2733 2756
2734 2757 if getattr(self, 'filteredrevs', None):
2735 2758 raise ValueError(_(b'source revlog has filtered revisions'))
2736 2759 if getattr(destrevlog, 'filteredrevs', None):
2737 2760 raise ValueError(_(b'destination revlog has filtered revisions'))
2738 2761
2739 2762 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2740 2763 # if possible.
2741 2764 oldlazydelta = destrevlog._lazydelta
2742 2765 oldlazydeltabase = destrevlog._lazydeltabase
2743 2766 oldamd = destrevlog._deltabothparents
2744 2767
2745 2768 try:
2746 2769 if deltareuse == self.DELTAREUSEALWAYS:
2747 2770 destrevlog._lazydeltabase = True
2748 2771 destrevlog._lazydelta = True
2749 2772 elif deltareuse == self.DELTAREUSESAMEREVS:
2750 2773 destrevlog._lazydeltabase = False
2751 2774 destrevlog._lazydelta = True
2752 2775 elif deltareuse == self.DELTAREUSENEVER:
2753 2776 destrevlog._lazydeltabase = False
2754 2777 destrevlog._lazydelta = False
2755 2778
2756 2779 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2757 2780
2758 2781 self._clone(
2759 2782 tr,
2760 2783 destrevlog,
2761 2784 addrevisioncb,
2762 2785 deltareuse,
2763 2786 forcedeltabothparents,
2764 2787 sidedatacompanion,
2765 2788 )
2766 2789
2767 2790 finally:
2768 2791 destrevlog._lazydelta = oldlazydelta
2769 2792 destrevlog._lazydeltabase = oldlazydeltabase
2770 2793 destrevlog._deltabothparents = oldamd
2771 2794
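# Editor's sketch (not part of revlog.py): how the three in-place policies
# above map onto the destination revlog's delta-reuse flags.  The dictionary
# shape is invented for the example; DELTAREUSEFULLADD takes the separate
# addrevision() path in _clone() and is therefore not listed.
def sketch_deltareuse_flags(policy):
    return {
        b'always': {'lazydelta': True, 'lazydeltabase': True},
        b'samerevs': {'lazydelta': True, 'lazydeltabase': False},
        b'never': {'lazydelta': False, 'lazydeltabase': False},
    }[policy]

# sketch_deltareuse_flags(b'samerevs') == {'lazydelta': True, 'lazydeltabase': False}
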
2772 2795 def _clone(
2773 2796 self,
2774 2797 tr,
2775 2798 destrevlog,
2776 2799 addrevisioncb,
2777 2800 deltareuse,
2778 2801 forcedeltabothparents,
2779 2802 sidedatacompanion,
2780 2803 ):
2781 2804 """perform the core duty of `revlog.clone` after parameter processing"""
2782 2805 deltacomputer = deltautil.deltacomputer(destrevlog)
2783 2806 index = self.index
2784 2807 for rev in self:
2785 2808 entry = index[rev]
2786 2809
2787 2810 # Some classes override linkrev to take filtered revs into
2788 2811 # account. Use raw entry from index.
2789 2812 flags = entry[0] & 0xFFFF
2790 2813 linkrev = entry[4]
2791 2814 p1 = index[entry[5]][7]
2792 2815 p2 = index[entry[6]][7]
2793 2816 node = entry[7]
2794 2817
2795 2818 sidedataactions = (False, [], {}, 0, 0)
2796 2819 if sidedatacompanion is not None:
2797 2820 sidedataactions = sidedatacompanion(self, rev)
2798 2821
2799 2822 # (Possibly) reuse the delta from the revlog if allowed and
2800 2823 # the revlog chunk is a delta.
2801 2824 cachedelta = None
2802 2825 rawtext = None
2803 2826 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2804 2827 dropall = sidedataactions[0]
2805 2828 filterout = sidedataactions[1]
2806 2829 update = sidedataactions[2]
2807 2830 new_flags = sidedataactions[3]
2808 2831 dropped_flags = sidedataactions[4]
2809 2832 text, sidedata = self._revisiondata(rev)
2810 2833 if dropall:
2811 2834 sidedata = {}
2812 2835 for key in filterout:
2813 2836 sidedata.pop(key, None)
2814 2837 sidedata.update(update)
2815 2838 if not sidedata:
2816 2839 sidedata = None
2817 2840
2818 2841 flags |= new_flags
2819 2842 flags &= ~dropped_flags
2820 2843
2821 2844 destrevlog.addrevision(
2822 2845 text,
2823 2846 tr,
2824 2847 linkrev,
2825 2848 p1,
2826 2849 p2,
2827 2850 cachedelta=cachedelta,
2828 2851 node=node,
2829 2852 flags=flags,
2830 2853 deltacomputer=deltacomputer,
2831 2854 sidedata=sidedata,
2832 2855 )
2833 2856 else:
2834 2857 if destrevlog._lazydelta:
2835 2858 dp = self.deltaparent(rev)
2836 2859 if dp != nullrev:
2837 2860 cachedelta = (dp, bytes(self._chunk(rev)))
2838 2861
2839 2862 if not cachedelta:
2840 2863 rawtext = self.rawdata(rev)
2841 2864
2842 2865 ifh = destrevlog.opener(
2843 2866 destrevlog.indexfile, b'a+', checkambig=False
2844 2867 )
2845 2868 dfh = None
2846 2869 if not destrevlog._inline:
2847 2870 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2848 2871 try:
2849 2872 destrevlog._addrevision(
2850 2873 node,
2851 2874 rawtext,
2852 2875 tr,
2853 2876 linkrev,
2854 2877 p1,
2855 2878 p2,
2856 2879 flags,
2857 2880 cachedelta,
2858 2881 ifh,
2859 2882 dfh,
2860 2883 deltacomputer=deltacomputer,
2861 2884 )
2862 2885 finally:
2863 2886 if dfh:
2864 2887 dfh.close()
2865 2888 ifh.close()
2866 2889
2867 2890 if addrevisioncb:
2868 2891 addrevisioncb(self, rev, node)
2869 2892
2870 2893 def censorrevision(self, tr, censornode, tombstone=b''):
2871 2894 if (self.version & 0xFFFF) == REVLOGV0:
2872 2895 raise error.RevlogError(
2873 2896 _(b'cannot censor with version %d revlogs') % self.version
2874 2897 )
2875 2898
2876 2899 censorrev = self.rev(censornode)
2877 2900 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2878 2901
2879 2902 if len(tombstone) > self.rawsize(censorrev):
2880 2903 raise error.Abort(
2881 2904 _(b'censor tombstone must be no longer than censored data')
2882 2905 )
2883 2906
2884 2907 # Rewriting the revlog in place is hard. Our strategy for censoring is
2885 2908 # to create a new revlog, copy all revisions to it, then replace the
2886 2909 # revlogs on transaction close.
2887 2910
2888 2911 newindexfile = self.indexfile + b'.tmpcensored'
2889 2912 newdatafile = self.datafile + b'.tmpcensored'
2890 2913
2891 2914 # This is a bit dangerous. We could easily have a mismatch of state.
2892 2915 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
2893 2916 newrl.version = self.version
2894 2917 newrl._generaldelta = self._generaldelta
2895 2918 newrl._io = self._io
2896 2919
2897 2920 for rev in self.revs():
2898 2921 node = self.node(rev)
2899 2922 p1, p2 = self.parents(node)
2900 2923
2901 2924 if rev == censorrev:
2902 2925 newrl.addrawrevision(
2903 2926 tombstone,
2904 2927 tr,
2905 2928 self.linkrev(censorrev),
2906 2929 p1,
2907 2930 p2,
2908 2931 censornode,
2909 2932 REVIDX_ISCENSORED,
2910 2933 )
2911 2934
2912 2935 if newrl.deltaparent(rev) != nullrev:
2913 2936 raise error.Abort(
2914 2937 _(
2915 2938 b'censored revision stored as delta; '
2916 2939 b'cannot censor'
2917 2940 ),
2918 2941 hint=_(
2919 2942 b'censoring of revlogs is not '
2920 2943 b'fully implemented; please report '
2921 2944 b'this bug'
2922 2945 ),
2923 2946 )
2924 2947 continue
2925 2948
2926 2949 if self.iscensored(rev):
2927 2950 if self.deltaparent(rev) != nullrev:
2928 2951 raise error.Abort(
2929 2952 _(
2930 2953 b'cannot censor due to censored '
2931 2954 b'revision having delta stored'
2932 2955 )
2933 2956 )
2934 2957 rawtext = self._chunk(rev)
2935 2958 else:
2936 2959 rawtext = self.rawdata(rev)
2937 2960
2938 2961 newrl.addrawrevision(
2939 2962 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2940 2963 )
2941 2964
2942 2965 tr.addbackup(self.indexfile, location=b'store')
2943 2966 if not self._inline:
2944 2967 tr.addbackup(self.datafile, location=b'store')
2945 2968
2946 2969 self.opener.rename(newrl.indexfile, self.indexfile)
2947 2970 if not self._inline:
2948 2971 self.opener.rename(newrl.datafile, self.datafile)
2949 2972
2950 2973 self.clearcaches()
2951 2974 self._loadindex()
2952 2975
2953 2976 def verifyintegrity(self, state):
2954 2977 """Verifies the integrity of the revlog.
2955 2978
2956 2979 Yields ``revlogproblem`` instances describing problems that are
2957 2980 found.
2958 2981 """
2959 2982 dd, di = self.checksize()
2960 2983 if dd:
2961 2984 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
2962 2985 if di:
2963 2986 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
2964 2987
2965 2988 version = self.version & 0xFFFF
2966 2989
2967 2990 # The verifier tells us what version revlog we should be.
2968 2991 if version != state[b'expectedversion']:
2969 2992 yield revlogproblem(
2970 2993 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
2971 2994 % (self.indexfile, version, state[b'expectedversion'])
2972 2995 )
2973 2996
2974 2997 state[b'skipread'] = set()
2975 2998 state[b'safe_renamed'] = set()
2976 2999
2977 3000 for rev in self:
2978 3001 node = self.node(rev)
2979 3002
2980 3003 # Verify contents. 4 cases to care about:
2981 3004 #
2982 3005 # common: the most common case
2983 3006 # rename: with a rename
2984 3007 # meta: file content starts with b'\1\n', the metadata
2985 3008 # header defined in filelog.py, but without a rename
2986 3009 # ext: content stored externally
2987 3010 #
2988 3011 # More formally, their differences are shown below:
2989 3012 #
2990 3013 # | common | rename | meta | ext
2991 3014 # -------------------------------------------------------
2992 3015 # flags() | 0 | 0 | 0 | not 0
2993 3016 # renamed() | False | True | False | ?
2994 3017 # rawtext[0:2]=='\1\n'| False | True | True | ?
2995 3018 #
2996 3019 # "rawtext" means the raw text stored in revlog data, which
2997 3020 # could be retrieved by "rawdata(rev)". "text"
2998 3021 # mentioned below is "revision(rev)".
2999 3022 #
3000 3023 # There are 3 different lengths stored physically:
3001 3024 # 1. L1: rawsize, stored in revlog index
3002 3025 # 2. L2: len(rawtext), stored in revlog data
3003 3026 # 3. L3: len(text), stored in revlog data if flags==0, or
3004 3027 # possibly somewhere else if flags!=0
3005 3028 #
3006 3029 # L1 should be equal to L2. L3 could be different from them.
3007 3030 # "text" may or may not affect commit hash depending on flag
3008 3031 # processors (see flagutil.addflagprocessor).
3009 3032 #
3010 3033 # | common | rename | meta | ext
3011 3034 # -------------------------------------------------
3012 3035 # rawsize() | L1 | L1 | L1 | L1
3013 3036 # size() | L1 | L2-LM | L1(*) | L1 (?)
3014 3037 # len(rawtext) | L2 | L2 | L2 | L2
3015 3038 # len(text) | L2 | L2 | L2 | L3
3016 3039 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3017 3040 #
3018 3041 # LM: length of metadata, depending on rawtext
3019 3042 # (*): not ideal, see comment in filelog.size
3020 3043 # (?): could be "- len(meta)" if the resolved content has
3021 3044 # rename metadata
3022 3045 #
3023 3046 # Checks needed to be done:
3024 3047 # 1. length check: L1 == L2, in all cases.
3025 3048 # 2. hash check: depending on flag processor, we may need to
3026 3049 # use either "text" (external), or "rawtext" (in revlog).
3027 3050
3028 3051 try:
3029 3052 skipflags = state.get(b'skipflags', 0)
3030 3053 if skipflags:
3031 3054 skipflags &= self.flags(rev)
3032 3055
3033 3056 _verify_revision(self, skipflags, state, node)
3034 3057
3035 3058 l1 = self.rawsize(rev)
3036 3059 l2 = len(self.rawdata(node))
3037 3060
3038 3061 if l1 != l2:
3039 3062 yield revlogproblem(
3040 3063 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3041 3064 node=node,
3042 3065 )
3043 3066
3044 3067 except error.CensoredNodeError:
3045 3068 if state[b'erroroncensored']:
3046 3069 yield revlogproblem(
3047 3070 error=_(b'censored file data'), node=node
3048 3071 )
3049 3072 state[b'skipread'].add(node)
3050 3073 except Exception as e:
3051 3074 yield revlogproblem(
3052 3075 error=_(b'unpacking %s: %s')
3053 3076 % (short(node), stringutil.forcebytestr(e)),
3054 3077 node=node,
3055 3078 )
3056 3079 state[b'skipread'].add(node)
3057 3080
3058 3081 def storageinfo(
3059 3082 self,
3060 3083 exclusivefiles=False,
3061 3084 sharedfiles=False,
3062 3085 revisionscount=False,
3063 3086 trackedsize=False,
3064 3087 storedsize=False,
3065 3088 ):
3066 3089 d = {}
3067 3090
3068 3091 if exclusivefiles:
3069 3092 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
3070 3093 if not self._inline:
3071 3094 d[b'exclusivefiles'].append((self.opener, self.datafile))
3072 3095
3073 3096 if sharedfiles:
3074 3097 d[b'sharedfiles'] = []
3075 3098
3076 3099 if revisionscount:
3077 3100 d[b'revisionscount'] = len(self)
3078 3101
3079 3102 if trackedsize:
3080 3103 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3081 3104
3082 3105 if storedsize:
3083 3106 d[b'storedsize'] = sum(
3084 3107 self.opener.stat(path).st_size for path in self.files()
3085 3108 )
3086 3109
3087 3110 return d
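
A brief usage note on storageinfo() as defined above: each key appears in the returned dict only when the corresponding flag is requested. The snippet below is a hypothetical sketch (`rl` stands for any revlog instance) and is not part of the change itself:

    # Hypothetical usage of storageinfo(); `rl` is assumed to be a revlog instance.
    info = rl.storageinfo(revisionscount=True, trackedsize=True, storedsize=True)
    info[b'revisionscount']  # len(rl): number of revisions in this revlog
    info[b'trackedsize']     # sum of rawsize(rev) over every revision
    info[b'storedsize']      # total on-disk size of the revlog's files
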
@@ -1,750 +1,754 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .pycompat import getattr
17 17 from .node import hex
18 18 from . import (
19 19 changelog,
20 20 error,
21 21 manifest,
22 22 policy,
23 23 pycompat,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 27 from .utils import hashutil
28 28
29 29 parsers = policy.importmod('parsers')
30 30 # how many bytes should be read from fncache in one read
31 31 # This is done to prevent loading large fncache files into memory
32 32 fncache_chunksize = 10 ** 6
33 33
34 34
35 35 def _matchtrackedpath(path, matcher):
36 36 """parses a fncache entry and returns whether the entry is tracking a path
37 37 matched by matcher or not.
38 38
39 39 If matcher is None, returns True"""
40 40
41 41 if matcher is None:
42 42 return True
43 43 path = decodedir(path)
44 44 if path.startswith(b'data/'):
45 45 return matcher(path[len(b'data/') : -len(b'.i')])
46 46 elif path.startswith(b'meta/'):
47 47 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
48 48
49 49 raise error.ProgrammingError(b"cannot decode path %s" % path)
50 50
51 51
52 52 # This avoids a collision between a file named foo and a dir named
53 53 # foo.i or foo.d
54 54 def _encodedir(path):
55 55 """
56 56 >>> _encodedir(b'data/foo.i')
57 57 'data/foo.i'
58 58 >>> _encodedir(b'data/foo.i/bla.i')
59 59 'data/foo.i.hg/bla.i'
60 60 >>> _encodedir(b'data/foo.i.hg/bla.i')
61 61 'data/foo.i.hg.hg/bla.i'
62 62 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
63 63 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
64 64 """
65 65 return (
66 66 path.replace(b".hg/", b".hg.hg/")
67 67 .replace(b".i/", b".i.hg/")
68 68 .replace(b".d/", b".d.hg/")
69 69 )
70 70
71 71
72 72 encodedir = getattr(parsers, 'encodedir', _encodedir)
73 73
74 74
75 75 def decodedir(path):
76 76 """
77 77 >>> decodedir(b'data/foo.i')
78 78 'data/foo.i'
79 79 >>> decodedir(b'data/foo.i.hg/bla.i')
80 80 'data/foo.i/bla.i'
81 81 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
82 82 'data/foo.i.hg/bla.i'
83 83 """
84 84 if b".hg/" not in path:
85 85 return path
86 86 return (
87 87 path.replace(b".d.hg/", b".d/")
88 88 .replace(b".i.hg/", b".i/")
89 89 .replace(b".hg.hg/", b".hg/")
90 90 )
91 91
92 92
93 93 def _reserved():
94 94 """characters that are problematic for filesystems
95 95
96 96 * ascii escapes (0..31)
97 97 * ascii hi (126..255)
98 98 * windows specials
99 99
100 100 these characters will be escaped by encodefunctions
101 101 """
102 102 winreserved = [ord(x) for x in u'\\:*?"<>|']
103 103 for x in range(32):
104 104 yield x
105 105 for x in range(126, 256):
106 106 yield x
107 107 for x in winreserved:
108 108 yield x
109 109
110 110
111 111 def _buildencodefun():
112 112 """
113 113 >>> enc, dec = _buildencodefun()
114 114
115 115 >>> enc(b'nothing/special.txt')
116 116 'nothing/special.txt'
117 117 >>> dec(b'nothing/special.txt')
118 118 'nothing/special.txt'
119 119
120 120 >>> enc(b'HELLO')
121 121 '_h_e_l_l_o'
122 122 >>> dec(b'_h_e_l_l_o')
123 123 'HELLO'
124 124
125 125 >>> enc(b'hello:world?')
126 126 'hello~3aworld~3f'
127 127 >>> dec(b'hello~3aworld~3f')
128 128 'hello:world?'
129 129
130 130 >>> enc(b'the\\x07quick\\xADshot')
131 131 'the~07quick~adshot'
132 132 >>> dec(b'the~07quick~adshot')
133 133 'the\\x07quick\\xadshot'
134 134 """
135 135 e = b'_'
136 136 xchr = pycompat.bytechr
137 137 asciistr = list(map(xchr, range(127)))
138 138 capitals = list(range(ord(b"A"), ord(b"Z") + 1))
139 139
140 140 cmap = {x: x for x in asciistr}
141 141 for x in _reserved():
142 142 cmap[xchr(x)] = b"~%02x" % x
143 143 for x in capitals + [ord(e)]:
144 144 cmap[xchr(x)] = e + xchr(x).lower()
145 145
146 146 dmap = {}
147 147 for k, v in pycompat.iteritems(cmap):
148 148 dmap[v] = k
149 149
150 150 def decode(s):
151 151 i = 0
152 152 while i < len(s):
153 153 for l in pycompat.xrange(1, 4):
154 154 try:
155 155 yield dmap[s[i : i + l]]
156 156 i += l
157 157 break
158 158 except KeyError:
159 159 pass
160 160 else:
161 161 raise KeyError
162 162
163 163 return (
164 164 lambda s: b''.join(
165 165 [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
166 166 ),
167 167 lambda s: b''.join(list(decode(s))),
168 168 )
169 169
170 170
171 171 _encodefname, _decodefname = _buildencodefun()
172 172
173 173
174 174 def encodefilename(s):
175 175 """
176 176 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
177 177 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
178 178 """
179 179 return _encodefname(encodedir(s))
180 180
181 181
182 182 def decodefilename(s):
183 183 """
184 184 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
185 185 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
186 186 """
187 187 return decodedir(_decodefname(s))
188 188
189 189
190 190 def _buildlowerencodefun():
191 191 """
192 192 >>> f = _buildlowerencodefun()
193 193 >>> f(b'nothing/special.txt')
194 194 'nothing/special.txt'
195 195 >>> f(b'HELLO')
196 196 'hello'
197 197 >>> f(b'hello:world?')
198 198 'hello~3aworld~3f'
199 199 >>> f(b'the\\x07quick\\xADshot')
200 200 'the~07quick~adshot'
201 201 """
202 202 xchr = pycompat.bytechr
203 203 cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
204 204 for x in _reserved():
205 205 cmap[xchr(x)] = b"~%02x" % x
206 206 for x in range(ord(b"A"), ord(b"Z") + 1):
207 207 cmap[xchr(x)] = xchr(x).lower()
208 208
209 209 def lowerencode(s):
210 210 return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])
211 211
212 212 return lowerencode
213 213
214 214
215 215 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
216 216
217 217 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
218 218 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
219 219 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
220 220
221 221
222 222 def _auxencode(path, dotencode):
223 223 """
224 224 Encodes filenames containing names reserved by Windows or which end in
225 225 period or space. Does not touch other single reserved characters c.
226 226 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
227 227 Additionally encodes space or period at the beginning, if dotencode is
228 228 True. Parameter path is assumed to be all lowercase.
229 229 A segment only needs encoding if a reserved name appears as a
230 230 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
231 231 doesn't need encoding.
232 232
233 233 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
234 234 >>> _auxencode(s.split(b'/'), True)
235 235 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
236 236 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
237 237 >>> _auxencode(s.split(b'/'), False)
238 238 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
239 239 >>> _auxencode([b'foo. '], True)
240 240 ['foo.~20']
241 241 >>> _auxencode([b' .foo'], True)
242 242 ['~20.foo']
243 243 """
244 244 for i, n in enumerate(path):
245 245 if not n:
246 246 continue
247 247 if dotencode and n[0] in b'. ':
248 248 n = b"~%02x" % ord(n[0:1]) + n[1:]
249 249 path[i] = n
250 250 else:
251 251 l = n.find(b'.')
252 252 if l == -1:
253 253 l = len(n)
254 254 if (l == 3 and n[:3] in _winres3) or (
255 255 l == 4
256 256 and n[3:4] <= b'9'
257 257 and n[3:4] >= b'1'
258 258 and n[:3] in _winres4
259 259 ):
260 260 # encode third letter ('aux' -> 'au~78')
261 261 ec = b"~%02x" % ord(n[2:3])
262 262 n = n[0:2] + ec + n[3:]
263 263 path[i] = n
264 264 if n[-1] in b'. ':
265 265 # encode last period or space ('foo...' -> 'foo..~2e')
266 266 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
267 267 return path
268 268
269 269
270 270 _maxstorepathlen = 120
271 271 _dirprefixlen = 8
272 272 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
273 273
274 274
275 275 def _hashencode(path, dotencode):
276 276 digest = hex(hashutil.sha1(path).digest())
277 277 le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/'
278 278 parts = _auxencode(le, dotencode)
279 279 basename = parts[-1]
280 280 _root, ext = os.path.splitext(basename)
281 281 sdirs = []
282 282 sdirslen = 0
283 283 for p in parts[:-1]:
284 284 d = p[:_dirprefixlen]
285 285 if d[-1] in b'. ':
286 286 # Windows can't access dirs ending in period or space
287 287 d = d[:-1] + b'_'
288 288 if sdirslen == 0:
289 289 t = len(d)
290 290 else:
291 291 t = sdirslen + 1 + len(d)
292 292 if t > _maxshortdirslen:
293 293 break
294 294 sdirs.append(d)
295 295 sdirslen = t
296 296 dirs = b'/'.join(sdirs)
297 297 if len(dirs) > 0:
298 298 dirs += b'/'
299 299 res = b'dh/' + dirs + digest + ext
300 300 spaceleft = _maxstorepathlen - len(res)
301 301 if spaceleft > 0:
302 302 filler = basename[:spaceleft]
303 303 res = b'dh/' + dirs + filler + digest + ext
304 304 return res
305 305
306 306
307 307 def _hybridencode(path, dotencode):
308 308 """encodes path with a length limit
309 309
310 310 Encodes all paths that begin with 'data/', according to the following.
311 311
312 312 Default encoding (reversible):
313 313
314 314 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
315 315 characters are encoded as '~xx', where xx is the two digit hex code
316 316 of the character (see encodefilename).
317 317 Relevant path components consisting of Windows reserved filenames are
318 318 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
319 319
320 320 Hashed encoding (not reversible):
321 321
322 322 If the default-encoded path is longer than _maxstorepathlen, a
323 323 non-reversible hybrid hashing of the path is done instead.
324 324 This encoding uses up to _dirprefixlen characters of all directory
325 325 levels of the lowerencoded path, but not more levels than can fit into
326 326 _maxshortdirslen.
327 327 Then follows the filler followed by the sha digest of the full path.
328 328 The filler is the beginning of the basename of the lowerencoded path
329 329 (the basename is everything after the last path separator). The filler
330 330 is as long as possible, filling in characters from the basename until
331 331 the encoded path has _maxstorepathlen characters (or all chars of the
332 332 basename have been taken).
333 333 The extension (e.g. '.i' or '.d') is preserved.
334 334
335 335 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
336 336 encoding was used.
337 337 """
338 338 path = encodedir(path)
339 339 ef = _encodefname(path).split(b'/')
340 340 res = b'/'.join(_auxencode(ef, dotencode))
341 341 if len(res) > _maxstorepathlen:
342 342 res = _hashencode(path, dotencode)
343 343 return res
344 344
345 345
346 346 def _pathencode(path):
347 347 de = encodedir(path)
348 348 if len(path) > _maxstorepathlen:
349 349 return _hashencode(de, True)
350 350 ef = _encodefname(de).split(b'/')
351 351 res = b'/'.join(_auxencode(ef, True))
352 352 if len(res) > _maxstorepathlen:
353 353 return _hashencode(de, True)
354 354 return res
355 355
356 356
357 357 _pathencode = getattr(parsers, 'pathencode', _pathencode)
358 358
359 359
360 360 def _plainhybridencode(f):
361 361 return _hybridencode(f, False)
362 362
363 363
364 364 def _calcmode(vfs):
365 365 try:
366 366 # files in .hg/ will be created using this mode
367 367 mode = vfs.stat().st_mode
368 368 # avoid some useless chmods
369 369 if (0o777 & ~util.umask) == (0o777 & mode):
370 370 mode = None
371 371 except OSError:
372 372 mode = None
373 373 return mode
374 374
375 375
376 376 _data = [
377 377 b'bookmarks',
378 378 b'narrowspec',
379 379 b'data',
380 380 b'meta',
381 381 b'00manifest.d',
382 382 b'00manifest.i',
383 383 b'00changelog.d',
384 384 b'00changelog.i',
385 385 b'phaseroots',
386 386 b'obsstore',
387 387 b'requires',
388 388 ]
389 389
390 390 REVLOG_FILES_EXT = (b'.i', b'.d', b'.n', b'.nd')
391 391
392 392
393 393 def isrevlog(f, kind, st):
394 394 if kind != stat.S_IFREG:
395 395 return False
396 396 return f.endswith(REVLOG_FILES_EXT)
397 397
398 398
399 399 class basicstore(object):
400 400 '''base class for local repository stores'''
401 401
402 402 def __init__(self, path, vfstype):
403 403 vfs = vfstype(path)
404 404 self.path = vfs.base
405 405 self.createmode = _calcmode(vfs)
406 406 vfs.createmode = self.createmode
407 407 self.rawvfs = vfs
408 408 self.vfs = vfsmod.filtervfs(vfs, encodedir)
409 409 self.opener = self.vfs
410 410
411 411 def join(self, f):
412 412 return self.path + b'/' + encodedir(f)
413 413
414 414 def _walk(self, relpath, recurse, filefilter=isrevlog):
415 415 '''yields (unencoded, encoded, size)'''
416 416 path = self.path
417 417 if relpath:
418 418 path += b'/' + relpath
419 419 striplen = len(self.path) + 1
420 420 l = []
421 421 if self.rawvfs.isdir(path):
422 422 visit = [path]
423 423 readdir = self.rawvfs.readdir
424 424 while visit:
425 425 p = visit.pop()
426 426 for f, kind, st in readdir(p, stat=True):
427 427 fp = p + b'/' + f
428 428 if filefilter(f, kind, st):
429 429 n = util.pconvert(fp[striplen:])
430 430 l.append((decodedir(n), n, st.st_size))
431 431 elif kind == stat.S_IFDIR and recurse:
432 432 visit.append(fp)
433 433 l.sort()
434 434 return l
435 435
436 def changelog(self, trypending):
437 return changelog.changelog(self.vfs, trypending=trypending)
436 def changelog(self, trypending, concurrencychecker=None):
437 return changelog.changelog(
438 self.vfs,
439 trypending=trypending,
440 concurrencychecker=concurrencychecker,
441 )
438 442
439 443 def manifestlog(self, repo, storenarrowmatch):
440 444 rootstore = manifest.manifestrevlog(self.vfs)
441 445 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
442 446
443 447 def datafiles(self, matcher=None):
444 448 return self._walk(b'data', True) + self._walk(b'meta', True)
445 449
446 450 def topfiles(self):
447 451 # yield manifest before changelog
448 452 return reversed(self._walk(b'', False))
449 453
450 454 def walk(self, matcher=None):
451 455 """yields (unencoded, encoded, size)
452 456
453 457 if a matcher is passed, storage files of only those tracked paths
454 458 matched by the matcher are yielded
455 459 """
456 460 # yield data files first
457 461 for x in self.datafiles(matcher):
458 462 yield x
459 463 for x in self.topfiles():
460 464 yield x
461 465
462 466 def copylist(self):
463 467 return _data
464 468
465 469 def write(self, tr):
466 470 pass
467 471
468 472 def invalidatecaches(self):
469 473 pass
470 474
471 475 def markremoved(self, fn):
472 476 pass
473 477
474 478 def __contains__(self, path):
475 479 '''Checks if the store contains path'''
476 480 path = b"/".join((b"data", path))
477 481 # file?
478 482 if self.vfs.exists(path + b".i"):
479 483 return True
480 484 # dir?
481 485 if not path.endswith(b"/"):
482 486 path = path + b"/"
483 487 return self.vfs.exists(path)
484 488
485 489
486 490 class encodedstore(basicstore):
487 491 def __init__(self, path, vfstype):
488 492 vfs = vfstype(path + b'/store')
489 493 self.path = vfs.base
490 494 self.createmode = _calcmode(vfs)
491 495 vfs.createmode = self.createmode
492 496 self.rawvfs = vfs
493 497 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
494 498 self.opener = self.vfs
495 499
496 500 def datafiles(self, matcher=None):
497 501 for a, b, size in super(encodedstore, self).datafiles():
498 502 try:
499 503 a = decodefilename(a)
500 504 except KeyError:
501 505 a = None
502 506 if a is not None and not _matchtrackedpath(a, matcher):
503 507 continue
504 508 yield a, b, size
505 509
506 510 def join(self, f):
507 511 return self.path + b'/' + encodefilename(f)
508 512
509 513 def copylist(self):
510 514 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
511 515
512 516
513 517 class fncache(object):
514 518 # the filename used to be partially encoded
515 519 # hence the encodedir/decodedir dance
516 520 def __init__(self, vfs):
517 521 self.vfs = vfs
518 522 self.entries = None
519 523 self._dirty = False
520 524 # set of new additions to fncache
521 525 self.addls = set()
522 526
523 527 def ensureloaded(self, warn=None):
524 528 """read the fncache file if not already read.
525 529
526 530 If the file on disk is corrupted, raise. If warn is provided,
527 531 warn and keep going instead."""
528 532 if self.entries is None:
529 533 self._load(warn)
530 534
531 535 def _load(self, warn=None):
532 536 '''fill the entries from the fncache file'''
533 537 self._dirty = False
534 538 try:
535 539 fp = self.vfs(b'fncache', mode=b'rb')
536 540 except IOError:
537 541 # skip nonexistent file
538 542 self.entries = set()
539 543 return
540 544
541 545 self.entries = set()
542 546 chunk = b''
543 547 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
544 548 chunk += c
545 549 try:
546 550 p = chunk.rindex(b'\n')
547 551 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
548 552 chunk = chunk[p + 1 :]
549 553 except ValueError:
550 554 # substring '\n' not found, maybe the entry is bigger than the
551 555 # chunksize, so let's keep iterating
552 556 pass
553 557
554 558 if chunk:
555 559 msg = _(b"fncache does not ends with a newline")
556 560 if warn:
557 561 warn(msg + b'\n')
558 562 else:
559 563 raise error.Abort(
560 564 msg,
561 565 hint=_(
562 566 b"use 'hg debugrebuildfncache' to "
563 567 b"rebuild the fncache"
564 568 ),
565 569 )
566 570 self._checkentries(fp, warn)
567 571 fp.close()
568 572
569 573 def _checkentries(self, fp, warn):
570 574 """ make sure there is no empty string in entries """
571 575 if b'' in self.entries:
572 576 fp.seek(0)
573 577 for n, line in enumerate(util.iterfile(fp)):
574 578 if not line.rstrip(b'\n'):
575 579 t = _(b'invalid entry in fncache, line %d') % (n + 1)
576 580 if warn:
577 581 warn(t + b'\n')
578 582 else:
579 583 raise error.Abort(t)
580 584
581 585 def write(self, tr):
582 586 if self._dirty:
583 587 assert self.entries is not None
584 588 self.entries = self.entries | self.addls
585 589 self.addls = set()
586 590 tr.addbackup(b'fncache')
587 591 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
588 592 if self.entries:
589 593 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
590 594 fp.close()
591 595 self._dirty = False
592 596 if self.addls:
593 597 # if we have just new entries, let's append them to the fncache
594 598 tr.addbackup(b'fncache')
595 599 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
596 600 if self.addls:
597 601 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
598 602 fp.close()
599 603 self.entries = None
600 604 self.addls = set()
601 605
602 606 def add(self, fn):
603 607 if self.entries is None:
604 608 self._load()
605 609 if fn not in self.entries:
606 610 self.addls.add(fn)
607 611
608 612 def remove(self, fn):
609 613 if self.entries is None:
610 614 self._load()
611 615 if fn in self.addls:
612 616 self.addls.remove(fn)
613 617 return
614 618 try:
615 619 self.entries.remove(fn)
616 620 self._dirty = True
617 621 except KeyError:
618 622 pass
619 623
620 624 def __contains__(self, fn):
621 625 if fn in self.addls:
622 626 return True
623 627 if self.entries is None:
624 628 self._load()
625 629 return fn in self.entries
626 630
627 631 def __iter__(self):
628 632 if self.entries is None:
629 633 self._load()
630 634 return iter(self.entries | self.addls)
631 635
632 636
633 637 class _fncachevfs(vfsmod.proxyvfs):
634 638 def __init__(self, vfs, fnc, encode):
635 639 vfsmod.proxyvfs.__init__(self, vfs)
636 640 self.fncache = fnc
637 641 self.encode = encode
638 642
639 643 def __call__(self, path, mode=b'r', *args, **kw):
640 644 encoded = self.encode(path)
641 645 if mode not in (b'r', b'rb') and (
642 646 path.startswith(b'data/') or path.startswith(b'meta/')
643 647 ):
644 648 # do not trigger a fncache load when adding a file that already is
645 649 # known to exist.
646 650 notload = self.fncache.entries is None and self.vfs.exists(encoded)
647 651 if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
648 652 # when appending to an existing file, if the file has size zero,
649 653 # it should be considered as missing. Such zero-size files are
650 654 # the result of truncation when a transaction is aborted.
651 655 notload = False
652 656 if not notload:
653 657 self.fncache.add(path)
654 658 return self.vfs(encoded, mode, *args, **kw)
655 659
656 660 def join(self, path):
657 661 if path:
658 662 return self.vfs.join(self.encode(path))
659 663 else:
660 664 return self.vfs.join(path)
661 665
662 666
663 667 class fncachestore(basicstore):
664 668 def __init__(self, path, vfstype, dotencode):
665 669 if dotencode:
666 670 encode = _pathencode
667 671 else:
668 672 encode = _plainhybridencode
669 673 self.encode = encode
670 674 vfs = vfstype(path + b'/store')
671 675 self.path = vfs.base
672 676 self.pathsep = self.path + b'/'
673 677 self.createmode = _calcmode(vfs)
674 678 vfs.createmode = self.createmode
675 679 self.rawvfs = vfs
676 680 fnc = fncache(vfs)
677 681 self.fncache = fnc
678 682 self.vfs = _fncachevfs(vfs, fnc, encode)
679 683 self.opener = self.vfs
680 684
681 685 def join(self, f):
682 686 return self.pathsep + self.encode(f)
683 687
684 688 def getsize(self, path):
685 689 return self.rawvfs.stat(path).st_size
686 690
687 691 def datafiles(self, matcher=None):
688 692 for f in sorted(self.fncache):
689 693 if not _matchtrackedpath(f, matcher):
690 694 continue
691 695 ef = self.encode(f)
692 696 try:
693 697 yield f, ef, self.getsize(ef)
694 698 except OSError as err:
695 699 if err.errno != errno.ENOENT:
696 700 raise
697 701
698 702 def copylist(self):
699 703 d = (
700 704 b'bookmarks',
701 705 b'narrowspec',
702 706 b'data',
703 707 b'meta',
704 708 b'dh',
705 709 b'fncache',
706 710 b'phaseroots',
707 711 b'obsstore',
708 712 b'00manifest.d',
709 713 b'00manifest.i',
710 714 b'00changelog.d',
711 715 b'00changelog.i',
712 716 b'requires',
713 717 )
714 718 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
715 719
716 720 def write(self, tr):
717 721 self.fncache.write(tr)
718 722
719 723 def invalidatecaches(self):
720 724 self.fncache.entries = None
721 725 self.fncache.addls = set()
722 726
723 727 def markremoved(self, fn):
724 728 self.fncache.remove(fn)
725 729
726 730 def _exists(self, f):
727 731 ef = self.encode(f)
728 732 try:
729 733 self.getsize(ef)
730 734 return True
731 735 except OSError as err:
732 736 if err.errno != errno.ENOENT:
733 737 raise
734 738 # nonexistent entry
735 739 return False
736 740
737 741 def __contains__(self, path):
738 742 '''Checks if the store contains path'''
739 743 path = b"/".join((b"data", path))
740 744 # check for files (exact match)
741 745 e = path + b'.i'
742 746 if e in self.fncache and self._exists(e):
743 747 return True
744 748 # now check for directories (prefix match)
745 749 if not path.endswith(b'/'):
746 750 path += b'/'
747 751 for e in self.fncache:
748 752 if e.startswith(path) and self._exists(e):
749 753 return True
750 754 return False
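
The chunked loading in fncache._load above reads the file in fncache_chunksize pieces and only splits entries at the last newline seen so far, carrying any partial line over into the next chunk. Below is a minimal standalone sketch of that pattern, using hypothetical names, omitting the decodedir step, and assuming a plain binary file object:

    import functools

    CHUNKSIZE = 10 ** 6  # mirrors fncache_chunksize above

    def iter_entries(fp):
        # Yield newline-terminated entries from fp, reading fixed-size chunks
        # and carrying any partial trailing line into the next chunk, as
        # fncache._load does above.
        buf = b''
        for chunk in iter(functools.partial(fp.read, CHUNKSIZE), b''):
            buf += chunk
            try:
                p = buf.rindex(b'\n')
            except ValueError:
                continue  # no newline yet; the entry spans multiple chunks
            for line in buf[: p + 1].splitlines():
                yield line
            buf = buf[p + 1 :]
        if buf:
            # fncache treats trailing data without a final newline as corruption
            raise ValueError('data does not end with a newline')

A caller would simply do `for entry in iter_entries(fp): ...` over an open binary file; the carry-over buffer is what lets entries longer than a single chunk survive the boundary.
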