revlog: add a mechanism to verify expected file position before appending...
Kyle Lippincott
r47349:e9901d01 default
@@ -0,0 +1,38 b''
from ..i18n import _
from .. import error


def get_checker(ui, revlog_name=b'changelog'):
    """Get a function that checks file handle position is as expected.

    This is used to ensure that files haven't been modified outside of our
    knowledge (such as on a networked filesystem, if `hg debuglocks` was used,
    or if something wrote to .hg while ignoring the locks).

    Due to revlogs supporting a concept of buffered, delayed, or diverted
    writes, we're allowing the files to be shorter than expected (the data may
    not have been written yet), but they can't be longer.

    Please note that this check is not perfect; it can't detect all cases
    (there may be false-negatives/false-OKs), but it should never claim
    there's an issue when there isn't (false-positives/false-failures).
    """

    vpos = ui.config(b'debug', b'revlog.verifyposition.' + revlog_name)
    # Avoid any `fh.tell` cost if this isn't enabled.
    if not vpos or vpos not in [b'log', b'warn', b'fail']:
        return None

    def _checker(fh, fn, expected):
        if fh.tell() <= expected:
            return

        msg = _(b'%s: file cursor at position %d, expected %d')
        # Always log, whether we're going to warn or fail.
        ui.log(b'debug', msg + b'\n', fn, fh.tell(), expected)
        if vpos == b'warn':
            ui.warn((msg + b'\n') % (fn, fh.tell(), expected))
        elif vpos == b'fail':
            raise error.RevlogError(msg % (fn, fh.tell(), expected))

    return _checker
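
For orientation, here is a minimal sketch of how the returned checker is meant
to be driven. The `ui` construction, the config call, and the literal offset
are illustrative assumptions, not part of this patch:

from mercurial import ui as uimod

ui = uimod.ui.load()
# Opt in to the 'warn' behavior for the changelog position check
# (hypothetical setup; normally this comes from an hgrc [debug] section).
ui.setconfig(b'debug', b'revlog.verifyposition.changelog', b'warn')

checker = get_checker(ui, b'changelog')  # None when the knob is unset
if checker is not None:
    with open('.hg/store/00changelog.i', 'rb') as fh:
        fh.seek(0, 2)  # seek to EOF, as an appender would before writing
        # Warns (and logs) only if the file is *longer* than the 121 bytes
        # we believe were written; shorter is tolerated because of
        # buffered/delayed/diverted writes.
        checker(fh, b'00changelog.i', 121)
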
@@ -0,0 +1,102 b''
#testcases skip-detection fail-if-detected

Test situations that "should" only be reproducible:
- on networked filesystems, or
- user using `hg debuglocks` to eliminate the lock file, or
- something (that doesn't respect the lock file) writing to the .hg directory
while we're running

  $ hg init a
  $ cd a

  $ cat > "$TESTTMP/waitlock_editor.sh" <<EOF
  > [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
  > f="\${WAITLOCK_FILE}"
  > start=\`date +%s\`
  > timeout=5
  > while [ \\( ! -f \$f \\) -a \\( ! -L \$f \\) ]; do
  >     now=\`date +%s\`
  >     if [ "\`expr \$now - \$start\`" -gt \$timeout ]; then
  >         echo "timeout: \$f was not created in \$timeout seconds (it is now \$(date +%s))"
  >         exit 1
  >     fi
  >     sleep 0.1
  > done
  > if [ \$# -gt 1 ]; then
  >     cat "\$@"
  > fi
  > EOF
  $ chmod +x "$TESTTMP/waitlock_editor.sh"

Things behave differently if we don't already have a 00changelog.i file when
this all starts, so let's make one.

  $ echo r0 > r0
  $ hg commit -qAm 'r0'

Start an hg commit that will take a while
  $ EDITOR_STARTED="$(pwd)/.editor_started"
  $ MISCHIEF_MANAGED="$(pwd)/.mischief_managed"
  $ JOBS_FINISHED="$(pwd)/.jobs_finished"

#if fail-if-detected
  $ cat >> .hg/hgrc << EOF
  > [debug]
  > revlog.verifyposition.changelog = fail
  > EOF
#endif

  $ echo foo > foo
  $ (WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
  >  WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
  >  HGEDITOR="$TESTTMP/waitlock_editor.sh" \
  >  hg commit -qAm 'r1 (foo)' --edit foo > .foo_commit_out 2>&1 ; touch "${JOBS_FINISHED}") &

Wait for the "editor" to actually start
  $ WAITLOCK_FILE="${EDITOR_STARTED}" "$TESTTMP/waitlock_editor.sh"

Break the locks, and make another commit.
  $ hg debuglocks -LW
  $ echo bar > bar
  $ hg commit -qAm 'r2 (bar)' bar
  $ hg debugrevlogindex -c
     rev linkrev nodeid       p1           p2
       0       0 222799e2f90b 000000000000 000000000000
       1       1 6f124f6007a0 222799e2f90b 000000000000

Awaken the editor from that first commit
  $ touch "${MISCHIEF_MANAGED}"
And wait for it to finish
  $ WAITLOCK_FILE="${JOBS_FINISHED}" "$TESTTMP/waitlock_editor.sh"

#if skip-detection
(Ensure there was no output)
  $ cat .foo_commit_out
And observe a corrupted repository -- rev 2's linkrev is 1, which should never
happen for the changelog (the linkrev should always refer to itself).
  $ hg debugrevlogindex -c
     rev linkrev nodeid       p1           p2
       0       0 222799e2f90b 000000000000 000000000000
       1       1 6f124f6007a0 222799e2f90b 000000000000
       2       1 ac80e6205bb2 222799e2f90b 000000000000
#endif

#if fail-if-detected
  $ cat .foo_commit_out
  transaction abort!
  rollback completed
  note: commit message saved in .hg/last-message.txt
  note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
  abort: 00changelog.i: file cursor at position 249, expected 121
And no corruption in the changelog.
  $ hg debugrevlogindex -c
     rev linkrev nodeid       p1           p2
       0       0 222799e2f90b 000000000000 000000000000
       1       1 6f124f6007a0 222799e2f90b 000000000000
And, because of transactions, there's none in the manifestlog either.
  $ hg debugrevlogindex -m
     rev linkrev nodeid       p1           p2
       0       0 7b7020262a56 000000000000 000000000000
       1       1 ad3fe36d86d9 7b7020262a56 000000000000
#endif
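
The corruption the skip-detection case leaves behind violates a changelog
invariant: every changelog entry's linkrev must point at its own revision.
As a hedged sketch (assuming any local `repo` object; this helper is not part
of the patch), the scan that `hg debugrevlogindex -c` lets us do by eye could
be written as:

def find_bad_linkrevs(repo):
    """Return changelog revisions whose linkrev doesn't self-reference."""
    cl = repo.changelog
    return [rev for rev in cl if cl.linkrev(rev) != rev]
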
@@ -1,343 +1,343 b''
1 """grant Mercurial the ability to operate on Git repositories. (EXPERIMENTAL)
1 """grant Mercurial the ability to operate on Git repositories. (EXPERIMENTAL)
2
2
3 This is currently super experimental. It probably will consume your
3 This is currently super experimental. It probably will consume your
4 firstborn a la Rumpelstiltskin, etc.
4 firstborn a la Rumpelstiltskin, etc.
5 """
5 """
6
6
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import os
9 import os
10
10
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 from mercurial import (
13 from mercurial import (
14 commands,
14 commands,
15 error,
15 error,
16 extensions,
16 extensions,
17 localrepo,
17 localrepo,
18 pycompat,
18 pycompat,
19 registrar,
19 registrar,
20 scmutil,
20 scmutil,
21 store,
21 store,
22 util,
22 util,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 dirstate,
26 dirstate,
27 gitlog,
27 gitlog,
28 gitutil,
28 gitutil,
29 index,
29 index,
30 )
30 )
31
31
32 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
32 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
33 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
33 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
34 # be specifying the version(s) of Mercurial they are tested with, or
34 # be specifying the version(s) of Mercurial they are tested with, or
35 # leave the attribute unspecified.
35 # leave the attribute unspecified.
36 testedwith = b'ships-with-hg-core'
36 testedwith = b'ships-with-hg-core'
37
37
38 configtable = {}
38 configtable = {}
39 configitem = registrar.configitem(configtable)
39 configitem = registrar.configitem(configtable)
40 # git.log-index-cache-miss: internal knob for testing
40 # git.log-index-cache-miss: internal knob for testing
41 configitem(
41 configitem(
42 b"git",
42 b"git",
43 b"log-index-cache-miss",
43 b"log-index-cache-miss",
44 default=False,
44 default=False,
45 )
45 )
46
46
47 getversion = gitutil.pygit2_version
47 getversion = gitutil.pygit2_version
48
48
49
49
50 # TODO: extract an interface for this in core
50 # TODO: extract an interface for this in core
51 class gitstore(object): # store.basicstore):
51 class gitstore(object): # store.basicstore):
52 def __init__(self, path, vfstype):
52 def __init__(self, path, vfstype):
53 self.vfs = vfstype(path)
53 self.vfs = vfstype(path)
54 self.path = self.vfs.base
54 self.path = self.vfs.base
55 self.createmode = store._calcmode(self.vfs)
55 self.createmode = store._calcmode(self.vfs)
56 # above lines should go away in favor of:
56 # above lines should go away in favor of:
57 # super(gitstore, self).__init__(path, vfstype)
57 # super(gitstore, self).__init__(path, vfstype)
58
58
59 self.git = gitutil.get_pygit2().Repository(
59 self.git = gitutil.get_pygit2().Repository(
60 os.path.normpath(os.path.join(path, b'..', b'.git'))
60 os.path.normpath(os.path.join(path, b'..', b'.git'))
61 )
61 )
62 self._progress_factory = lambda *args, **kwargs: None
62 self._progress_factory = lambda *args, **kwargs: None
63 self._logfn = lambda x: None
63 self._logfn = lambda x: None
64
64
65 @util.propertycache
65 @util.propertycache
66 def _db(self):
66 def _db(self):
67 # We lazy-create the database because we want to thread a
67 # We lazy-create the database because we want to thread a
68 # progress callback down to the indexing process if it's
68 # progress callback down to the indexing process if it's
69 # required, and we don't have a ui handle in makestore().
69 # required, and we don't have a ui handle in makestore().
70 return index.get_index(self.git, self._logfn, self._progress_factory)
70 return index.get_index(self.git, self._logfn, self._progress_factory)
71
71
72 def join(self, f):
72 def join(self, f):
73 """Fake store.join method for git repositories.
73 """Fake store.join method for git repositories.
74
74
75 For the most part, store.join is used for @storecache
75 For the most part, store.join is used for @storecache
76 decorators to invalidate caches when various files
76 decorators to invalidate caches when various files
77 change. We'll map the ones we care about, and ignore the rest.
77 change. We'll map the ones we care about, and ignore the rest.
78 """
78 """
79 if f in (b'00changelog.i', b'00manifest.i'):
79 if f in (b'00changelog.i', b'00manifest.i'):
80 # This is close enough: in order for the changelog cache
80 # This is close enough: in order for the changelog cache
81 # to be invalidated, HEAD will have to change.
81 # to be invalidated, HEAD will have to change.
82 return os.path.join(self.path, b'HEAD')
82 return os.path.join(self.path, b'HEAD')
83 elif f == b'lock':
83 elif f == b'lock':
84 # TODO: we probably want to map this to a git lock, I
84 # TODO: we probably want to map this to a git lock, I
85 # suspect index.lock. We should figure out what the
85 # suspect index.lock. We should figure out what the
86 # most-alike file is in git-land. For now we're risking
86 # most-alike file is in git-land. For now we're risking
87 # bad concurrency errors if another git client is used.
87 # bad concurrency errors if another git client is used.
88 return os.path.join(self.path, b'hgit-bogus-lock')
88 return os.path.join(self.path, b'hgit-bogus-lock')
89 elif f in (b'obsstore', b'phaseroots', b'narrowspec', b'bookmarks'):
89 elif f in (b'obsstore', b'phaseroots', b'narrowspec', b'bookmarks'):
90 return os.path.join(self.path, b'..', b'.hg', f)
90 return os.path.join(self.path, b'..', b'.hg', f)
91 raise NotImplementedError(b'Need to pick file for %s.' % f)
91 raise NotImplementedError(b'Need to pick file for %s.' % f)
92
92
93 def changelog(self, trypending):
93 def changelog(self, trypending, concurrencychecker):
94 # TODO we don't have a plan for trypending in hg's git support yet
94 # TODO we don't have a plan for trypending in hg's git support yet
95 return gitlog.changelog(self.git, self._db)
95 return gitlog.changelog(self.git, self._db)
96
96
97 def manifestlog(self, repo, storenarrowmatch):
97 def manifestlog(self, repo, storenarrowmatch):
98 # TODO handle storenarrowmatch and figure out if we need the repo arg
98 # TODO handle storenarrowmatch and figure out if we need the repo arg
99 return gitlog.manifestlog(self.git, self._db)
99 return gitlog.manifestlog(self.git, self._db)
100
100
101 def invalidatecaches(self):
101 def invalidatecaches(self):
102 pass
102 pass
103
103
104 def write(self, tr=None):
104 def write(self, tr=None):
105 # normally this handles things like fncache writes, which we don't have
105 # normally this handles things like fncache writes, which we don't have
106 pass
106 pass
107
107
108
108
109 def _makestore(orig, requirements, storebasepath, vfstype):
109 def _makestore(orig, requirements, storebasepath, vfstype):
110 if b'git' in requirements:
110 if b'git' in requirements:
111 if not os.path.exists(os.path.join(storebasepath, b'..', b'.git')):
111 if not os.path.exists(os.path.join(storebasepath, b'..', b'.git')):
112 raise error.Abort(
112 raise error.Abort(
113 _(
113 _(
114 b'repository specified git format in '
114 b'repository specified git format in '
115 b'.hg/requires but has no .git directory'
115 b'.hg/requires but has no .git directory'
116 )
116 )
117 )
117 )
118 # Check for presence of pygit2 only here. The assumption is that we'll
118 # Check for presence of pygit2 only here. The assumption is that we'll
119 # run this code iff we'll later need pygit2.
119 # run this code iff we'll later need pygit2.
120 if gitutil.get_pygit2() is None:
120 if gitutil.get_pygit2() is None:
121 raise error.Abort(
121 raise error.Abort(
122 _(
122 _(
123 b'the git extension requires the Python '
123 b'the git extension requires the Python '
124 b'pygit2 library to be installed'
124 b'pygit2 library to be installed'
125 )
125 )
126 )
126 )
127
127
128 return gitstore(storebasepath, vfstype)
128 return gitstore(storebasepath, vfstype)
129 return orig(requirements, storebasepath, vfstype)
129 return orig(requirements, storebasepath, vfstype)
130
130
131
131
132 class gitfilestorage(object):
132 class gitfilestorage(object):
133 def file(self, path):
133 def file(self, path):
134 if path[0:1] == b'/':
134 if path[0:1] == b'/':
135 path = path[1:]
135 path = path[1:]
136 return gitlog.filelog(self.store.git, self.store._db, path)
136 return gitlog.filelog(self.store.git, self.store._db, path)
137
137
138
138
139 def _makefilestorage(orig, requirements, features, **kwargs):
139 def _makefilestorage(orig, requirements, features, **kwargs):
140 store = kwargs['store']
140 store = kwargs['store']
141 if isinstance(store, gitstore):
141 if isinstance(store, gitstore):
142 return gitfilestorage
142 return gitfilestorage
143 return orig(requirements, features, **kwargs)
143 return orig(requirements, features, **kwargs)
144
144
145
145
146 def _setupdothg(ui, path):
146 def _setupdothg(ui, path):
147 dothg = os.path.join(path, b'.hg')
147 dothg = os.path.join(path, b'.hg')
148 if os.path.exists(dothg):
148 if os.path.exists(dothg):
149 ui.warn(_(b'git repo already initialized for hg\n'))
149 ui.warn(_(b'git repo already initialized for hg\n'))
150 else:
150 else:
151 os.mkdir(os.path.join(path, b'.hg'))
151 os.mkdir(os.path.join(path, b'.hg'))
152 # TODO is it ok to extend .git/info/exclude like this?
152 # TODO is it ok to extend .git/info/exclude like this?
153 with open(
153 with open(
154 os.path.join(path, b'.git', b'info', b'exclude'), 'ab'
154 os.path.join(path, b'.git', b'info', b'exclude'), 'ab'
155 ) as exclude:
155 ) as exclude:
156 exclude.write(b'\n.hg\n')
156 exclude.write(b'\n.hg\n')
157 with open(os.path.join(dothg, b'requires'), 'wb') as f:
157 with open(os.path.join(dothg, b'requires'), 'wb') as f:
158 f.write(b'git\n')
158 f.write(b'git\n')
159
159
160
160
161 _BMS_PREFIX = 'refs/heads/'
161 _BMS_PREFIX = 'refs/heads/'
162
162
163
163
164 class gitbmstore(object):
164 class gitbmstore(object):
165 def __init__(self, gitrepo):
165 def __init__(self, gitrepo):
166 self.gitrepo = gitrepo
166 self.gitrepo = gitrepo
167 self._aclean = True
167 self._aclean = True
168 self._active = gitrepo.references['HEAD'] # git head, not mark
168 self._active = gitrepo.references['HEAD'] # git head, not mark
169
169
170 def __contains__(self, name):
170 def __contains__(self, name):
171 return (
171 return (
172 _BMS_PREFIX + pycompat.fsdecode(name)
172 _BMS_PREFIX + pycompat.fsdecode(name)
173 ) in self.gitrepo.references
173 ) in self.gitrepo.references
174
174
175 def __iter__(self):
175 def __iter__(self):
176 for r in self.gitrepo.listall_references():
176 for r in self.gitrepo.listall_references():
177 if r.startswith(_BMS_PREFIX):
177 if r.startswith(_BMS_PREFIX):
178 yield pycompat.fsencode(r[len(_BMS_PREFIX) :])
178 yield pycompat.fsencode(r[len(_BMS_PREFIX) :])
179
179
180 def __getitem__(self, k):
180 def __getitem__(self, k):
181 return (
181 return (
182 self.gitrepo.references[_BMS_PREFIX + pycompat.fsdecode(k)]
182 self.gitrepo.references[_BMS_PREFIX + pycompat.fsdecode(k)]
183 .peel()
183 .peel()
184 .id.raw
184 .id.raw
185 )
185 )
186
186
187 def get(self, k, default=None):
187 def get(self, k, default=None):
188 try:
188 try:
189 if k in self:
189 if k in self:
190 return self[k]
190 return self[k]
191 return default
191 return default
192 except gitutil.get_pygit2().InvalidSpecError:
192 except gitutil.get_pygit2().InvalidSpecError:
193 return default
193 return default
194
194
195 @property
195 @property
196 def active(self):
196 def active(self):
197 h = self.gitrepo.references['HEAD']
197 h = self.gitrepo.references['HEAD']
198 if not isinstance(h.target, str) or not h.target.startswith(
198 if not isinstance(h.target, str) or not h.target.startswith(
199 _BMS_PREFIX
199 _BMS_PREFIX
200 ):
200 ):
201 return None
201 return None
202 return pycompat.fsencode(h.target[len(_BMS_PREFIX) :])
202 return pycompat.fsencode(h.target[len(_BMS_PREFIX) :])
203
203
204 @active.setter
204 @active.setter
205 def active(self, mark):
205 def active(self, mark):
206 githead = mark is not None and (_BMS_PREFIX + mark) or None
206 githead = mark is not None and (_BMS_PREFIX + mark) or None
207 if githead is not None and githead not in self.gitrepo.references:
207 if githead is not None and githead not in self.gitrepo.references:
208 raise AssertionError(b'bookmark %s does not exist!' % mark)
208 raise AssertionError(b'bookmark %s does not exist!' % mark)
209
209
210 self._active = githead
210 self._active = githead
211 self._aclean = False
211 self._aclean = False
212
212
213 def _writeactive(self):
213 def _writeactive(self):
214 if self._aclean:
214 if self._aclean:
215 return
215 return
216 self.gitrepo.references.create('HEAD', self._active, True)
216 self.gitrepo.references.create('HEAD', self._active, True)
217 self._aclean = True
217 self._aclean = True
218
218
219 def names(self, node):
219 def names(self, node):
220 r = []
220 r = []
221 for ref in self.gitrepo.listall_references():
221 for ref in self.gitrepo.listall_references():
222 if not ref.startswith(_BMS_PREFIX):
222 if not ref.startswith(_BMS_PREFIX):
223 continue
223 continue
224 if self.gitrepo.references[ref].peel().id.raw != node:
224 if self.gitrepo.references[ref].peel().id.raw != node:
225 continue
225 continue
226 r.append(pycompat.fsencode(ref[len(_BMS_PREFIX) :]))
226 r.append(pycompat.fsencode(ref[len(_BMS_PREFIX) :]))
227 return r
227 return r
228
228
229 # Cleanup opportunity: this is *identical* to core's bookmarks store.
229 # Cleanup opportunity: this is *identical* to core's bookmarks store.
230 def expandname(self, bname):
230 def expandname(self, bname):
231 if bname == b'.':
231 if bname == b'.':
232 if self.active:
232 if self.active:
233 return self.active
233 return self.active
234 raise error.RepoLookupError(_(b"no active bookmark"))
234 raise error.RepoLookupError(_(b"no active bookmark"))
235 return bname
235 return bname
236
236
237 def applychanges(self, repo, tr, changes):
237 def applychanges(self, repo, tr, changes):
238 """Apply a list of changes to bookmarks"""
238 """Apply a list of changes to bookmarks"""
239 # TODO: this should respect transactions, but that's going to
239 # TODO: this should respect transactions, but that's going to
240 # require enlarging the gitbmstore to know how to do in-memory
240 # require enlarging the gitbmstore to know how to do in-memory
241 # temporary writes and read those back prior to transaction
241 # temporary writes and read those back prior to transaction
242 # finalization.
242 # finalization.
243 for name, node in changes:
243 for name, node in changes:
244 if node is None:
244 if node is None:
245 self.gitrepo.references.delete(
245 self.gitrepo.references.delete(
246 _BMS_PREFIX + pycompat.fsdecode(name)
246 _BMS_PREFIX + pycompat.fsdecode(name)
247 )
247 )
248 else:
248 else:
249 self.gitrepo.references.create(
249 self.gitrepo.references.create(
250 _BMS_PREFIX + pycompat.fsdecode(name),
250 _BMS_PREFIX + pycompat.fsdecode(name),
251 gitutil.togitnode(node),
251 gitutil.togitnode(node),
252 force=True,
252 force=True,
253 )
253 )
254
254
255 def checkconflict(self, mark, force=False, target=None):
255 def checkconflict(self, mark, force=False, target=None):
256 githead = _BMS_PREFIX + mark
256 githead = _BMS_PREFIX + mark
257 cur = self.gitrepo.references['HEAD']
257 cur = self.gitrepo.references['HEAD']
258 if githead in self.gitrepo.references and not force:
258 if githead in self.gitrepo.references and not force:
259 if target:
259 if target:
260 if self.gitrepo.references[githead] == target and target == cur:
260 if self.gitrepo.references[githead] == target and target == cur:
261 # re-activating a bookmark
261 # re-activating a bookmark
262 return []
262 return []
263 # moving a bookmark - forward?
263 # moving a bookmark - forward?
264 raise NotImplementedError
264 raise NotImplementedError
265 raise error.Abort(
265 raise error.Abort(
266 _(b"bookmark '%s' already exists (use -f to force)") % mark
266 _(b"bookmark '%s' already exists (use -f to force)") % mark
267 )
267 )
268 if len(mark) > 3 and not force:
268 if len(mark) > 3 and not force:
269 try:
269 try:
270 shadowhash = scmutil.isrevsymbol(self._repo, mark)
270 shadowhash = scmutil.isrevsymbol(self._repo, mark)
271 except error.LookupError: # ambiguous identifier
271 except error.LookupError: # ambiguous identifier
272 shadowhash = False
272 shadowhash = False
273 if shadowhash:
273 if shadowhash:
274 self._repo.ui.warn(
274 self._repo.ui.warn(
275 _(
275 _(
276 b"bookmark %s matches a changeset hash\n"
276 b"bookmark %s matches a changeset hash\n"
277 b"(did you leave a -r out of an 'hg bookmark' "
277 b"(did you leave a -r out of an 'hg bookmark' "
278 b"command?)\n"
278 b"command?)\n"
279 )
279 )
280 % mark
280 % mark
281 )
281 )
282 return []
282 return []
283
283
284
284
285 def init(orig, ui, dest=b'.', **opts):
285 def init(orig, ui, dest=b'.', **opts):
286 if opts.get('git', False):
286 if opts.get('git', False):
287 path = os.path.abspath(dest)
287 path = os.path.abspath(dest)
288 # TODO: walk up looking for the git repo
288 # TODO: walk up looking for the git repo
289 _setupdothg(ui, path)
289 _setupdothg(ui, path)
290 return 0
290 return 0
291 return orig(ui, dest=dest, **opts)
291 return orig(ui, dest=dest, **opts)
292
292
293
293
294 def reposetup(ui, repo):
294 def reposetup(ui, repo):
295 if repo.local() and isinstance(repo.store, gitstore):
295 if repo.local() and isinstance(repo.store, gitstore):
296 orig = repo.__class__
296 orig = repo.__class__
297 repo.store._progress_factory = repo.ui.makeprogress
297 repo.store._progress_factory = repo.ui.makeprogress
298 if ui.configbool(b'git', b'log-index-cache-miss'):
298 if ui.configbool(b'git', b'log-index-cache-miss'):
299 repo.store._logfn = repo.ui.warn
299 repo.store._logfn = repo.ui.warn
300
300
301 class gitlocalrepo(orig):
301 class gitlocalrepo(orig):
302 def _makedirstate(self):
302 def _makedirstate(self):
303 # TODO narrow support here
303 # TODO narrow support here
304 return dirstate.gitdirstate(
304 return dirstate.gitdirstate(
305 self.ui, self.vfs.base, self.store.git
305 self.ui, self.vfs.base, self.store.git
306 )
306 )
307
307
308 def commit(self, *args, **kwargs):
308 def commit(self, *args, **kwargs):
309 ret = orig.commit(self, *args, **kwargs)
309 ret = orig.commit(self, *args, **kwargs)
310 if ret is None:
310 if ret is None:
311 # there was nothing to commit, so we should skip
311 # there was nothing to commit, so we should skip
312 # the index fixup logic we'd otherwise do.
312 # the index fixup logic we'd otherwise do.
313 return None
313 return None
314 tid = self.store.git[gitutil.togitnode(ret)].tree.id
314 tid = self.store.git[gitutil.togitnode(ret)].tree.id
315 # DANGER! This will flush any writes staged to the
315 # DANGER! This will flush any writes staged to the
316 # index in Git, but we're sidestepping the index in a
316 # index in Git, but we're sidestepping the index in a
317 # way that confuses git when we commit. Alas.
317 # way that confuses git when we commit. Alas.
318 self.store.git.index.read_tree(tid)
318 self.store.git.index.read_tree(tid)
319 self.store.git.index.write()
319 self.store.git.index.write()
320 return ret
320 return ret
321
321
322 @property
322 @property
323 def _bookmarks(self):
323 def _bookmarks(self):
324 return gitbmstore(self.store.git)
324 return gitbmstore(self.store.git)
325
325
326 repo.__class__ = gitlocalrepo
326 repo.__class__ = gitlocalrepo
327 return repo
327 return repo
328
328
329
329
330 def _featuresetup(ui, supported):
330 def _featuresetup(ui, supported):
331 # don't die on seeing a repo with the git requirement
331 # don't die on seeing a repo with the git requirement
332 supported |= {b'git'}
332 supported |= {b'git'}
333
333
334
334
335 def extsetup(ui):
335 def extsetup(ui):
336 extensions.wrapfunction(localrepo, b'makestore', _makestore)
336 extensions.wrapfunction(localrepo, b'makestore', _makestore)
337 extensions.wrapfunction(localrepo, b'makefilestorage', _makefilestorage)
337 extensions.wrapfunction(localrepo, b'makefilestorage', _makefilestorage)
338 # Inject --git flag for `hg init`
338 # Inject --git flag for `hg init`
339 entry = extensions.wrapcommand(commands.table, b'init', init)
339 entry = extensions.wrapcommand(commands.table, b'init', init)
340 entry[1].extend(
340 entry[1].extend(
341 [(b'', b'git', None, b'setup up a git repository instead of hg')]
341 [(b'', b'git', None, b'setup up a git repository instead of hg')]
342 )
342 )
343 localrepo.featuresetupfuncs.add(_featuresetup)
343 localrepo.featuresetupfuncs.add(_featuresetup)
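
Note the widened store API above: `changelog()` now receives a
`concurrencychecker` argument, which this git-backed store simply accepts and
ignores, since it keeps no revlogs. A sketch of the call-site shape that
motivates the change (the module path and wrapper name are assumptions; the
actual localrepo wiring is not shown in this view):

from mercurial.revlogutils import concurrency_checker


def _openchangelog(ui, store, trypending):
    # Build the (possibly None) checker from the debug config knob and
    # hand it to whichever store implementation is in use.
    checker = concurrency_checker.get_checker(ui, b'changelog')
    return store.changelog(trypending, checker)
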
@@ -1,618 +1,622 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 nullid,
14 nullid,
15 )
15 )
16 from .thirdparty import attr
16 from .thirdparty import attr
17
17
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 metadata,
21 metadata,
22 pycompat,
22 pycompat,
23 revlog,
23 revlog,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 dateutil,
26 dateutil,
27 stringutil,
27 stringutil,
28 )
28 )
29 from .revlogutils import flagutil
29 from .revlogutils import flagutil
30
30
31 _defaultextra = {b'branch': b'default'}
31 _defaultextra = {b'branch': b'default'}
32
32
33
33
34 def _string_escape(text):
34 def _string_escape(text):
35 """
35 """
36 >>> from .pycompat import bytechr as chr
36 >>> from .pycompat import bytechr as chr
37 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
37 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
38 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
38 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
39 >>> s
39 >>> s
40 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
40 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
41 >>> res = _string_escape(s)
41 >>> res = _string_escape(s)
42 >>> s == _string_unescape(res)
42 >>> s == _string_unescape(res)
43 True
43 True
44 """
44 """
45 # subset of the string_escape codec
45 # subset of the string_escape codec
46 text = (
46 text = (
47 text.replace(b'\\', b'\\\\')
47 text.replace(b'\\', b'\\\\')
48 .replace(b'\n', b'\\n')
48 .replace(b'\n', b'\\n')
49 .replace(b'\r', b'\\r')
49 .replace(b'\r', b'\\r')
50 )
50 )
51 return text.replace(b'\0', b'\\0')
51 return text.replace(b'\0', b'\\0')
52
52
53
53
54 def _string_unescape(text):
54 def _string_unescape(text):
55 if b'\\0' in text:
55 if b'\\0' in text:
56 # fix up \0 without getting into trouble with \\0
56 # fix up \0 without getting into trouble with \\0
57 text = text.replace(b'\\\\', b'\\\\\n')
57 text = text.replace(b'\\\\', b'\\\\\n')
58 text = text.replace(b'\\0', b'\0')
58 text = text.replace(b'\\0', b'\0')
59 text = text.replace(b'\n', b'')
59 text = text.replace(b'\n', b'')
60 return stringutil.unescapestr(text)
60 return stringutil.unescapestr(text)
61
61
62
62
63 def decodeextra(text):
63 def decodeextra(text):
64 """
64 """
65 >>> from .pycompat import bytechr as chr
65 >>> from .pycompat import bytechr as chr
66 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
66 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
67 ... ).items())
67 ... ).items())
68 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
68 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
69 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
69 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
70 ... b'baz': chr(92) + chr(0) + b'2'})
70 ... b'baz': chr(92) + chr(0) + b'2'})
71 ... ).items())
71 ... ).items())
72 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
72 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
73 """
73 """
74 extra = _defaultextra.copy()
74 extra = _defaultextra.copy()
75 for l in text.split(b'\0'):
75 for l in text.split(b'\0'):
76 if l:
76 if l:
77 k, v = _string_unescape(l).split(b':', 1)
77 k, v = _string_unescape(l).split(b':', 1)
78 extra[k] = v
78 extra[k] = v
79 return extra
79 return extra
80
80
81
81
82 def encodeextra(d):
82 def encodeextra(d):
83 # keys must be sorted to produce a deterministic changelog entry
83 # keys must be sorted to produce a deterministic changelog entry
84 items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
84 items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
85 return b"\0".join(items)
85 return b"\0".join(items)
86
86
87
87
88 def stripdesc(desc):
88 def stripdesc(desc):
89 """strip trailing whitespace and leading and trailing empty lines"""
89 """strip trailing whitespace and leading and trailing empty lines"""
90 return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
90 return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
91
91
92
92
93 class appender(object):
93 class appender(object):
94 """the changelog index must be updated last on disk, so we use this class
94 """the changelog index must be updated last on disk, so we use this class
95 to delay writes to it"""
95 to delay writes to it"""
96
96
97 def __init__(self, vfs, name, mode, buf):
97 def __init__(self, vfs, name, mode, buf):
98 self.data = buf
98 self.data = buf
99 fp = vfs(name, mode)
99 fp = vfs(name, mode)
100 self.fp = fp
100 self.fp = fp
101 self.offset = fp.tell()
101 self.offset = fp.tell()
102 self.size = vfs.fstat(fp).st_size
102 self.size = vfs.fstat(fp).st_size
103 self._end = self.size
103 self._end = self.size
104
104
105 def end(self):
105 def end(self):
106 return self._end
106 return self._end
107
107
108 def tell(self):
108 def tell(self):
109 return self.offset
109 return self.offset
110
110
111 def flush(self):
111 def flush(self):
112 pass
112 pass
113
113
114 @property
114 @property
115 def closed(self):
115 def closed(self):
116 return self.fp.closed
116 return self.fp.closed
117
117
118 def close(self):
118 def close(self):
119 self.fp.close()
119 self.fp.close()
120
120
121 def seek(self, offset, whence=0):
121 def seek(self, offset, whence=0):
122 '''virtual file offset spans real file and data'''
122 '''virtual file offset spans real file and data'''
123 if whence == 0:
123 if whence == 0:
124 self.offset = offset
124 self.offset = offset
125 elif whence == 1:
125 elif whence == 1:
126 self.offset += offset
126 self.offset += offset
127 elif whence == 2:
127 elif whence == 2:
128 self.offset = self.end() + offset
128 self.offset = self.end() + offset
129 if self.offset < self.size:
129 if self.offset < self.size:
130 self.fp.seek(self.offset)
130 self.fp.seek(self.offset)
131
131
132 def read(self, count=-1):
132 def read(self, count=-1):
133 '''only trick here is reads that span real file and data'''
133 '''only trick here is reads that span real file and data'''
134 ret = b""
134 ret = b""
135 if self.offset < self.size:
135 if self.offset < self.size:
136 s = self.fp.read(count)
136 s = self.fp.read(count)
137 ret = s
137 ret = s
138 self.offset += len(s)
138 self.offset += len(s)
139 if count > 0:
139 if count > 0:
140 count -= len(s)
140 count -= len(s)
141 if count != 0:
141 if count != 0:
142 doff = self.offset - self.size
142 doff = self.offset - self.size
143 self.data.insert(0, b"".join(self.data))
143 self.data.insert(0, b"".join(self.data))
144 del self.data[1:]
144 del self.data[1:]
145 s = self.data[0][doff : doff + count]
145 s = self.data[0][doff : doff + count]
146 self.offset += len(s)
146 self.offset += len(s)
147 ret += s
147 ret += s
148 return ret
148 return ret
149
149
150 def write(self, s):
150 def write(self, s):
151 self.data.append(bytes(s))
151 self.data.append(bytes(s))
152 self.offset += len(s)
152 self.offset += len(s)
153 self._end += len(s)
153 self._end += len(s)
154
154
155 def __enter__(self):
155 def __enter__(self):
156 self.fp.__enter__()
156 self.fp.__enter__()
157 return self
157 return self
158
158
159 def __exit__(self, *args):
159 def __exit__(self, *args):
160 return self.fp.__exit__(*args)
160 return self.fp.__exit__(*args)
161
161
162
162
163 class _divertopener(object):
163 class _divertopener(object):
164 def __init__(self, opener, target):
164 def __init__(self, opener, target):
165 self._opener = opener
165 self._opener = opener
166 self._target = target
166 self._target = target
167
167
168 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
168 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
169 if name != self._target:
169 if name != self._target:
170 return self._opener(name, mode, **kwargs)
170 return self._opener(name, mode, **kwargs)
171 return self._opener(name + b".a", mode, **kwargs)
171 return self._opener(name + b".a", mode, **kwargs)
172
172
173 def __getattr__(self, attr):
173 def __getattr__(self, attr):
174 return getattr(self._opener, attr)
174 return getattr(self._opener, attr)
175
175
176
176
177 def _delayopener(opener, target, buf):
177 def _delayopener(opener, target, buf):
178 """build an opener that stores chunks in 'buf' instead of 'target'"""
178 """build an opener that stores chunks in 'buf' instead of 'target'"""
179
179
180 def _delay(name, mode=b'r', checkambig=False, **kwargs):
180 def _delay(name, mode=b'r', checkambig=False, **kwargs):
181 if name != target:
181 if name != target:
182 return opener(name, mode, **kwargs)
182 return opener(name, mode, **kwargs)
183 assert not kwargs
183 assert not kwargs
184 return appender(opener, name, mode, buf)
184 return appender(opener, name, mode, buf)
185
185
186 return _delay
186 return _delay
187
187
188
188
189 @attr.s
189 @attr.s
190 class _changelogrevision(object):
190 class _changelogrevision(object):
191 # Extensions might modify _defaultextra, so let the constructor below pass
191 # Extensions might modify _defaultextra, so let the constructor below pass
192 # it in
192 # it in
193 extra = attr.ib()
193 extra = attr.ib()
194 manifest = attr.ib(default=nullid)
194 manifest = attr.ib(default=nullid)
195 user = attr.ib(default=b'')
195 user = attr.ib(default=b'')
196 date = attr.ib(default=(0, 0))
196 date = attr.ib(default=(0, 0))
197 files = attr.ib(default=attr.Factory(list))
197 files = attr.ib(default=attr.Factory(list))
198 filesadded = attr.ib(default=None)
198 filesadded = attr.ib(default=None)
199 filesremoved = attr.ib(default=None)
199 filesremoved = attr.ib(default=None)
200 p1copies = attr.ib(default=None)
200 p1copies = attr.ib(default=None)
201 p2copies = attr.ib(default=None)
201 p2copies = attr.ib(default=None)
202 description = attr.ib(default=b'')
202 description = attr.ib(default=b'')
203 branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
203 branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
204
204
205
205
206 class changelogrevision(object):
206 class changelogrevision(object):
207 """Holds results of a parsed changelog revision.
207 """Holds results of a parsed changelog revision.
208
208
209 Changelog revisions consist of multiple pieces of data, including
209 Changelog revisions consist of multiple pieces of data, including
210 the manifest node, user, and date. This object exposes a view into
210 the manifest node, user, and date. This object exposes a view into
211 the parsed object.
211 the parsed object.
212 """
212 """
213
213
214 __slots__ = (
214 __slots__ = (
215 '_offsets',
215 '_offsets',
216 '_text',
216 '_text',
217 '_sidedata',
217 '_sidedata',
218 '_cpsd',
218 '_cpsd',
219 '_changes',
219 '_changes',
220 )
220 )
221
221
222 def __new__(cls, text, sidedata, cpsd):
222 def __new__(cls, text, sidedata, cpsd):
223 if not text:
223 if not text:
224 return _changelogrevision(extra=_defaultextra)
224 return _changelogrevision(extra=_defaultextra)
225
225
226 self = super(changelogrevision, cls).__new__(cls)
226 self = super(changelogrevision, cls).__new__(cls)
227 # We could return here and implement the following as an __init__.
227 # We could return here and implement the following as an __init__.
228 # But doing it here is equivalent and saves an extra function call.
228 # But doing it here is equivalent and saves an extra function call.
229
229
230 # format used:
230 # format used:
231 # nodeid\n : manifest node in ascii
231 # nodeid\n : manifest node in ascii
232 # user\n : user, no \n or \r allowed
232 # user\n : user, no \n or \r allowed
233 # time tz extra\n : date (time is int or float, timezone is int)
233 # time tz extra\n : date (time is int or float, timezone is int)
234 # : extra is metadata, encoded and separated by '\0'
234 # : extra is metadata, encoded and separated by '\0'
235 # : older versions ignore it
235 # : older versions ignore it
236 # files\n\n : files modified by the cset, no \n or \r allowed
236 # files\n\n : files modified by the cset, no \n or \r allowed
237 # (.*) : comment (free text, ideally utf-8)
237 # (.*) : comment (free text, ideally utf-8)
238 #
238 #
239 # changelog v0 doesn't use extra
239 # changelog v0 doesn't use extra
240
240
241 nl1 = text.index(b'\n')
241 nl1 = text.index(b'\n')
242 nl2 = text.index(b'\n', nl1 + 1)
242 nl2 = text.index(b'\n', nl1 + 1)
243 nl3 = text.index(b'\n', nl2 + 1)
243 nl3 = text.index(b'\n', nl2 + 1)
244
244
245 # The list of files may be empty. Which means nl3 is the first of the
245 # The list of files may be empty. Which means nl3 is the first of the
246 # double newline that precedes the description.
246 # double newline that precedes the description.
247 if text[nl3 + 1 : nl3 + 2] == b'\n':
247 if text[nl3 + 1 : nl3 + 2] == b'\n':
248 doublenl = nl3
248 doublenl = nl3
249 else:
249 else:
250 doublenl = text.index(b'\n\n', nl3 + 1)
250 doublenl = text.index(b'\n\n', nl3 + 1)
251
251
252 self._offsets = (nl1, nl2, nl3, doublenl)
252 self._offsets = (nl1, nl2, nl3, doublenl)
253 self._text = text
253 self._text = text
254 self._sidedata = sidedata
254 self._sidedata = sidedata
255 self._cpsd = cpsd
255 self._cpsd = cpsd
256 self._changes = None
256 self._changes = None
257
257
258 return self
258 return self
259
259
260 @property
260 @property
261 def manifest(self):
261 def manifest(self):
262 return bin(self._text[0 : self._offsets[0]])
262 return bin(self._text[0 : self._offsets[0]])
263
263
264 @property
264 @property
265 def user(self):
265 def user(self):
266 off = self._offsets
266 off = self._offsets
267 return encoding.tolocal(self._text[off[0] + 1 : off[1]])
267 return encoding.tolocal(self._text[off[0] + 1 : off[1]])
268
268
269 @property
269 @property
270 def _rawdate(self):
270 def _rawdate(self):
271 off = self._offsets
271 off = self._offsets
272 dateextra = self._text[off[1] + 1 : off[2]]
272 dateextra = self._text[off[1] + 1 : off[2]]
273 return dateextra.split(b' ', 2)[0:2]
273 return dateextra.split(b' ', 2)[0:2]
274
274
275 @property
275 @property
276 def _rawextra(self):
276 def _rawextra(self):
277 off = self._offsets
277 off = self._offsets
278 dateextra = self._text[off[1] + 1 : off[2]]
278 dateextra = self._text[off[1] + 1 : off[2]]
279 fields = dateextra.split(b' ', 2)
279 fields = dateextra.split(b' ', 2)
280 if len(fields) != 3:
280 if len(fields) != 3:
281 return None
281 return None
282
282
283 return fields[2]
283 return fields[2]
284
284
285 @property
285 @property
286 def date(self):
286 def date(self):
287 raw = self._rawdate
287 raw = self._rawdate
288 time = float(raw[0])
288 time = float(raw[0])
289 # Various tools did silly things with the timezone.
289 # Various tools did silly things with the timezone.
290 try:
290 try:
291 timezone = int(raw[1])
291 timezone = int(raw[1])
292 except ValueError:
292 except ValueError:
293 timezone = 0
293 timezone = 0
294
294
295 return time, timezone
295 return time, timezone
296
296
297 @property
297 @property
298 def extra(self):
298 def extra(self):
299 raw = self._rawextra
299 raw = self._rawextra
300 if raw is None:
300 if raw is None:
301 return _defaultextra
301 return _defaultextra
302
302
303 return decodeextra(raw)
303 return decodeextra(raw)
304
304
305 @property
305 @property
306 def changes(self):
306 def changes(self):
307 if self._changes is not None:
307 if self._changes is not None:
308 return self._changes
308 return self._changes
309 if self._cpsd:
309 if self._cpsd:
310 changes = metadata.decode_files_sidedata(self._sidedata)
310 changes = metadata.decode_files_sidedata(self._sidedata)
311 else:
311 else:
312 changes = metadata.ChangingFiles(
312 changes = metadata.ChangingFiles(
313 touched=self.files or (),
313 touched=self.files or (),
314 added=self.filesadded or (),
314 added=self.filesadded or (),
315 removed=self.filesremoved or (),
315 removed=self.filesremoved or (),
316 p1_copies=self.p1copies or {},
316 p1_copies=self.p1copies or {},
317 p2_copies=self.p2copies or {},
317 p2_copies=self.p2copies or {},
318 )
318 )
319 self._changes = changes
319 self._changes = changes
320 return changes
320 return changes
321
321
322 @property
322 @property
323 def files(self):
323 def files(self):
324 if self._cpsd:
324 if self._cpsd:
325 return sorted(self.changes.touched)
325 return sorted(self.changes.touched)
326 off = self._offsets
326 off = self._offsets
327 if off[2] == off[3]:
327 if off[2] == off[3]:
328 return []
328 return []
329
329
330 return self._text[off[2] + 1 : off[3]].split(b'\n')
330 return self._text[off[2] + 1 : off[3]].split(b'\n')
331
331
332 @property
332 @property
333 def filesadded(self):
333 def filesadded(self):
334 if self._cpsd:
334 if self._cpsd:
335 return self.changes.added
335 return self.changes.added
336 else:
336 else:
337 rawindices = self.extra.get(b'filesadded')
337 rawindices = self.extra.get(b'filesadded')
338 if rawindices is None:
338 if rawindices is None:
339 return None
339 return None
340 return metadata.decodefileindices(self.files, rawindices)
340 return metadata.decodefileindices(self.files, rawindices)
341
341
342 @property
342 @property
343 def filesremoved(self):
343 def filesremoved(self):
344 if self._cpsd:
344 if self._cpsd:
345 return self.changes.removed
345 return self.changes.removed
346 else:
346 else:
347 rawindices = self.extra.get(b'filesremoved')
347 rawindices = self.extra.get(b'filesremoved')
348 if rawindices is None:
348 if rawindices is None:
349 return None
349 return None
350 return metadata.decodefileindices(self.files, rawindices)
350 return metadata.decodefileindices(self.files, rawindices)
351
351
352 @property
352 @property
353 def p1copies(self):
353 def p1copies(self):
354 if self._cpsd:
354 if self._cpsd:
355 return self.changes.copied_from_p1
355 return self.changes.copied_from_p1
356 else:
356 else:
357 rawcopies = self.extra.get(b'p1copies')
357 rawcopies = self.extra.get(b'p1copies')
358 if rawcopies is None:
358 if rawcopies is None:
359 return None
359 return None
360 return metadata.decodecopies(self.files, rawcopies)
360 return metadata.decodecopies(self.files, rawcopies)
361
361
362 @property
362 @property
363 def p2copies(self):
363 def p2copies(self):
364 if self._cpsd:
364 if self._cpsd:
365 return self.changes.copied_from_p2
365 return self.changes.copied_from_p2
366 else:
366 else:
367 rawcopies = self.extra.get(b'p2copies')
367 rawcopies = self.extra.get(b'p2copies')
368 if rawcopies is None:
368 if rawcopies is None:
369 return None
369 return None
370 return metadata.decodecopies(self.files, rawcopies)
370 return metadata.decodecopies(self.files, rawcopies)
371
371
372 @property
372 @property
373 def description(self):
373 def description(self):
374 return encoding.tolocal(self._text[self._offsets[3] + 2 :])
374 return encoding.tolocal(self._text[self._offsets[3] + 2 :])
375
375
376 @property
376 @property
377 def branchinfo(self):
377 def branchinfo(self):
378 extra = self.extra
378 extra = self.extra
379 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
379 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
380
380
381
381
382 class changelog(revlog.revlog):
382 class changelog(revlog.revlog):
383 def __init__(self, opener, trypending=False):
383 def __init__(self, opener, trypending=False, concurrencychecker=None):
384 """Load a changelog revlog using an opener.
384 """Load a changelog revlog using an opener.
385
385
386 If ``trypending`` is true, we attempt to load the index from a
386 If ``trypending`` is true, we attempt to load the index from a
387 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
387 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
388 The ``00changelog.i.a`` file contains index (and possibly inline
388 The ``00changelog.i.a`` file contains index (and possibly inline
389 revision) data for a transaction that hasn't been finalized yet.
389 revision) data for a transaction that hasn't been finalized yet.
390 It exists in a separate file to facilitate readers (such as
390 It exists in a separate file to facilitate readers (such as
391 hooks processes) accessing data before a transaction is finalized.
391 hooks processes) accessing data before a transaction is finalized.
392
393 ``concurrencychecker`` will be passed to the revlog init function, see
394 the documentation there.
392 """
395 """
393 if trypending and opener.exists(b'00changelog.i.a'):
396 if trypending and opener.exists(b'00changelog.i.a'):
394 indexfile = b'00changelog.i.a'
397 indexfile = b'00changelog.i.a'
395 else:
398 else:
396 indexfile = b'00changelog.i'
399 indexfile = b'00changelog.i'
397
400
398 datafile = b'00changelog.d'
401 datafile = b'00changelog.d'
399 revlog.revlog.__init__(
402 revlog.revlog.__init__(
400 self,
403 self,
401 opener,
404 opener,
402 indexfile,
405 indexfile,
403 datafile=datafile,
406 datafile=datafile,
404 checkambig=True,
407 checkambig=True,
405 mmaplargeindex=True,
408 mmaplargeindex=True,
406 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
409 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
410 concurrencychecker=concurrencychecker,
407 )
411 )
408
412
409 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
413 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
410 # changelogs don't benefit from generaldelta.
414 # changelogs don't benefit from generaldelta.
411
415
412 self.version &= ~revlog.FLAG_GENERALDELTA
416 self.version &= ~revlog.FLAG_GENERALDELTA
413 self._generaldelta = False
417 self._generaldelta = False
414
418
415 # Delta chains for changelogs tend to be very small because entries
419 # Delta chains for changelogs tend to be very small because entries
416 # tend to be small and don't delta well with each. So disable delta
420 # tend to be small and don't delta well with each. So disable delta
417 # chains.
421 # chains.
418 self._storedeltachains = False
422 self._storedeltachains = False
419
423
420 self._realopener = opener
424 self._realopener = opener
421 self._delayed = False
425 self._delayed = False
422 self._delaybuf = None
426 self._delaybuf = None
423 self._divert = False
427 self._divert = False
424 self._filteredrevs = frozenset()
428 self._filteredrevs = frozenset()
425 self._filteredrevs_hashcache = {}
429 self._filteredrevs_hashcache = {}
426 self._copiesstorage = opener.options.get(b'copies-storage')
430 self._copiesstorage = opener.options.get(b'copies-storage')
427
431
428 @property
432 @property
429 def filteredrevs(self):
433 def filteredrevs(self):
430 return self._filteredrevs
434 return self._filteredrevs
431
435
432 @filteredrevs.setter
436 @filteredrevs.setter
433 def filteredrevs(self, val):
437 def filteredrevs(self, val):
434 # Ensure all updates go through this function
438 # Ensure all updates go through this function
435 assert isinstance(val, frozenset)
439 assert isinstance(val, frozenset)
436 self._filteredrevs = val
440 self._filteredrevs = val
437 self._filteredrevs_hashcache = {}
441 self._filteredrevs_hashcache = {}
438
442
439 def delayupdate(self, tr):
443 def delayupdate(self, tr):
440 """delay visibility of index updates to other readers"""
444 """delay visibility of index updates to other readers"""
441
445
442 if not self._delayed:
446 if not self._delayed:
443 if len(self) == 0:
447 if len(self) == 0:
444 self._divert = True
448 self._divert = True
445 if self._realopener.exists(self.indexfile + b'.a'):
449 if self._realopener.exists(self.indexfile + b'.a'):
446 self._realopener.unlink(self.indexfile + b'.a')
450 self._realopener.unlink(self.indexfile + b'.a')
447 self.opener = _divertopener(self._realopener, self.indexfile)
451 self.opener = _divertopener(self._realopener, self.indexfile)
448 else:
452 else:
449 self._delaybuf = []
453 self._delaybuf = []
450 self.opener = _delayopener(
454 self.opener = _delayopener(
451 self._realopener, self.indexfile, self._delaybuf
455 self._realopener, self.indexfile, self._delaybuf
452 )
456 )
453 self._delayed = True
457 self._delayed = True
454 tr.addpending(b'cl-%i' % id(self), self._writepending)
458 tr.addpending(b'cl-%i' % id(self), self._writepending)
455 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
459 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
456
460
457 def _finalize(self, tr):
461 def _finalize(self, tr):
458 """finalize index updates"""
462 """finalize index updates"""
459 self._delayed = False
463 self._delayed = False
460 self.opener = self._realopener
464 self.opener = self._realopener
461 # move redirected index data back into place
465 # move redirected index data back into place
462 if self._divert:
466 if self._divert:
463 assert not self._delaybuf
467 assert not self._delaybuf
464 tmpname = self.indexfile + b".a"
468 tmpname = self.indexfile + b".a"
465 nfile = self.opener.open(tmpname)
469 nfile = self.opener.open(tmpname)
466 nfile.close()
470 nfile.close()
467 self.opener.rename(tmpname, self.indexfile, checkambig=True)
471 self.opener.rename(tmpname, self.indexfile, checkambig=True)
468 elif self._delaybuf:
472 elif self._delaybuf:
469 fp = self.opener(self.indexfile, b'a', checkambig=True)
473 fp = self.opener(self.indexfile, b'a', checkambig=True)
470 fp.write(b"".join(self._delaybuf))
474 fp.write(b"".join(self._delaybuf))
471 fp.close()
475 fp.close()
472 self._delaybuf = None
476 self._delaybuf = None
473 self._divert = False
477 self._divert = False
474 # split when we're done
478 # split when we're done
475 self._enforceinlinesize(tr)
479 self._enforceinlinesize(tr)
476
480
477 def _writepending(self, tr):
481 def _writepending(self, tr):
478 """create a file containing the unfinalized state for
482 """create a file containing the unfinalized state for
479 pretxnchangegroup"""
483 pretxnchangegroup"""
480 if self._delaybuf:
484 if self._delaybuf:
481 # make a temporary copy of the index
485 # make a temporary copy of the index
482 fp1 = self._realopener(self.indexfile)
486 fp1 = self._realopener(self.indexfile)
483 pendingfilename = self.indexfile + b".a"
487 pendingfilename = self.indexfile + b".a"
484 # register as a temp file to ensure cleanup on failure
488 # register as a temp file to ensure cleanup on failure
485 tr.registertmp(pendingfilename)
489 tr.registertmp(pendingfilename)
486 # write existing data
490 # write existing data
487 fp2 = self._realopener(pendingfilename, b"w")
491 fp2 = self._realopener(pendingfilename, b"w")
488 fp2.write(fp1.read())
492 fp2.write(fp1.read())
489 # add pending data
493 # add pending data
490 fp2.write(b"".join(self._delaybuf))
494 fp2.write(b"".join(self._delaybuf))
491 fp2.close()
495 fp2.close()
492 # switch modes so finalize can simply rename
496 # switch modes so finalize can simply rename
493 self._delaybuf = None
497 self._delaybuf = None
494 self._divert = True
498 self._divert = True
495 self.opener = _divertopener(self._realopener, self.indexfile)
499 self.opener = _divertopener(self._realopener, self.indexfile)
496
500
497 if self._divert:
501 if self._divert:
498 return True
502 return True
499
503
500 return False
504 return False
501
505
502 def _enforceinlinesize(self, tr, fp=None):
506 def _enforceinlinesize(self, tr, fp=None):
503 if not self._delayed:
507 if not self._delayed:
504 revlog.revlog._enforceinlinesize(self, tr, fp)
508 revlog.revlog._enforceinlinesize(self, tr, fp)
505
509
506 def read(self, node):
510 def read(self, node):
507 """Obtain data from a parsed changelog revision.
511 """Obtain data from a parsed changelog revision.
508
512
509 Returns a 6-tuple of:
513 Returns a 6-tuple of:
510
514
511 - manifest node in binary
515 - manifest node in binary
512 - author/user as a localstr
516 - author/user as a localstr
513 - date as a 2-tuple of (time, timezone)
517 - date as a 2-tuple of (time, timezone)
514 - list of files
518 - list of files
515 - commit message as a localstr
519 - commit message as a localstr
516 - dict of extra metadata
520 - dict of extra metadata
517
521
518 Unless you need to access all fields, consider calling
522 Unless you need to access all fields, consider calling
519 ``changelogrevision`` instead, as it is faster for partial object
523 ``changelogrevision`` instead, as it is faster for partial object
520 access.
524 access.
521 """
525 """
522 d, s = self._revisiondata(node)
526 d, s = self._revisiondata(node)
523 c = changelogrevision(
527 c = changelogrevision(
524 d, s, self._copiesstorage == b'changeset-sidedata'
528 d, s, self._copiesstorage == b'changeset-sidedata'
525 )
529 )
526 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
530 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
527
531
528 def changelogrevision(self, nodeorrev):
532 def changelogrevision(self, nodeorrev):
529 """Obtain a ``changelogrevision`` for a node or revision."""
533 """Obtain a ``changelogrevision`` for a node or revision."""
530 text, sidedata = self._revisiondata(nodeorrev)
534 text, sidedata = self._revisiondata(nodeorrev)
531 return changelogrevision(
535 return changelogrevision(
532 text, sidedata, self._copiesstorage == b'changeset-sidedata'
536 text, sidedata, self._copiesstorage == b'changeset-sidedata'
533 )
537 )
534
538
535 def readfiles(self, node):
539 def readfiles(self, node):
536 """
540 """
537 short version of read that only returns the files modified by the cset
541 short version of read that only returns the files modified by the cset
538 """
542 """
539 text = self.revision(node)
543 text = self.revision(node)
540 if not text:
544 if not text:
541 return []
545 return []
542 last = text.index(b"\n\n")
546 last = text.index(b"\n\n")
543 l = text[:last].split(b'\n')
547 l = text[:last].split(b'\n')
544 return l[3:]
548 return l[3:]
545
549
546 def add(
550 def add(
547 self,
551 self,
548 manifest,
552 manifest,
549 files,
553 files,
550 desc,
554 desc,
551 transaction,
555 transaction,
552 p1,
556 p1,
553 p2,
557 p2,
554 user,
558 user,
555 date=None,
559 date=None,
556 extra=None,
560 extra=None,
557 ):
561 ):
558 # Convert to UTF-8 encoded bytestrings as the very first
562 # Convert to UTF-8 encoded bytestrings as the very first
559 # thing: calling any method on a localstr object will turn it
563 # thing: calling any method on a localstr object will turn it
560 # into a str object and the cached UTF-8 string is thus lost.
564 # into a str object and the cached UTF-8 string is thus lost.
561 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
565 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
562
566
563 user = user.strip()
567 user = user.strip()
564 # An empty username or a username with a "\n" will make the
568 # An empty username or a username with a "\n" will make the
565 # revision text contain two "\n\n" sequences -> corrupt
569 # revision text contain two "\n\n" sequences -> corrupt
566 # repository since read cannot unpack the revision.
570 # repository since read cannot unpack the revision.
567 if not user:
571 if not user:
568 raise error.StorageError(_(b"empty username"))
572 raise error.StorageError(_(b"empty username"))
569 if b"\n" in user:
573 if b"\n" in user:
570 raise error.StorageError(
574 raise error.StorageError(
571 _(b"username %r contains a newline") % pycompat.bytestr(user)
575 _(b"username %r contains a newline") % pycompat.bytestr(user)
572 )
576 )
573
577
574 desc = stripdesc(desc)
578 desc = stripdesc(desc)
575
579
576 if date:
580 if date:
577 parseddate = b"%d %d" % dateutil.parsedate(date)
581 parseddate = b"%d %d" % dateutil.parsedate(date)
578 else:
582 else:
579 parseddate = b"%d %d" % dateutil.makedate()
583 parseddate = b"%d %d" % dateutil.makedate()
580 if extra:
584 if extra:
581 branch = extra.get(b"branch")
585 branch = extra.get(b"branch")
582 if branch in (b"default", b""):
586 if branch in (b"default", b""):
583 del extra[b"branch"]
587 del extra[b"branch"]
584 elif branch in (b".", b"null", b"tip"):
588 elif branch in (b".", b"null", b"tip"):
585 raise error.StorageError(
589 raise error.StorageError(
586 _(b'the name \'%s\' is reserved') % branch
590 _(b'the name \'%s\' is reserved') % branch
587 )
591 )
588 sortedfiles = sorted(files.touched)
592 sortedfiles = sorted(files.touched)
589 flags = 0
593 flags = 0
590 sidedata = None
594 sidedata = None
591 if self._copiesstorage == b'changeset-sidedata':
595 if self._copiesstorage == b'changeset-sidedata':
592 if files.has_copies_info:
596 if files.has_copies_info:
593 flags |= flagutil.REVIDX_HASCOPIESINFO
597 flags |= flagutil.REVIDX_HASCOPIESINFO
594 sidedata = metadata.encode_files_sidedata(files)
598 sidedata = metadata.encode_files_sidedata(files)
595
599
596 if extra:
600 if extra:
597 extra = encodeextra(extra)
601 extra = encodeextra(extra)
598 parseddate = b"%s %s" % (parseddate, extra)
602 parseddate = b"%s %s" % (parseddate, extra)
599 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
603 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
600 text = b"\n".join(l)
604 text = b"\n".join(l)
601 rev = self.addrevision(
605 rev = self.addrevision(
602 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
606 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
603 )
607 )
604 return self.node(rev)
608 return self.node(rev)
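A note on the username checks above: readers split the header from the
description at the first "\n\n", so an empty or newline-containing username
would shift that boundary and corrupt unpacking. A toy sketch (illustrative
values only):

    good = b"\n".join([b"f" * 40, b"alice", b"0 0", b"foo", b"", b"msg"])
    bad = b"\n".join([b"f" * 40, b"al\n", b"0 0", b"foo", b"", b"msg"])
    print(good.index(b"\n\n"))  # falls after the file list, as intended
    print(bad.index(b"\n\n"))   # falls right after the username: header cut short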
605
609
606 def branchinfo(self, rev):
610 def branchinfo(self, rev):
607 """return the branch name and open/close state of a revision
611 """return the branch name and open/close state of a revision
608
612
609 This function exists because creating a changectx object
613 This function exists because creating a changectx object
610 just to access this is costly."""
614 just to access this is costly."""
611 return self.changelogrevision(rev).branchinfo
615 return self.changelogrevision(rev).branchinfo
612
616
613 def _nodeduplicatecallback(self, transaction, rev):
617 def _nodeduplicatecallback(self, transaction, rev):
614 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
618 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
615 #
619 #
616 # We track them in a list to preserve their order from the source bundle
620 # We track them in a list to preserve their order from the source bundle
617 duplicates = transaction.changes.setdefault(b'revduplicates', [])
621 duplicates = transaction.changes.setdefault(b'revduplicates', [])
618 duplicates.append(rev)
622 duplicates.append(rev)
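The setdefault idiom gives every duplicate seen in one transaction the same
ordered list; a minimal standalone sketch:

    changes = {}
    for rev in (5, 7, 5):
        changes.setdefault(b'revduplicates', []).append(rev)
    print(changes[b'revduplicates'])  # [5, 7, 5] -- source order preserved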
@@ -1,2622 +1,2627 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
19 def loadconfigtable(ui, extname, configtable):
19 def loadconfigtable(ui, extname, configtable):
20 """update config item known to the ui with the extension ones"""
20 """update config item known to the ui with the extension ones"""
21 for section, items in sorted(configtable.items()):
21 for section, items in sorted(configtable.items()):
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 knownkeys = set(knownitems)
23 knownkeys = set(knownitems)
24 newkeys = set(items)
24 newkeys = set(items)
25 for key in sorted(knownkeys & newkeys):
25 for key in sorted(knownkeys & newkeys):
26 msg = b"extension '%s' overwrite config item '%s.%s'"
26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 msg %= (extname, section, key)
27 msg %= (extname, section, key)
28 ui.develwarn(msg, config=b'warn-config')
28 ui.develwarn(msg, config=b'warn-config')
29
29
30 knownitems.update(items)
30 knownitems.update(items)
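A toy sketch of the merge semantics (plain dicts stand in for ui and
itemregister, which is an assumption of the sketch): overlapping keys are
reported, then the extension's definitions win:

    known = {b'ui': {b'color': b'auto'}}
    ext = {b'ui': {b'color': b'never', b'newknob': b'1'}}
    for section, items in sorted(ext.items()):
        registry = known.setdefault(section, {})
        for key in sorted(set(registry) & set(items)):
            print("extension overwrites config item %s.%s"
                  % (section.decode(), key.decode()))
        registry.update(items)
    print(known[b'ui'])  # {b'color': b'never', b'newknob': b'1'}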
31
31
32
32
33 class configitem(object):
33 class configitem(object):
34 """represent a known config item
34 """represent a known config item
35
35
36 :section: the official config section where to find this item,
36 :section: the official config section where to find this item,
37 :name: the official name within the section,
37 :name: the official name within the section,
38 :default: default value for this item,
38 :default: default value for this item,
39 :alias: optional list of tuples as alternatives,
39 :alias: optional list of tuples as alternatives,
40 :generic: this is a generic definition; the name is matched as a regular expression.
40 :generic: this is a generic definition; the name is matched as a regular expression.
41 """
41 """
42
42
43 def __init__(
43 def __init__(
44 self,
44 self,
45 section,
45 section,
46 name,
46 name,
47 default=None,
47 default=None,
48 alias=(),
48 alias=(),
49 generic=False,
49 generic=False,
50 priority=0,
50 priority=0,
51 experimental=False,
51 experimental=False,
52 ):
52 ):
53 self.section = section
53 self.section = section
54 self.name = name
54 self.name = name
55 self.default = default
55 self.default = default
56 self.alias = list(alias)
56 self.alias = list(alias)
57 self.generic = generic
57 self.generic = generic
58 self.priority = priority
58 self.priority = priority
59 self.experimental = experimental
59 self.experimental = experimental
60 self._re = None
60 self._re = None
61 if generic:
61 if generic:
62 self._re = re.compile(self.name)
62 self._re = re.compile(self.name)
63
63
64
64
65 class itemregister(dict):
65 class itemregister(dict):
66 """A specialized dictionary that can handle wild-card selection"""
66 """A specialized dictionary that can handle wild-card selection"""
67
67
68 def __init__(self):
68 def __init__(self):
69 super(itemregister, self).__init__()
69 super(itemregister, self).__init__()
70 self._generics = set()
70 self._generics = set()
71
71
72 def update(self, other):
72 def update(self, other):
73 super(itemregister, self).update(other)
73 super(itemregister, self).update(other)
74 self._generics.update(other._generics)
74 self._generics.update(other._generics)
75
75
76 def __setitem__(self, key, item):
76 def __setitem__(self, key, item):
77 super(itemregister, self).__setitem__(key, item)
77 super(itemregister, self).__setitem__(key, item)
78 if item.generic:
78 if item.generic:
79 self._generics.add(item)
79 self._generics.add(item)
80
80
81 def get(self, key):
81 def get(self, key):
82 baseitem = super(itemregister, self).get(key)
82 baseitem = super(itemregister, self).get(key)
83 if baseitem is not None and not baseitem.generic:
83 if baseitem is not None and not baseitem.generic:
84 return baseitem
84 return baseitem
85
85
86 # search for a matching generic item
86 # search for a matching generic item
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 for item in generics:
88 for item in generics:
89 # we use 'match' instead of 'search' to make the matching simpler
89 # we use 'match' instead of 'search' to make the matching simpler
90 # for people unfamiliar with regular expressions. Having the match
90 # for people unfamiliar with regular expressions. Having the match
91 # rooted to the start of the string will produce less surprising
91 # rooted to the start of the string will produce less surprising
92 # results for users writing simple regexes for sub-attributes.
92 # results for users writing simple regexes for sub-attributes.
93 #
93 #
94 # For example using "color\..*" match produces an unsurprising
94 # For example using "color\..*" match produces an unsurprising
95 # result, while using search could suddenly match apparently
95 # result, while using search could suddenly match apparently
96 # unrelated configuration that happens to contain "color."
96 # unrelated configuration that happens to contain "color."
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 # some patterns to avoid the need to prefix most patterns with "^".
98 # some patterns to avoid the need to prefix most patterns with "^".
99 # The "^" seems more error-prone.
99 # The "^" seems more error-prone.
100 if item._re.match(key):
100 if item._re.match(key):
101 return item
101 return item
102
102
103 return None
103 return None
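The anchored-match behaviour the comment describes can be reproduced with
plain re, outside the registry:

    import re

    pattern = re.compile(br'color\..*')
    print(bool(pattern.match(b'color.mode')))        # True: a color.* key
    print(bool(pattern.match(b'pager.color.off')))   # False: match is anchored
    print(bool(pattern.search(b'pager.color.off')))  # True: search would misfire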
104
104
105
105
106 coreitems = {}
106 coreitems = {}
107
107
108
108
109 def _register(configtable, *args, **kwargs):
109 def _register(configtable, *args, **kwargs):
110 item = configitem(*args, **kwargs)
110 item = configitem(*args, **kwargs)
111 section = configtable.setdefault(item.section, itemregister())
111 section = configtable.setdefault(item.section, itemregister())
112 if item.name in section:
112 if item.name in section:
113 msg = b"duplicated config item registration for '%s.%s'"
113 msg = b"duplicated config item registration for '%s.%s'"
114 raise error.ProgrammingError(msg % (item.section, item.name))
114 raise error.ProgrammingError(msg % (item.section, item.name))
115 section[item.name] = item
115 section[item.name] = item
116
116
117
117
118 # special value for the case where the default is derived from other values
118 # special value for the case where the default is derived from other values
119 dynamicdefault = object()
119 dynamicdefault = object()
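dynamicdefault is the classic sentinel-object pattern: a fresh object() is
distinguishable from every legitimate value, including None. A self-contained
sketch (names here are illustrative, not Mercurial's):

    _dynamic = object()

    def lookup(table, key):
        value = table.get(key, _dynamic)
        if value is _dynamic:
            return b'derived-' + key  # stand-in for deriving from other values
        return value

    print(lookup({b'a': None}, b'a'))  # None: explicitly registered
    print(lookup({}, b'b'))            # b'derived-b'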
120
120
121 # Registering actual config items
121 # Registering actual config items
122
122
123
123
124 def getitemregister(configtable):
124 def getitemregister(configtable):
125 f = functools.partial(_register, configtable)
125 f = functools.partial(_register, configtable)
126 # export pseudo enum as configitem.*
126 # export pseudo enum as configitem.*
127 f.dynamicdefault = dynamicdefault
127 f.dynamicdefault = dynamicdefault
128 return f
128 return f
129
129
130
130
131 coreconfigitem = getitemregister(coreitems)
131 coreconfigitem = getitemregister(coreitems)
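getitemregister leans on two small Python features: functools.partial to bind
the target table, and the fact that partial objects accept arbitrary
attributes (used to export dynamicdefault). A minimal sketch:

    import functools

    def register(table, name, default=None):
        table[name] = default

    table = {}
    f = functools.partial(register, table)
    f.dynamicdefault = object()  # pseudo enum hung off the partial
    f(b'ui.color', default=b'auto')
    print(table)  # {b'ui.color': b'auto'}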
132
132
133
133
134 def _registerdiffopts(section, configprefix=b''):
134 def _registerdiffopts(section, configprefix=b''):
135 coreconfigitem(
135 coreconfigitem(
136 section,
136 section,
137 configprefix + b'nodates',
137 configprefix + b'nodates',
138 default=False,
138 default=False,
139 )
139 )
140 coreconfigitem(
140 coreconfigitem(
141 section,
141 section,
142 configprefix + b'showfunc',
142 configprefix + b'showfunc',
143 default=False,
143 default=False,
144 )
144 )
145 coreconfigitem(
145 coreconfigitem(
146 section,
146 section,
147 configprefix + b'unified',
147 configprefix + b'unified',
148 default=None,
148 default=None,
149 )
149 )
150 coreconfigitem(
150 coreconfigitem(
151 section,
151 section,
152 configprefix + b'git',
152 configprefix + b'git',
153 default=False,
153 default=False,
154 )
154 )
155 coreconfigitem(
155 coreconfigitem(
156 section,
156 section,
157 configprefix + b'ignorews',
157 configprefix + b'ignorews',
158 default=False,
158 default=False,
159 )
159 )
160 coreconfigitem(
160 coreconfigitem(
161 section,
161 section,
162 configprefix + b'ignorewsamount',
162 configprefix + b'ignorewsamount',
163 default=False,
163 default=False,
164 )
164 )
165 coreconfigitem(
165 coreconfigitem(
166 section,
166 section,
167 configprefix + b'ignoreblanklines',
167 configprefix + b'ignoreblanklines',
168 default=False,
168 default=False,
169 )
169 )
170 coreconfigitem(
170 coreconfigitem(
171 section,
171 section,
172 configprefix + b'ignorewseol',
172 configprefix + b'ignorewseol',
173 default=False,
173 default=False,
174 )
174 )
175 coreconfigitem(
175 coreconfigitem(
176 section,
176 section,
177 configprefix + b'nobinary',
177 configprefix + b'nobinary',
178 default=False,
178 default=False,
179 )
179 )
180 coreconfigitem(
180 coreconfigitem(
181 section,
181 section,
182 configprefix + b'noprefix',
182 configprefix + b'noprefix',
183 default=False,
183 default=False,
184 )
184 )
185 coreconfigitem(
185 coreconfigitem(
186 section,
186 section,
187 configprefix + b'word-diff',
187 configprefix + b'word-diff',
188 default=False,
188 default=False,
189 )
189 )
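The helper only composes "<configprefix><name>" within the given section, so
one call fans out into a family of keys; for instance, the
commands/commit.interactive call further down yields names like these (a
sketch that just prints the composed key names):

    prefix = b'commit.interactive.'
    for name in (b'nodates', b'showfunc', b'git'):
        print((b'commands.' + prefix + name).decode())
    # commands.commit.interactive.nodates, .showfunc, .git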
190
190
191
191
192 coreconfigitem(
192 coreconfigitem(
193 b'alias',
193 b'alias',
194 b'.*',
194 b'.*',
195 default=dynamicdefault,
195 default=dynamicdefault,
196 generic=True,
196 generic=True,
197 )
197 )
198 coreconfigitem(
198 coreconfigitem(
199 b'auth',
199 b'auth',
200 b'cookiefile',
200 b'cookiefile',
201 default=None,
201 default=None,
202 )
202 )
203 _registerdiffopts(section=b'annotate')
203 _registerdiffopts(section=b'annotate')
204 # bookmarks.pushing: internal hack for discovery
204 # bookmarks.pushing: internal hack for discovery
205 coreconfigitem(
205 coreconfigitem(
206 b'bookmarks',
206 b'bookmarks',
207 b'pushing',
207 b'pushing',
208 default=list,
208 default=list,
209 )
209 )
210 # bundle.mainreporoot: internal hack for bundlerepo
210 # bundle.mainreporoot: internal hack for bundlerepo
211 coreconfigitem(
211 coreconfigitem(
212 b'bundle',
212 b'bundle',
213 b'mainreporoot',
213 b'mainreporoot',
214 default=b'',
214 default=b'',
215 )
215 )
216 coreconfigitem(
216 coreconfigitem(
217 b'censor',
217 b'censor',
218 b'policy',
218 b'policy',
219 default=b'abort',
219 default=b'abort',
220 experimental=True,
220 experimental=True,
221 )
221 )
222 coreconfigitem(
222 coreconfigitem(
223 b'chgserver',
223 b'chgserver',
224 b'idletimeout',
224 b'idletimeout',
225 default=3600,
225 default=3600,
226 )
226 )
227 coreconfigitem(
227 coreconfigitem(
228 b'chgserver',
228 b'chgserver',
229 b'skiphash',
229 b'skiphash',
230 default=False,
230 default=False,
231 )
231 )
232 coreconfigitem(
232 coreconfigitem(
233 b'cmdserver',
233 b'cmdserver',
234 b'log',
234 b'log',
235 default=None,
235 default=None,
236 )
236 )
237 coreconfigitem(
237 coreconfigitem(
238 b'cmdserver',
238 b'cmdserver',
239 b'max-log-files',
239 b'max-log-files',
240 default=7,
240 default=7,
241 )
241 )
242 coreconfigitem(
242 coreconfigitem(
243 b'cmdserver',
243 b'cmdserver',
244 b'max-log-size',
244 b'max-log-size',
245 default=b'1 MB',
245 default=b'1 MB',
246 )
246 )
247 coreconfigitem(
247 coreconfigitem(
248 b'cmdserver',
248 b'cmdserver',
249 b'max-repo-cache',
249 b'max-repo-cache',
250 default=0,
250 default=0,
251 experimental=True,
251 experimental=True,
252 )
252 )
253 coreconfigitem(
253 coreconfigitem(
254 b'cmdserver',
254 b'cmdserver',
255 b'message-encodings',
255 b'message-encodings',
256 default=list,
256 default=list,
257 )
257 )
258 coreconfigitem(
258 coreconfigitem(
259 b'cmdserver',
259 b'cmdserver',
260 b'track-log',
260 b'track-log',
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 )
262 )
263 coreconfigitem(
263 coreconfigitem(
264 b'cmdserver',
264 b'cmdserver',
265 b'shutdown-on-interrupt',
265 b'shutdown-on-interrupt',
266 default=True,
266 default=True,
267 )
267 )
268 coreconfigitem(
268 coreconfigitem(
269 b'color',
269 b'color',
270 b'.*',
270 b'.*',
271 default=None,
271 default=None,
272 generic=True,
272 generic=True,
273 )
273 )
274 coreconfigitem(
274 coreconfigitem(
275 b'color',
275 b'color',
276 b'mode',
276 b'mode',
277 default=b'auto',
277 default=b'auto',
278 )
278 )
279 coreconfigitem(
279 coreconfigitem(
280 b'color',
280 b'color',
281 b'pagermode',
281 b'pagermode',
282 default=dynamicdefault,
282 default=dynamicdefault,
283 )
283 )
284 coreconfigitem(
284 coreconfigitem(
285 b'command-templates',
285 b'command-templates',
286 b'graphnode',
286 b'graphnode',
287 default=None,
287 default=None,
288 alias=[(b'ui', b'graphnodetemplate')],
288 alias=[(b'ui', b'graphnodetemplate')],
289 )
289 )
290 coreconfigitem(
290 coreconfigitem(
291 b'command-templates',
291 b'command-templates',
292 b'log',
292 b'log',
293 default=None,
293 default=None,
294 alias=[(b'ui', b'logtemplate')],
294 alias=[(b'ui', b'logtemplate')],
295 )
295 )
296 coreconfigitem(
296 coreconfigitem(
297 b'command-templates',
297 b'command-templates',
298 b'mergemarker',
298 b'mergemarker',
299 default=(
299 default=(
300 b'{node|short} '
300 b'{node|short} '
301 b'{ifeq(tags, "tip", "", '
301 b'{ifeq(tags, "tip", "", '
302 b'ifeq(tags, "", "", "{tags} "))}'
302 b'ifeq(tags, "", "", "{tags} "))}'
303 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{if(bookmarks, "{bookmarks} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 b'- {author|user}: {desc|firstline}'
305 b'- {author|user}: {desc|firstline}'
306 ),
306 ),
307 alias=[(b'ui', b'mergemarkertemplate')],
307 alias=[(b'ui', b'mergemarkertemplate')],
308 )
308 )
309 coreconfigitem(
309 coreconfigitem(
310 b'command-templates',
310 b'command-templates',
311 b'pre-merge-tool-output',
311 b'pre-merge-tool-output',
312 default=None,
312 default=None,
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 )
314 )
315 coreconfigitem(
315 coreconfigitem(
316 b'command-templates',
316 b'command-templates',
317 b'oneline-summary',
317 b'oneline-summary',
318 default=None,
318 default=None,
319 )
319 )
320 coreconfigitem(
320 coreconfigitem(
321 b'command-templates',
321 b'command-templates',
322 b'oneline-summary.*',
322 b'oneline-summary.*',
323 default=dynamicdefault,
323 default=dynamicdefault,
324 generic=True,
324 generic=True,
325 )
325 )
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 coreconfigitem(
327 coreconfigitem(
328 b'commands',
328 b'commands',
329 b'commit.post-status',
329 b'commit.post-status',
330 default=False,
330 default=False,
331 )
331 )
332 coreconfigitem(
332 coreconfigitem(
333 b'commands',
333 b'commands',
334 b'grep.all-files',
334 b'grep.all-files',
335 default=False,
335 default=False,
336 experimental=True,
336 experimental=True,
337 )
337 )
338 coreconfigitem(
338 coreconfigitem(
339 b'commands',
339 b'commands',
340 b'merge.require-rev',
340 b'merge.require-rev',
341 default=False,
341 default=False,
342 )
342 )
343 coreconfigitem(
343 coreconfigitem(
344 b'commands',
344 b'commands',
345 b'push.require-revs',
345 b'push.require-revs',
346 default=False,
346 default=False,
347 )
347 )
348 coreconfigitem(
348 coreconfigitem(
349 b'commands',
349 b'commands',
350 b'resolve.confirm',
350 b'resolve.confirm',
351 default=False,
351 default=False,
352 )
352 )
353 coreconfigitem(
353 coreconfigitem(
354 b'commands',
354 b'commands',
355 b'resolve.explicit-re-merge',
355 b'resolve.explicit-re-merge',
356 default=False,
356 default=False,
357 )
357 )
358 coreconfigitem(
358 coreconfigitem(
359 b'commands',
359 b'commands',
360 b'resolve.mark-check',
360 b'resolve.mark-check',
361 default=b'none',
361 default=b'none',
362 )
362 )
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 coreconfigitem(
364 coreconfigitem(
365 b'commands',
365 b'commands',
366 b'show.aliasprefix',
366 b'show.aliasprefix',
367 default=list,
367 default=list,
368 )
368 )
369 coreconfigitem(
369 coreconfigitem(
370 b'commands',
370 b'commands',
371 b'status.relative',
371 b'status.relative',
372 default=False,
372 default=False,
373 )
373 )
374 coreconfigitem(
374 coreconfigitem(
375 b'commands',
375 b'commands',
376 b'status.skipstates',
376 b'status.skipstates',
377 default=[],
377 default=[],
378 experimental=True,
378 experimental=True,
379 )
379 )
380 coreconfigitem(
380 coreconfigitem(
381 b'commands',
381 b'commands',
382 b'status.terse',
382 b'status.terse',
383 default=b'',
383 default=b'',
384 )
384 )
385 coreconfigitem(
385 coreconfigitem(
386 b'commands',
386 b'commands',
387 b'status.verbose',
387 b'status.verbose',
388 default=False,
388 default=False,
389 )
389 )
390 coreconfigitem(
390 coreconfigitem(
391 b'commands',
391 b'commands',
392 b'update.check',
392 b'update.check',
393 default=None,
393 default=None,
394 )
394 )
395 coreconfigitem(
395 coreconfigitem(
396 b'commands',
396 b'commands',
397 b'update.requiredest',
397 b'update.requiredest',
398 default=False,
398 default=False,
399 )
399 )
400 coreconfigitem(
400 coreconfigitem(
401 b'committemplate',
401 b'committemplate',
402 b'.*',
402 b'.*',
403 default=None,
403 default=None,
404 generic=True,
404 generic=True,
405 )
405 )
406 coreconfigitem(
406 coreconfigitem(
407 b'convert',
407 b'convert',
408 b'bzr.saverev',
408 b'bzr.saverev',
409 default=True,
409 default=True,
410 )
410 )
411 coreconfigitem(
411 coreconfigitem(
412 b'convert',
412 b'convert',
413 b'cvsps.cache',
413 b'cvsps.cache',
414 default=True,
414 default=True,
415 )
415 )
416 coreconfigitem(
416 coreconfigitem(
417 b'convert',
417 b'convert',
418 b'cvsps.fuzz',
418 b'cvsps.fuzz',
419 default=60,
419 default=60,
420 )
420 )
421 coreconfigitem(
421 coreconfigitem(
422 b'convert',
422 b'convert',
423 b'cvsps.logencoding',
423 b'cvsps.logencoding',
424 default=None,
424 default=None,
425 )
425 )
426 coreconfigitem(
426 coreconfigitem(
427 b'convert',
427 b'convert',
428 b'cvsps.mergefrom',
428 b'cvsps.mergefrom',
429 default=None,
429 default=None,
430 )
430 )
431 coreconfigitem(
431 coreconfigitem(
432 b'convert',
432 b'convert',
433 b'cvsps.mergeto',
433 b'cvsps.mergeto',
434 default=None,
434 default=None,
435 )
435 )
436 coreconfigitem(
436 coreconfigitem(
437 b'convert',
437 b'convert',
438 b'git.committeractions',
438 b'git.committeractions',
439 default=lambda: [b'messagedifferent'],
439 default=lambda: [b'messagedifferent'],
440 )
440 )
441 coreconfigitem(
441 coreconfigitem(
442 b'convert',
442 b'convert',
443 b'git.extrakeys',
443 b'git.extrakeys',
444 default=list,
444 default=list,
445 )
445 )
446 coreconfigitem(
446 coreconfigitem(
447 b'convert',
447 b'convert',
448 b'git.findcopiesharder',
448 b'git.findcopiesharder',
449 default=False,
449 default=False,
450 )
450 )
451 coreconfigitem(
451 coreconfigitem(
452 b'convert',
452 b'convert',
453 b'git.remoteprefix',
453 b'git.remoteprefix',
454 default=b'remote',
454 default=b'remote',
455 )
455 )
456 coreconfigitem(
456 coreconfigitem(
457 b'convert',
457 b'convert',
458 b'git.renamelimit',
458 b'git.renamelimit',
459 default=400,
459 default=400,
460 )
460 )
461 coreconfigitem(
461 coreconfigitem(
462 b'convert',
462 b'convert',
463 b'git.saverev',
463 b'git.saverev',
464 default=True,
464 default=True,
465 )
465 )
466 coreconfigitem(
466 coreconfigitem(
467 b'convert',
467 b'convert',
468 b'git.similarity',
468 b'git.similarity',
469 default=50,
469 default=50,
470 )
470 )
471 coreconfigitem(
471 coreconfigitem(
472 b'convert',
472 b'convert',
473 b'git.skipsubmodules',
473 b'git.skipsubmodules',
474 default=False,
474 default=False,
475 )
475 )
476 coreconfigitem(
476 coreconfigitem(
477 b'convert',
477 b'convert',
478 b'hg.clonebranches',
478 b'hg.clonebranches',
479 default=False,
479 default=False,
480 )
480 )
481 coreconfigitem(
481 coreconfigitem(
482 b'convert',
482 b'convert',
483 b'hg.ignoreerrors',
483 b'hg.ignoreerrors',
484 default=False,
484 default=False,
485 )
485 )
486 coreconfigitem(
486 coreconfigitem(
487 b'convert',
487 b'convert',
488 b'hg.preserve-hash',
488 b'hg.preserve-hash',
489 default=False,
489 default=False,
490 )
490 )
491 coreconfigitem(
491 coreconfigitem(
492 b'convert',
492 b'convert',
493 b'hg.revs',
493 b'hg.revs',
494 default=None,
494 default=None,
495 )
495 )
496 coreconfigitem(
496 coreconfigitem(
497 b'convert',
497 b'convert',
498 b'hg.saverev',
498 b'hg.saverev',
499 default=False,
499 default=False,
500 )
500 )
501 coreconfigitem(
501 coreconfigitem(
502 b'convert',
502 b'convert',
503 b'hg.sourcename',
503 b'hg.sourcename',
504 default=None,
504 default=None,
505 )
505 )
506 coreconfigitem(
506 coreconfigitem(
507 b'convert',
507 b'convert',
508 b'hg.startrev',
508 b'hg.startrev',
509 default=None,
509 default=None,
510 )
510 )
511 coreconfigitem(
511 coreconfigitem(
512 b'convert',
512 b'convert',
513 b'hg.tagsbranch',
513 b'hg.tagsbranch',
514 default=b'default',
514 default=b'default',
515 )
515 )
516 coreconfigitem(
516 coreconfigitem(
517 b'convert',
517 b'convert',
518 b'hg.usebranchnames',
518 b'hg.usebranchnames',
519 default=True,
519 default=True,
520 )
520 )
521 coreconfigitem(
521 coreconfigitem(
522 b'convert',
522 b'convert',
523 b'ignoreancestorcheck',
523 b'ignoreancestorcheck',
524 default=False,
524 default=False,
525 experimental=True,
525 experimental=True,
526 )
526 )
527 coreconfigitem(
527 coreconfigitem(
528 b'convert',
528 b'convert',
529 b'localtimezone',
529 b'localtimezone',
530 default=False,
530 default=False,
531 )
531 )
532 coreconfigitem(
532 coreconfigitem(
533 b'convert',
533 b'convert',
534 b'p4.encoding',
534 b'p4.encoding',
535 default=dynamicdefault,
535 default=dynamicdefault,
536 )
536 )
537 coreconfigitem(
537 coreconfigitem(
538 b'convert',
538 b'convert',
539 b'p4.startrev',
539 b'p4.startrev',
540 default=0,
540 default=0,
541 )
541 )
542 coreconfigitem(
542 coreconfigitem(
543 b'convert',
543 b'convert',
544 b'skiptags',
544 b'skiptags',
545 default=False,
545 default=False,
546 )
546 )
547 coreconfigitem(
547 coreconfigitem(
548 b'convert',
548 b'convert',
549 b'svn.debugsvnlog',
549 b'svn.debugsvnlog',
550 default=True,
550 default=True,
551 )
551 )
552 coreconfigitem(
552 coreconfigitem(
553 b'convert',
553 b'convert',
554 b'svn.trunk',
554 b'svn.trunk',
555 default=None,
555 default=None,
556 )
556 )
557 coreconfigitem(
557 coreconfigitem(
558 b'convert',
558 b'convert',
559 b'svn.tags',
559 b'svn.tags',
560 default=None,
560 default=None,
561 )
561 )
562 coreconfigitem(
562 coreconfigitem(
563 b'convert',
563 b'convert',
564 b'svn.branches',
564 b'svn.branches',
565 default=None,
565 default=None,
566 )
566 )
567 coreconfigitem(
567 coreconfigitem(
568 b'convert',
568 b'convert',
569 b'svn.startrev',
569 b'svn.startrev',
570 default=0,
570 default=0,
571 )
571 )
572 coreconfigitem(
572 coreconfigitem(
573 b'convert',
573 b'convert',
574 b'svn.dangerous-set-commit-dates',
574 b'svn.dangerous-set-commit-dates',
575 default=False,
575 default=False,
576 )
576 )
577 coreconfigitem(
577 coreconfigitem(
578 b'debug',
578 b'debug',
579 b'dirstate.delaywrite',
579 b'dirstate.delaywrite',
580 default=0,
580 default=0,
581 )
581 )
582 coreconfigitem(
582 coreconfigitem(
583 b'debug',
584 b'revlog.verifyposition.changelog',
585 default=b'',
586 )
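This registers the knob consumed by the new file-position checker; a
repository opts in through its hgrc with one of the three recognized values
(log, warn, fail), e.g.:

    [debug]
    revlog.verifyposition.changelog = warn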
587 coreconfigitem(
583 b'defaults',
588 b'defaults',
584 b'.*',
589 b'.*',
585 default=None,
590 default=None,
586 generic=True,
591 generic=True,
587 )
592 )
588 coreconfigitem(
593 coreconfigitem(
589 b'devel',
594 b'devel',
590 b'all-warnings',
595 b'all-warnings',
591 default=False,
596 default=False,
592 )
597 )
593 coreconfigitem(
598 coreconfigitem(
594 b'devel',
599 b'devel',
595 b'bundle2.debug',
600 b'bundle2.debug',
596 default=False,
601 default=False,
597 )
602 )
598 coreconfigitem(
603 coreconfigitem(
599 b'devel',
604 b'devel',
600 b'bundle.delta',
605 b'bundle.delta',
601 default=b'',
606 default=b'',
602 )
607 )
603 coreconfigitem(
608 coreconfigitem(
604 b'devel',
609 b'devel',
605 b'cache-vfs',
610 b'cache-vfs',
606 default=None,
611 default=None,
607 )
612 )
608 coreconfigitem(
613 coreconfigitem(
609 b'devel',
614 b'devel',
610 b'check-locks',
615 b'check-locks',
611 default=False,
616 default=False,
612 )
617 )
613 coreconfigitem(
618 coreconfigitem(
614 b'devel',
619 b'devel',
615 b'check-relroot',
620 b'check-relroot',
616 default=False,
621 default=False,
617 )
622 )
618 # Track copy information for all files, not just "added" ones (very slow)
623 # Track copy information for all files, not just "added" ones (very slow)
619 coreconfigitem(
624 coreconfigitem(
620 b'devel',
625 b'devel',
621 b'copy-tracing.trace-all-files',
626 b'copy-tracing.trace-all-files',
622 default=False,
627 default=False,
623 )
628 )
624 coreconfigitem(
629 coreconfigitem(
625 b'devel',
630 b'devel',
626 b'default-date',
631 b'default-date',
627 default=None,
632 default=None,
628 )
633 )
629 coreconfigitem(
634 coreconfigitem(
630 b'devel',
635 b'devel',
631 b'deprec-warn',
636 b'deprec-warn',
632 default=False,
637 default=False,
633 )
638 )
634 coreconfigitem(
639 coreconfigitem(
635 b'devel',
640 b'devel',
636 b'disableloaddefaultcerts',
641 b'disableloaddefaultcerts',
637 default=False,
642 default=False,
638 )
643 )
639 coreconfigitem(
644 coreconfigitem(
640 b'devel',
645 b'devel',
641 b'warn-empty-changegroup',
646 b'warn-empty-changegroup',
642 default=False,
647 default=False,
643 )
648 )
644 coreconfigitem(
649 coreconfigitem(
645 b'devel',
650 b'devel',
646 b'legacy.exchange',
651 b'legacy.exchange',
647 default=list,
652 default=list,
648 )
653 )
649 # When True, revlogs use a special reference version of the nodemap, which is
654 # When True, revlogs use a special reference version of the nodemap, which is
650 # not performant but is "known" to behave properly.
655 # not performant but is "known" to behave properly.
651 coreconfigitem(
656 coreconfigitem(
652 b'devel',
657 b'devel',
653 b'persistent-nodemap',
658 b'persistent-nodemap',
654 default=False,
659 default=False,
655 )
660 )
656 coreconfigitem(
661 coreconfigitem(
657 b'devel',
662 b'devel',
658 b'servercafile',
663 b'servercafile',
659 default=b'',
664 default=b'',
660 )
665 )
661 coreconfigitem(
666 coreconfigitem(
662 b'devel',
667 b'devel',
663 b'serverexactprotocol',
668 b'serverexactprotocol',
664 default=b'',
669 default=b'',
665 )
670 )
666 coreconfigitem(
671 coreconfigitem(
667 b'devel',
672 b'devel',
668 b'serverrequirecert',
673 b'serverrequirecert',
669 default=False,
674 default=False,
670 )
675 )
671 coreconfigitem(
676 coreconfigitem(
672 b'devel',
677 b'devel',
673 b'strip-obsmarkers',
678 b'strip-obsmarkers',
674 default=True,
679 default=True,
675 )
680 )
676 coreconfigitem(
681 coreconfigitem(
677 b'devel',
682 b'devel',
678 b'warn-config',
683 b'warn-config',
679 default=None,
684 default=None,
680 )
685 )
681 coreconfigitem(
686 coreconfigitem(
682 b'devel',
687 b'devel',
683 b'warn-config-default',
688 b'warn-config-default',
684 default=None,
689 default=None,
685 )
690 )
686 coreconfigitem(
691 coreconfigitem(
687 b'devel',
692 b'devel',
688 b'user.obsmarker',
693 b'user.obsmarker',
689 default=None,
694 default=None,
690 )
695 )
691 coreconfigitem(
696 coreconfigitem(
692 b'devel',
697 b'devel',
693 b'warn-config-unknown',
698 b'warn-config-unknown',
694 default=None,
699 default=None,
695 )
700 )
696 coreconfigitem(
701 coreconfigitem(
697 b'devel',
702 b'devel',
698 b'debug.copies',
703 b'debug.copies',
699 default=False,
704 default=False,
700 )
705 )
701 coreconfigitem(
706 coreconfigitem(
702 b'devel',
707 b'devel',
703 b'copy-tracing.multi-thread',
708 b'copy-tracing.multi-thread',
704 default=True,
709 default=True,
705 )
710 )
706 coreconfigitem(
711 coreconfigitem(
707 b'devel',
712 b'devel',
708 b'debug.extensions',
713 b'debug.extensions',
709 default=False,
714 default=False,
710 )
715 )
711 coreconfigitem(
716 coreconfigitem(
712 b'devel',
717 b'devel',
713 b'debug.repo-filters',
718 b'debug.repo-filters',
714 default=False,
719 default=False,
715 )
720 )
716 coreconfigitem(
721 coreconfigitem(
717 b'devel',
722 b'devel',
718 b'debug.peer-request',
723 b'debug.peer-request',
719 default=False,
724 default=False,
720 )
725 )
721 # If discovery.exchange-heads is False, the discovery will not start with
726 # If discovery.exchange-heads is False, the discovery will not start with
722 # remote head fetching and local head querying.
727 # remote head fetching and local head querying.
723 coreconfigitem(
728 coreconfigitem(
724 b'devel',
729 b'devel',
725 b'discovery.exchange-heads',
730 b'discovery.exchange-heads',
726 default=True,
731 default=True,
727 )
732 )
728 # If discovery.grow-sample is False, the sample size used in set discovery will
733 # If discovery.grow-sample is False, the sample size used in set discovery will
729 # not be increased throughout the process.
734 # not be increased throughout the process.
730 coreconfigitem(
735 coreconfigitem(
731 b'devel',
736 b'devel',
732 b'discovery.grow-sample',
737 b'discovery.grow-sample',
733 default=True,
738 default=True,
734 )
739 )
735 # discovery.grow-sample.rate controls the rate at which the sample grows
740 # discovery.grow-sample.rate controls the rate at which the sample grows
736 coreconfigitem(
741 coreconfigitem(
737 b'devel',
742 b'devel',
738 b'discovery.grow-sample.rate',
743 b'discovery.grow-sample.rate',
739 default=1.05,
744 default=1.05,
740 )
745 )
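Assuming the rate multiplies the sample size once per discovery round (an
interpretation made for this sketch, not a statement about the discovery
code), 1.05 grows a 200-entry sample only gently:

    size = 200
    for _ in range(5):
        size = int(size * 1.05)
    print(size)  # 254 after five rounds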
741 # If discovery.randomize is False, random sampling during discovery is
746 # If discovery.randomize is False, random sampling during discovery is
742 # deterministic. It is meant for integration tests.
747 # deterministic. It is meant for integration tests.
743 coreconfigitem(
748 coreconfigitem(
744 b'devel',
749 b'devel',
745 b'discovery.randomize',
750 b'discovery.randomize',
746 default=True,
751 default=True,
747 )
752 )
748 # Control the initial size of the discovery sample
753 # Control the initial size of the discovery sample
749 coreconfigitem(
754 coreconfigitem(
750 b'devel',
755 b'devel',
751 b'discovery.sample-size',
756 b'discovery.sample-size',
752 default=200,
757 default=200,
753 )
758 )
754 # Control the sample size of the initial discovery query
759 # Control the sample size of the initial discovery query
755 coreconfigitem(
760 coreconfigitem(
756 b'devel',
761 b'devel',
757 b'discovery.sample-size.initial',
762 b'discovery.sample-size.initial',
758 default=100,
763 default=100,
759 )
764 )
760 _registerdiffopts(section=b'diff')
765 _registerdiffopts(section=b'diff')
761 coreconfigitem(
766 coreconfigitem(
762 b'diff',
767 b'diff',
763 b'merge',
768 b'merge',
764 default=False,
769 default=False,
765 experimental=True,
770 experimental=True,
766 )
771 )
767 coreconfigitem(
772 coreconfigitem(
768 b'email',
773 b'email',
769 b'bcc',
774 b'bcc',
770 default=None,
775 default=None,
771 )
776 )
772 coreconfigitem(
777 coreconfigitem(
773 b'email',
778 b'email',
774 b'cc',
779 b'cc',
775 default=None,
780 default=None,
776 )
781 )
777 coreconfigitem(
782 coreconfigitem(
778 b'email',
783 b'email',
779 b'charsets',
784 b'charsets',
780 default=list,
785 default=list,
781 )
786 )
782 coreconfigitem(
787 coreconfigitem(
783 b'email',
788 b'email',
784 b'from',
789 b'from',
785 default=None,
790 default=None,
786 )
791 )
787 coreconfigitem(
792 coreconfigitem(
788 b'email',
793 b'email',
789 b'method',
794 b'method',
790 default=b'smtp',
795 default=b'smtp',
791 )
796 )
792 coreconfigitem(
797 coreconfigitem(
793 b'email',
798 b'email',
794 b'reply-to',
799 b'reply-to',
795 default=None,
800 default=None,
796 )
801 )
797 coreconfigitem(
802 coreconfigitem(
798 b'email',
803 b'email',
799 b'to',
804 b'to',
800 default=None,
805 default=None,
801 )
806 )
802 coreconfigitem(
807 coreconfigitem(
803 b'experimental',
808 b'experimental',
804 b'archivemetatemplate',
809 b'archivemetatemplate',
805 default=dynamicdefault,
810 default=dynamicdefault,
806 )
811 )
807 coreconfigitem(
812 coreconfigitem(
808 b'experimental',
813 b'experimental',
809 b'auto-publish',
814 b'auto-publish',
810 default=b'publish',
815 default=b'publish',
811 )
816 )
812 coreconfigitem(
817 coreconfigitem(
813 b'experimental',
818 b'experimental',
814 b'bundle-phases',
819 b'bundle-phases',
815 default=False,
820 default=False,
816 )
821 )
817 coreconfigitem(
822 coreconfigitem(
818 b'experimental',
823 b'experimental',
819 b'bundle2-advertise',
824 b'bundle2-advertise',
820 default=True,
825 default=True,
821 )
826 )
822 coreconfigitem(
827 coreconfigitem(
823 b'experimental',
828 b'experimental',
824 b'bundle2-output-capture',
829 b'bundle2-output-capture',
825 default=False,
830 default=False,
826 )
831 )
827 coreconfigitem(
832 coreconfigitem(
828 b'experimental',
833 b'experimental',
829 b'bundle2.pushback',
834 b'bundle2.pushback',
830 default=False,
835 default=False,
831 )
836 )
832 coreconfigitem(
837 coreconfigitem(
833 b'experimental',
838 b'experimental',
834 b'bundle2lazylocking',
839 b'bundle2lazylocking',
835 default=False,
840 default=False,
836 )
841 )
837 coreconfigitem(
842 coreconfigitem(
838 b'experimental',
843 b'experimental',
839 b'bundlecomplevel',
844 b'bundlecomplevel',
840 default=None,
845 default=None,
841 )
846 )
842 coreconfigitem(
847 coreconfigitem(
843 b'experimental',
848 b'experimental',
844 b'bundlecomplevel.bzip2',
849 b'bundlecomplevel.bzip2',
845 default=None,
850 default=None,
846 )
851 )
847 coreconfigitem(
852 coreconfigitem(
848 b'experimental',
853 b'experimental',
849 b'bundlecomplevel.gzip',
854 b'bundlecomplevel.gzip',
850 default=None,
855 default=None,
851 )
856 )
852 coreconfigitem(
857 coreconfigitem(
853 b'experimental',
858 b'experimental',
854 b'bundlecomplevel.none',
859 b'bundlecomplevel.none',
855 default=None,
860 default=None,
856 )
861 )
857 coreconfigitem(
862 coreconfigitem(
858 b'experimental',
863 b'experimental',
859 b'bundlecomplevel.zstd',
864 b'bundlecomplevel.zstd',
860 default=None,
865 default=None,
861 )
866 )
862 coreconfigitem(
867 coreconfigitem(
863 b'experimental',
868 b'experimental',
864 b'changegroup3',
869 b'changegroup3',
865 default=False,
870 default=False,
866 )
871 )
867 coreconfigitem(
872 coreconfigitem(
868 b'experimental',
873 b'experimental',
869 b'cleanup-as-archived',
874 b'cleanup-as-archived',
870 default=False,
875 default=False,
871 )
876 )
872 coreconfigitem(
877 coreconfigitem(
873 b'experimental',
878 b'experimental',
874 b'clientcompressionengines',
879 b'clientcompressionengines',
875 default=list,
880 default=list,
876 )
881 )
877 coreconfigitem(
882 coreconfigitem(
878 b'experimental',
883 b'experimental',
879 b'copytrace',
884 b'copytrace',
880 default=b'on',
885 default=b'on',
881 )
886 )
882 coreconfigitem(
887 coreconfigitem(
883 b'experimental',
888 b'experimental',
884 b'copytrace.movecandidateslimit',
889 b'copytrace.movecandidateslimit',
885 default=100,
890 default=100,
886 )
891 )
887 coreconfigitem(
892 coreconfigitem(
888 b'experimental',
893 b'experimental',
889 b'copytrace.sourcecommitlimit',
894 b'copytrace.sourcecommitlimit',
890 default=100,
895 default=100,
891 )
896 )
892 coreconfigitem(
897 coreconfigitem(
893 b'experimental',
898 b'experimental',
894 b'copies.read-from',
899 b'copies.read-from',
895 default=b"filelog-only",
900 default=b"filelog-only",
896 )
901 )
897 coreconfigitem(
902 coreconfigitem(
898 b'experimental',
903 b'experimental',
899 b'copies.write-to',
904 b'copies.write-to',
900 default=b'filelog-only',
905 default=b'filelog-only',
901 )
906 )
902 coreconfigitem(
907 coreconfigitem(
903 b'experimental',
908 b'experimental',
904 b'crecordtest',
909 b'crecordtest',
905 default=None,
910 default=None,
906 )
911 )
907 coreconfigitem(
912 coreconfigitem(
908 b'experimental',
913 b'experimental',
909 b'directaccess',
914 b'directaccess',
910 default=False,
915 default=False,
911 )
916 )
912 coreconfigitem(
917 coreconfigitem(
913 b'experimental',
918 b'experimental',
914 b'directaccess.revnums',
919 b'directaccess.revnums',
915 default=False,
920 default=False,
916 )
921 )
917 coreconfigitem(
922 coreconfigitem(
918 b'experimental',
923 b'experimental',
919 b'editortmpinhg',
924 b'editortmpinhg',
920 default=False,
925 default=False,
921 )
926 )
922 coreconfigitem(
927 coreconfigitem(
923 b'experimental',
928 b'experimental',
924 b'evolution',
929 b'evolution',
925 default=list,
930 default=list,
926 )
931 )
927 coreconfigitem(
932 coreconfigitem(
928 b'experimental',
933 b'experimental',
929 b'evolution.allowdivergence',
934 b'evolution.allowdivergence',
930 default=False,
935 default=False,
931 alias=[(b'experimental', b'allowdivergence')],
936 alias=[(b'experimental', b'allowdivergence')],
932 )
937 )
933 coreconfigitem(
938 coreconfigitem(
934 b'experimental',
939 b'experimental',
935 b'evolution.allowunstable',
940 b'evolution.allowunstable',
936 default=None,
941 default=None,
937 )
942 )
938 coreconfigitem(
943 coreconfigitem(
939 b'experimental',
944 b'experimental',
940 b'evolution.createmarkers',
945 b'evolution.createmarkers',
941 default=None,
946 default=None,
942 )
947 )
943 coreconfigitem(
948 coreconfigitem(
944 b'experimental',
949 b'experimental',
945 b'evolution.effect-flags',
950 b'evolution.effect-flags',
946 default=True,
951 default=True,
947 alias=[(b'experimental', b'effect-flags')],
952 alias=[(b'experimental', b'effect-flags')],
948 )
953 )
949 coreconfigitem(
954 coreconfigitem(
950 b'experimental',
955 b'experimental',
951 b'evolution.exchange',
956 b'evolution.exchange',
952 default=None,
957 default=None,
953 )
958 )
954 coreconfigitem(
959 coreconfigitem(
955 b'experimental',
960 b'experimental',
956 b'evolution.bundle-obsmarker',
961 b'evolution.bundle-obsmarker',
957 default=False,
962 default=False,
958 )
963 )
959 coreconfigitem(
964 coreconfigitem(
960 b'experimental',
965 b'experimental',
961 b'evolution.bundle-obsmarker:mandatory',
966 b'evolution.bundle-obsmarker:mandatory',
962 default=True,
967 default=True,
963 )
968 )
964 coreconfigitem(
969 coreconfigitem(
965 b'experimental',
970 b'experimental',
966 b'log.topo',
971 b'log.topo',
967 default=False,
972 default=False,
968 )
973 )
969 coreconfigitem(
974 coreconfigitem(
970 b'experimental',
975 b'experimental',
971 b'evolution.report-instabilities',
976 b'evolution.report-instabilities',
972 default=True,
977 default=True,
973 )
978 )
974 coreconfigitem(
979 coreconfigitem(
975 b'experimental',
980 b'experimental',
976 b'evolution.track-operation',
981 b'evolution.track-operation',
977 default=True,
982 default=True,
978 )
983 )
979 # repo-level config to exclude a revset from visibility
984 # repo-level config to exclude a revset from visibility
980 #
985 #
981 # The target use case is to use `share` to expose different subset of the same
986 # The target use case is to use `share` to expose different subset of the same
982 # repository, especially server side. See also `server.view`.
987 # repository, especially server side. See also `server.view`.
983 coreconfigitem(
988 coreconfigitem(
984 b'experimental',
989 b'experimental',
985 b'extra-filter-revs',
990 b'extra-filter-revs',
986 default=None,
991 default=None,
987 )
992 )
988 coreconfigitem(
993 coreconfigitem(
989 b'experimental',
994 b'experimental',
990 b'maxdeltachainspan',
995 b'maxdeltachainspan',
991 default=-1,
996 default=-1,
992 )
997 )
993 # tracks files which were undeleted (merge might delete them but we explicitly
998 # tracks files which were undeleted (merge might delete them but we explicitly
994 # kept/undeleted them) and creates new filenodes for them
999 # kept/undeleted them) and creates new filenodes for them
995 coreconfigitem(
1000 coreconfigitem(
996 b'experimental',
1001 b'experimental',
997 b'merge-track-salvaged',
1002 b'merge-track-salvaged',
998 default=False,
1003 default=False,
999 )
1004 )
1000 coreconfigitem(
1005 coreconfigitem(
1001 b'experimental',
1006 b'experimental',
1002 b'mergetempdirprefix',
1007 b'mergetempdirprefix',
1003 default=None,
1008 default=None,
1004 )
1009 )
1005 coreconfigitem(
1010 coreconfigitem(
1006 b'experimental',
1011 b'experimental',
1007 b'mmapindexthreshold',
1012 b'mmapindexthreshold',
1008 default=None,
1013 default=None,
1009 )
1014 )
1010 coreconfigitem(
1015 coreconfigitem(
1011 b'experimental',
1016 b'experimental',
1012 b'narrow',
1017 b'narrow',
1013 default=False,
1018 default=False,
1014 )
1019 )
1015 coreconfigitem(
1020 coreconfigitem(
1016 b'experimental',
1021 b'experimental',
1017 b'nonnormalparanoidcheck',
1022 b'nonnormalparanoidcheck',
1018 default=False,
1023 default=False,
1019 )
1024 )
1020 coreconfigitem(
1025 coreconfigitem(
1021 b'experimental',
1026 b'experimental',
1022 b'exportableenviron',
1027 b'exportableenviron',
1023 default=list,
1028 default=list,
1024 )
1029 )
1025 coreconfigitem(
1030 coreconfigitem(
1026 b'experimental',
1031 b'experimental',
1027 b'extendedheader.index',
1032 b'extendedheader.index',
1028 default=None,
1033 default=None,
1029 )
1034 )
1030 coreconfigitem(
1035 coreconfigitem(
1031 b'experimental',
1036 b'experimental',
1032 b'extendedheader.similarity',
1037 b'extendedheader.similarity',
1033 default=False,
1038 default=False,
1034 )
1039 )
1035 coreconfigitem(
1040 coreconfigitem(
1036 b'experimental',
1041 b'experimental',
1037 b'graphshorten',
1042 b'graphshorten',
1038 default=False,
1043 default=False,
1039 )
1044 )
1040 coreconfigitem(
1045 coreconfigitem(
1041 b'experimental',
1046 b'experimental',
1042 b'graphstyle.parent',
1047 b'graphstyle.parent',
1043 default=dynamicdefault,
1048 default=dynamicdefault,
1044 )
1049 )
1045 coreconfigitem(
1050 coreconfigitem(
1046 b'experimental',
1051 b'experimental',
1047 b'graphstyle.missing',
1052 b'graphstyle.missing',
1048 default=dynamicdefault,
1053 default=dynamicdefault,
1049 )
1054 )
1050 coreconfigitem(
1055 coreconfigitem(
1051 b'experimental',
1056 b'experimental',
1052 b'graphstyle.grandparent',
1057 b'graphstyle.grandparent',
1053 default=dynamicdefault,
1058 default=dynamicdefault,
1054 )
1059 )
1055 coreconfigitem(
1060 coreconfigitem(
1056 b'experimental',
1061 b'experimental',
1057 b'hook-track-tags',
1062 b'hook-track-tags',
1058 default=False,
1063 default=False,
1059 )
1064 )
1060 coreconfigitem(
1065 coreconfigitem(
1061 b'experimental',
1066 b'experimental',
1062 b'httppeer.advertise-v2',
1067 b'httppeer.advertise-v2',
1063 default=False,
1068 default=False,
1064 )
1069 )
1065 coreconfigitem(
1070 coreconfigitem(
1066 b'experimental',
1071 b'experimental',
1067 b'httppeer.v2-encoder-order',
1072 b'httppeer.v2-encoder-order',
1068 default=None,
1073 default=None,
1069 )
1074 )
1070 coreconfigitem(
1075 coreconfigitem(
1071 b'experimental',
1076 b'experimental',
1072 b'httppostargs',
1077 b'httppostargs',
1073 default=False,
1078 default=False,
1074 )
1079 )
1075 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1080 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1076 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1081 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1077
1082
1078 coreconfigitem(
1083 coreconfigitem(
1079 b'experimental',
1084 b'experimental',
1080 b'obsmarkers-exchange-debug',
1085 b'obsmarkers-exchange-debug',
1081 default=False,
1086 default=False,
1082 )
1087 )
1083 coreconfigitem(
1088 coreconfigitem(
1084 b'experimental',
1089 b'experimental',
1085 b'remotenames',
1090 b'remotenames',
1086 default=False,
1091 default=False,
1087 )
1092 )
1088 coreconfigitem(
1093 coreconfigitem(
1089 b'experimental',
1094 b'experimental',
1090 b'removeemptydirs',
1095 b'removeemptydirs',
1091 default=True,
1096 default=True,
1092 )
1097 )
1093 coreconfigitem(
1098 coreconfigitem(
1094 b'experimental',
1099 b'experimental',
1095 b'revert.interactive.select-to-keep',
1100 b'revert.interactive.select-to-keep',
1096 default=False,
1101 default=False,
1097 )
1102 )
1098 coreconfigitem(
1103 coreconfigitem(
1099 b'experimental',
1104 b'experimental',
1100 b'revisions.prefixhexnode',
1105 b'revisions.prefixhexnode',
1101 default=False,
1106 default=False,
1102 )
1107 )
1103 coreconfigitem(
1108 coreconfigitem(
1104 b'experimental',
1109 b'experimental',
1105 b'revlogv2',
1110 b'revlogv2',
1106 default=None,
1111 default=None,
1107 )
1112 )
1108 coreconfigitem(
1113 coreconfigitem(
1109 b'experimental',
1114 b'experimental',
1110 b'revisions.disambiguatewithin',
1115 b'revisions.disambiguatewithin',
1111 default=None,
1116 default=None,
1112 )
1117 )
1113 coreconfigitem(
1118 coreconfigitem(
1114 b'experimental',
1119 b'experimental',
1115 b'rust.index',
1120 b'rust.index',
1116 default=False,
1121 default=False,
1117 )
1122 )
1118 coreconfigitem(
1123 coreconfigitem(
1119 b'experimental',
1124 b'experimental',
1120 b'server.filesdata.recommended-batch-size',
1125 b'server.filesdata.recommended-batch-size',
1121 default=50000,
1126 default=50000,
1122 )
1127 )
1123 coreconfigitem(
1128 coreconfigitem(
1124 b'experimental',
1129 b'experimental',
1125 b'server.manifestdata.recommended-batch-size',
1130 b'server.manifestdata.recommended-batch-size',
1126 default=100000,
1131 default=100000,
1127 )
1132 )
1128 coreconfigitem(
1133 coreconfigitem(
1129 b'experimental',
1134 b'experimental',
1130 b'server.stream-narrow-clones',
1135 b'server.stream-narrow-clones',
1131 default=False,
1136 default=False,
1132 )
1137 )
1133 coreconfigitem(
1138 coreconfigitem(
1134 b'experimental',
1139 b'experimental',
1135 b'single-head-per-branch',
1140 b'single-head-per-branch',
1136 default=False,
1141 default=False,
1137 )
1142 )
1138 coreconfigitem(
1143 coreconfigitem(
1139 b'experimental',
1144 b'experimental',
1140 b'single-head-per-branch:account-closed-heads',
1145 b'single-head-per-branch:account-closed-heads',
1141 default=False,
1146 default=False,
1142 )
1147 )
1143 coreconfigitem(
1148 coreconfigitem(
1144 b'experimental',
1149 b'experimental',
1145 b'single-head-per-branch:public-changes-only',
1150 b'single-head-per-branch:public-changes-only',
1146 default=False,
1151 default=False,
1147 )
1152 )
1148 coreconfigitem(
1153 coreconfigitem(
1149 b'experimental',
1154 b'experimental',
1150 b'sshserver.support-v2',
1155 b'sshserver.support-v2',
1151 default=False,
1156 default=False,
1152 )
1157 )
1153 coreconfigitem(
1158 coreconfigitem(
1154 b'experimental',
1159 b'experimental',
1155 b'sparse-read',
1160 b'sparse-read',
1156 default=False,
1161 default=False,
1157 )
1162 )
1158 coreconfigitem(
1163 coreconfigitem(
1159 b'experimental',
1164 b'experimental',
1160 b'sparse-read.density-threshold',
1165 b'sparse-read.density-threshold',
1161 default=0.50,
1166 default=0.50,
1162 )
1167 )
1163 coreconfigitem(
1168 coreconfigitem(
1164 b'experimental',
1169 b'experimental',
1165 b'sparse-read.min-gap-size',
1170 b'sparse-read.min-gap-size',
1166 default=b'65K',
1171 default=b'65K',
1167 )
1172 )
1168 coreconfigitem(
1173 coreconfigitem(
1169 b'experimental',
1174 b'experimental',
1170 b'treemanifest',
1175 b'treemanifest',
1171 default=False,
1176 default=False,
1172 )
1177 )
1173 coreconfigitem(
1178 coreconfigitem(
1174 b'experimental',
1179 b'experimental',
1175 b'update.atomic-file',
1180 b'update.atomic-file',
1176 default=False,
1181 default=False,
1177 )
1182 )
1178 coreconfigitem(
1183 coreconfigitem(
1179 b'experimental',
1184 b'experimental',
1180 b'sshpeer.advertise-v2',
1185 b'sshpeer.advertise-v2',
1181 default=False,
1186 default=False,
1182 )
1187 )
1183 coreconfigitem(
1188 coreconfigitem(
1184 b'experimental',
1189 b'experimental',
1185 b'web.apiserver',
1190 b'web.apiserver',
1186 default=False,
1191 default=False,
1187 )
1192 )
1188 coreconfigitem(
1193 coreconfigitem(
1189 b'experimental',
1194 b'experimental',
1190 b'web.api.http-v2',
1195 b'web.api.http-v2',
1191 default=False,
1196 default=False,
1192 )
1197 )
1193 coreconfigitem(
1198 coreconfigitem(
1194 b'experimental',
1199 b'experimental',
1195 b'web.api.debugreflect',
1200 b'web.api.debugreflect',
1196 default=False,
1201 default=False,
1197 )
1202 )
1198 coreconfigitem(
1203 coreconfigitem(
1199 b'experimental',
1204 b'experimental',
1200 b'worker.wdir-get-thread-safe',
1205 b'worker.wdir-get-thread-safe',
1201 default=False,
1206 default=False,
1202 )
1207 )
1203 coreconfigitem(
1208 coreconfigitem(
1204 b'experimental',
1209 b'experimental',
    b'worker.repository-upgrade',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'xdiff',
    default=False,
)
coreconfigitem(
    b'extensions',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'extdata',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'format',
    b'bookmarks-in-store',
    default=False,
)
coreconfigitem(
    b'format',
    b'chunkcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'dotencode',
    default=True,
)
coreconfigitem(
    b'format',
    b'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'obsstore-version',
    default=None,
)
coreconfigitem(
    b'format',
    b'sparse-revlog',
    default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
coreconfigitem(
    b'format',
    b'usefncache',
    default=True,
)
coreconfigitem(
    b'format',
    b'usegeneraldelta',
    default=True,
)
coreconfigitem(
    b'format',
    b'usestore',
    default=True,
)
coreconfigitem(
    b'format',
    b'use-persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'exp-use-side-data',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe',
    default=False,
)
coreconfigitem(
    b'format',
    b'internal-phase',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_when_unused',
    default=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count',
    default=50000,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count_rust',
    default=400000,
)
coreconfigitem(
    b'help',
    br'hidden-command\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'help',
    br'hidden-topic\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'[^:]*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'.*:run-with-plain',
    default=True,
    generic=True,
)
coreconfigitem(
    b'hgweb-paths',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostfingerprints',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'ciphers',
    default=None,
)
coreconfigitem(
    b'hostsecurity',
    b'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:verifycertsfile$',
    default=None,
    generic=True,
)

coreconfigitem(
    b'http_proxy',
    b'always',
    default=False,
)
coreconfigitem(
    b'http_proxy',
    b'host',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'no',
    default=list,
)
coreconfigitem(
    b'http_proxy',
    b'passwd',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'user',
    default=None,
)

coreconfigitem(
    b'http',
    b'timeout',
    default=None,
)

coreconfigitem(
    b'logtoprocess',
    b'commandexception',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'commandfinish',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'command',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'develwarn',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'uiblocked',
    default=None,
)
coreconfigitem(
    b'merge',
    b'checkunknown',
    default=b'abort',
)
coreconfigitem(
    b'merge',
    b'checkignored',
    default=b'abort',
)
coreconfigitem(
    b'experimental',
    b'merge.checkpathconflicts',
    default=False,
)
coreconfigitem(
    b'merge',
    b'followcopies',
    default=True,
)
coreconfigitem(
    b'merge',
    b'on-failure',
    default=b'continue',
)
coreconfigitem(
    b'merge',
    b'preferancestor',
    default=lambda: [b'*'],
    experimental=True,
)
coreconfigitem(
    b'merge',
    b'strict-capability-check',
    default=False,
)
coreconfigitem(
    b'merge-tools',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from command-templates.mergemarker
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'pager',
    b'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'pager',
    b'ignore',
    default=list,
)
coreconfigitem(
    b'pager',
    b'pager',
    default=dynamicdefault,
)
coreconfigitem(
    b'patch',
    b'eol',
    default=b'strict',
)
coreconfigitem(
    b'patch',
    b'fuzz',
    default=2,
)
coreconfigitem(
    b'paths',
    b'default',
    default=None,
)
coreconfigitem(
    b'paths',
    b'default-push',
    default=None,
)
coreconfigitem(
    b'paths',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'phases',
    b'checksubrepos',
    default=b'follow',
)
coreconfigitem(
    b'phases',
    b'new-commit',
    default=b'draft',
)
coreconfigitem(
    b'phases',
    b'publish',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'enabled',
    default=False,
)
coreconfigitem(
    b'profiling',
    b'format',
    default=b'text',
)
coreconfigitem(
    b'profiling',
    b'freq',
    default=1000,
)
coreconfigitem(
    b'profiling',
    b'limit',
    default=30,
)
coreconfigitem(
    b'profiling',
    b'nested',
    default=0,
)
coreconfigitem(
    b'profiling',
    b'output',
    default=None,
)
coreconfigitem(
    b'profiling',
    b'showmax',
    default=0.999,
)
coreconfigitem(
    b'profiling',
    b'showmin',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'showtime',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'sort',
    default=b'inlinetime',
)
coreconfigitem(
    b'profiling',
    b'statformat',
    default=b'hotpath',
)
coreconfigitem(
    b'profiling',
    b'time-track',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'type',
    default=b'stat',
)
coreconfigitem(
    b'progress',
    b'assume-tty',
    default=False,
)
coreconfigitem(
    b'progress',
    b'changedelay',
    default=1,
)
coreconfigitem(
    b'progress',
    b'clear-complete',
    default=True,
)
coreconfigitem(
    b'progress',
    b'debug',
    default=False,
)
coreconfigitem(
    b'progress',
    b'delay',
    default=3,
)
coreconfigitem(
    b'progress',
    b'disable',
    default=False,
)
coreconfigitem(
    b'progress',
    b'estimateinterval',
    default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress',
    b'refresh',
    default=0.1,
)
coreconfigitem(
    b'progress',
    b'width',
    default=dynamicdefault,
)
coreconfigitem(
    b'pull',
    b'confirm',
    default=False,
)
coreconfigitem(
    b'push',
    b'pushvars.server',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite',
    b'update-timestamp',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'empty-successor',
    default=b'skip',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'new-repo-backend',
    default=b'revlogv1',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
# experimental as long as rust is experimental (or a C version is implemented)
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.mmap',
    default=True,
)
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.slow-path',
    default=b"abort",
)

coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta',
    default=True,
)
coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta-parent',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zlib.level',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zstd.level',
    default=None,
)
coreconfigitem(
    b'server',
    b'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1gd',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server',
    b'compressionengines',
    default=list,
)
coreconfigitem(
    b'server',
    b'concurrent-push-mode',
    default=b'check-related',
)
coreconfigitem(
    b'server',
    b'disablefullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'maxhttpheaderlen',
    default=1024,
)
coreconfigitem(
    b'server',
    b'pullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'preferuncompressed',
    default=False,
)
coreconfigitem(
    b'server',
    b'streamunbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'uncompressed',
    default=True,
)
coreconfigitem(
    b'server',
    b'uncompressedallowsecret',
    default=False,
)
coreconfigitem(
    b'server',
    b'view',
    default=b'served',
)
coreconfigitem(
    b'server',
    b'validate',
    default=False,
)
coreconfigitem(
    b'server',
    b'zliblevel',
    default=-1,
)
coreconfigitem(
    b'server',
    b'zstdlevel',
    default=3,
)
coreconfigitem(
    b'share',
    b'pool',
    default=None,
)
coreconfigitem(
    b'share',
    b'poolnaming',
    default=b'identity',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe.warn',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe.warn',
    default=True,
)
coreconfigitem(
    b'shelve',
    b'maxbackups',
    default=10,
)
coreconfigitem(
    b'smtp',
    b'host',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'local_hostname',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'password',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'port',
    default=dynamicdefault,
)
coreconfigitem(
    b'smtp',
    b'tls',
    default=b'none',
)
coreconfigitem(
    b'smtp',
    b'username',
    default=None,
)
coreconfigitem(
    b'sparse',
    b'missingwarning',
    default=True,
    experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos',
    b'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'git:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'templates',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'templateconfig',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'trusted',
    b'groups',
    default=list,
)
coreconfigitem(
    b'trusted',
    b'users',
    default=list,
)
coreconfigitem(
    b'ui',
    b'_usedassubrepo',
    default=False,
)
coreconfigitem(
    b'ui',
    b'allowemptycommit',
    default=False,
)
coreconfigitem(
    b'ui',
    b'archivemeta',
    default=True,
)
coreconfigitem(
    b'ui',
    b'askusername',
    default=False,
)
coreconfigitem(
    b'ui',
    b'available-memory',
    default=None,
)

coreconfigitem(
    b'ui',
    b'clonebundlefallback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'clonebundleprefers',
    default=list,
)
coreconfigitem(
    b'ui',
    b'clonebundles',
    default=True,
)
coreconfigitem(
    b'ui',
    b'color',
    default=b'auto',
)
coreconfigitem(
    b'ui',
    b'commitsubrepos',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debugger',
    default=None,
)
coreconfigitem(
    b'ui',
    b'editor',
    default=dynamicdefault,
)
coreconfigitem(
    b'ui',
    b'detailed-exit-code',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'ui',
    b'fallbackencoding',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcecwd',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcemerge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'formatdebug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatjson',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatted',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interactive',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface.chunkselector',
    default=None,
)
coreconfigitem(
    b'ui',
    b'large-file-limit',
    default=10000000,
)
coreconfigitem(
    b'ui',
    b'logblockedtimes',
    default=False,
)
coreconfigitem(
    b'ui',
    b'merge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'mergemarkers',
    default=b'basic',
)
coreconfigitem(
    b'ui',
    b'message-output',
    default=b'stdio',
)
coreconfigitem(
    b'ui',
    b'nontty',
    default=False,
)
coreconfigitem(
    b'ui',
    b'origbackuppath',
    default=None,
)
coreconfigitem(
    b'ui',
    b'paginate',
    default=True,
)
coreconfigitem(
    b'ui',
    b'patch',
    default=None,
)
coreconfigitem(
    b'ui',
    b'portablefilenames',
    default=b'warn',
)
coreconfigitem(
    b'ui',
    b'promptecho',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quiet',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quietbookmarkmove',
    default=False,
)
coreconfigitem(
    b'ui',
    b'relative-paths',
    default=b'legacy',
)
coreconfigitem(
    b'ui',
    b'remotecmd',
    default=b'hg',
)
coreconfigitem(
    b'ui',
    b'report_untrusted',
    default=True,
)
coreconfigitem(
    b'ui',
    b'rollback',
    default=True,
)
coreconfigitem(
    b'ui',
    b'signal-safe-lock',
    default=True,
)
coreconfigitem(
    b'ui',
    b'slash',
    default=False,
)
coreconfigitem(
    b'ui',
    b'ssh',
    default=b'ssh',
)
coreconfigitem(
    b'ui',
    b'ssherrorhint',
    default=None,
)
coreconfigitem(
    b'ui',
    b'statuscopies',
    default=False,
)
coreconfigitem(
    b'ui',
    b'strict',
    default=False,
)
coreconfigitem(
    b'ui',
    b'style',
    default=b'',
)
coreconfigitem(
    b'ui',
    b'supportcontact',
    default=None,
)
coreconfigitem(
    b'ui',
    b'textwidth',
    default=78,
)
coreconfigitem(
    b'ui',
    b'timeout',
    default=b'600',
)
coreconfigitem(
    b'ui',
    b'timeout.warn',
    default=0,
)
coreconfigitem(
    b'ui',
    b'timestamp-output',
    default=False,
)
coreconfigitem(
    b'ui',
    b'traceback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'tweakdefaults',
    default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui',
    b'verbose',
    default=False,
)
coreconfigitem(
    b'verify',
    b'skipflags',
    default=None,
)
coreconfigitem(
    b'web',
    b'allowbz2',
    default=False,
)
coreconfigitem(
    b'web',
    b'allowgz',
    default=False,
)
coreconfigitem(
    b'web',
    b'allow-pull',
    alias=[(b'web', b'allowpull')],
    default=True,
)
coreconfigitem(
    b'web',
    b'allow-push',
    alias=[(b'web', b'allow_push')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allowzip',
    default=False,
)
coreconfigitem(
    b'web',
    b'archivesubrepos',
    default=False,
)
coreconfigitem(
    b'web',
    b'cache',
    default=True,
)
coreconfigitem(
    b'web',
    b'comparisoncontext',
    default=5,
)
coreconfigitem(
    b'web',
    b'contact',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_push',
    default=list,
)
coreconfigitem(
    b'web',
    b'guessmime',
    default=False,
)
coreconfigitem(
    b'web',
    b'hidden',
    default=False,
)
coreconfigitem(
    b'web',
    b'labels',
    default=list,
)
coreconfigitem(
    b'web',
    b'logoimg',
    default=b'hglogo.png',
)
coreconfigitem(
    b'web',
    b'logourl',
    default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web',
    b'accesslog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'address',
    default=b'',
)
coreconfigitem(
    b'web',
    b'allow-archive',
    alias=[(b'web', b'allow_archive')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allow_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'baseurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'cacerts',
    default=None,
)
coreconfigitem(
    b'web',
    b'certificate',
    default=None,
)
coreconfigitem(
    b'web',
    b'collapse',
    default=False,
)
coreconfigitem(
    b'web',
    b'csp',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'descend',
    default=True,
)
coreconfigitem(
    b'web',
    b'description',
    default=b"",
)
coreconfigitem(
    b'web',
    b'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web',
    b'errorlog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'ipv6',
    default=False,
)
coreconfigitem(
    b'web',
    b'maxchanges',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxfiles',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxshortchanges',
    default=60,
)
coreconfigitem(
    b'web',
    b'motd',
    default=b'',
)
coreconfigitem(
    b'web',
    b'name',
    default=dynamicdefault,
)
coreconfigitem(
    b'web',
    b'port',
    default=8000,
)
coreconfigitem(
    b'web',
    b'prefix',
    default=b'',
)
coreconfigitem(
    b'web',
    b'push_ssl',
    default=True,
)
coreconfigitem(
    b'web',
    b'refreshinterval',
    default=20,
)
coreconfigitem(
    b'web',
    b'server-header',
    default=None,
)
coreconfigitem(
    b'web',
    b'static',
    default=None,
)
coreconfigitem(
    b'web',
    b'staticurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'stripes',
    default=1,
)
coreconfigitem(
    b'web',
    b'style',
    default=b'paper',
)
coreconfigitem(
    b'web',
    b'templates',
    default=None,
)
coreconfigitem(
    b'web',
    b'view',
    default=b'served',
    experimental=True,
)
coreconfigitem(
    b'worker',
    b'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem(
    b'worker',
    b'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem(
    b'worker',
    b'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem(
    b'worker',
    b'backgroundclosethreadcount',
    default=4,
)
coreconfigitem(
    b'worker',
    b'enabled',
    default=True,
)
coreconfigitem(
    b'worker',
    b'numcpus',
    default=None,
)

# Rebase-related configuration moved to core because other extensions are doing
# strange things with it. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)
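The long run of coreconfigitem() declarations above registers, for each option, a default value plus optional flags (generic=True for pattern-matched names, alias=... for legacy spellings, experimental=True, priority=...). As a rough, self-contained model of that pattern -- illustrative only; ConfigItem, _registry, and config() are invented names, not Mercurial's actual internals -- the sketch below shows how such a registry resolves a lookup to either a user-supplied value or the registered default, evaluating callable defaults (such as `list` or a lambda) lazily so every caller gets a fresh object:

# Toy model of a coreconfigitem()-style default registry (assumption:
# this mirrors only the shape of the declarations above, not the real
# mercurial/configitems.py machinery).

class ConfigItem:
    def __init__(self, section, name, default=None, **opts):
        self.section = section
        self.name = name
        self.default = default
        self.opts = opts  # e.g. generic, alias, experimental, priority

_registry = {}

def coreconfigitem(section, name, default=None, **opts):
    # Register the default so later lookups can fall back to it.
    _registry[(section, name)] = ConfigItem(section, name, default, **opts)

def config(user_settings, section, name):
    # User-provided settings win; otherwise use the registered default.
    if (section, name) in user_settings:
        return user_settings[(section, name)]
    item = _registry.get((section, name))
    if item is None:
        raise KeyError('unregistered config option: %r.%r' % (section, name))
    # Callable defaults are evaluated per lookup for a fresh value.
    return item.default() if callable(item.default) else item.default

coreconfigitem(b'web', b'port', default=8000)
coreconfigitem(b'web', b'labels', default=list)

print(config({}, b'web', b'port'))                         # 8000
print(config({(b'web', b'port'): 8080}, b'web', b'port'))  # 8080
print(config({}, b'web', b'labels'))                       # []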
@@ -1,3692 +1,3698 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

-from .revlogutils import constants as revlogconst
+from .revlogutils import (
+    concurrency_checker as revlogchecker,
+    constants as revlogconst,
+)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


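The *filecache classes above (and the *propertycache classes just below) all build on a compute-once descriptor pattern. The following is a minimal stand-in for that idea -- it is not Mercurial's util.propertycache, which additionally tracks file stat data to invalidate the cache -- showing how a non-data descriptor computes a value on first access and stashes it in the instance __dict__, so later lookups bypass the descriptor entirely:

# Minimal sketch of the compute-once descriptor pattern (assumption:
# simplified relative to Mercurial's util.propertycache).

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        value = self.func(obj)
        # Cache on the instance; since this is a non-data descriptor
        # (no __set__), the instance attribute now shadows the descriptor.
        obj.__dict__[self.name] = value
        return value

class Repo(object):
    @propertycache
    def branchmap(self):
        print('computing branchmap...')
        return {'default': ['abc123']}

r = Repo()
r.branchmap  # prints 'computing branchmap...' and caches the result
r.branchmap  # served from the instance __dict__, no recompute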
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
197
200
198 @functools.wraps(orig)
201 @functools.wraps(orig)
199 def wrapper(repo, *args, **kwargs):
202 def wrapper(repo, *args, **kwargs):
200 return orig(repo.unfiltered(), *args, **kwargs)
203 return orig(repo.unfiltered(), *args, **kwargs)
201
204
202 return wrapper
205 return wrapper
203
206
204
207
205 moderncaps = {
208 moderncaps = {
206 b'lookup',
209 b'lookup',
207 b'branchmap',
210 b'branchmap',
208 b'pushkey',
211 b'pushkey',
209 b'known',
212 b'known',
210 b'getbundle',
213 b'getbundle',
211 b'unbundle',
214 b'unbundle',
212 }
215 }
213 legacycaps = moderncaps.union({b'changegroupsubset'})
216 legacycaps = moderncaps.union({b'changegroupsubset'})
214
217
215
218
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


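# Illustrative usage sketch (assumed calling convention, not taken from this
# change): callers drive an executor through the context-manager protocol
# and futures:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         e.sendcommands()
#         node = f.result()

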
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


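# Illustrative registration sketch (assumed, not part of the original
# change): an extension advertising support for a custom requirement would
# typically do, in its setup code:
#
#     def featuresetup(ui, supported):
#         supported |= {b'exp-myextension-requirement'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# The requirement name above is hypothetical and used only as an example.

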
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
    requirements is the set of requirements of the current repo (the shared
    one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements


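# For illustration (a plausible example, not taken from this change): a
# modern repository's .hg/requires file might contain lines such as:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store
#
# _readrequires() returns such content as a set of bytestrings.

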
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
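    # Illustrative sketch (assumed wrapping pattern, not part of this
    # change): an extension customizing file storage might wrap one of the
    # factory functions, e.g.:
    #
    #     def wrapfilestorage(orig, requirements, features, **kwargs):
    #         cls = orig(requirements, features, **kwargs)
    #         return cls  # or a type derived from cls
    #
    #     extensions.wrapfunction(
    #         localrepo, 'makefilestorage', wrapfilestorage
    #     )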
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # If .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it.
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; refer to checkrequirementscompat() for that.
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared.
    hint = _("see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

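    # For illustration (hypothetical values): a repository at /srv/repo with
    # the fncache and store requirements would be represented by a type
    # named "derivedrepo:/srv/repo<fncache,store>".
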
    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is the vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # First load config from the shared source if we have to.
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


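# Illustrative monkeypatching sketch (assumed, not part of the original
# change): an extension loading configs from an extra file could wrap
# loadhgrc like so:
#
#     def extloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements, *args)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-ext'), root=wdirvfs.base)
#             loaded = True
#         except IOError:
#             pass
#         return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', extloadhgrc)
#
# The hgrc-ext filename above is hypothetical.

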
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


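# For illustration: the requirement set determines the store class, so a
# repository with {b'store', b'fncache', b'dotencode'} gets a fncachestore
# with dotencode enabled, {b'store'} alone gets an encodedstore, and a
# repository predating the store requirement gets a basicstore.

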
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        b'revlogv1' in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

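    # For illustration: a requirement like b'revlog-compression-zstd' splits
    # into [b'revlog', b'compression', b'zstd'], so the engine name selected
    # above would be b'zstd'.
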
    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        if ui.configbool(b'devel', b'persistent-nodemap'):
1123 options[b'devel-force-nodemap'] = True
1126 options[b'devel-force-nodemap'] = True
1124
1127
1125 return options
1128 return options
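
# Editorial sketch (not part of the change under review): the level checks
# above mean an out-of-range compression level aborts repository opening.
# For example, with `storage.revlog.zstd.level = 25` in an hgrc, opening
# the repo would fail with roughly:
#
#   abort: invalid value for `storage.revlog.zstd.level` config: 25
#
# since zstd only accepts levels 0-22 (zlib: 0-9).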


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
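
# Editorial sketch (hypothetical extension code): because the factories are
# looked up through lambdas at repository-creation time, an extension can
# wrap the module-level function and still take effect:
#
#   origmakemain = localrepo.makemain
#
#   def wrappedmakemain(**kwargs):
#       cls = origmakemain(**kwargs)
#       return type('extendedrepo', (cls,), {})
#
#   localrepo.makemain = wrappedmakemain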


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
            ``ui.ui`` instance that the ``ui`` argument was based off of.

        ui
            ``ui.ui`` instance for use by the repository.

        origroot
            ``bytes`` path to working directory root of this repository.

        wdirvfs
            ``vfs.vfs`` rooted at the working directory.

        hgvfs
            ``vfs.vfs`` rooted at .hg/

        requirements
            ``set`` of bytestrings representing repository opening
            requirements.

        supportedrequirements
            ``set`` of bytestrings representing repository requirements that
            we know how to open. May be a superset of ``requirements``.

        sharedpath
            ``bytes`` defining the path to the storage base directory. Points
            to a ``.hg/`` directory somewhere.

        store
            ``store.basicstore`` (or derived) instance providing access to
            versioned storage.

        cachevfs
            ``vfs.vfs`` used for cache files.

        wcachevfs
            ``vfs.vfs`` used for cache files related to the working copy.

        features
            ``set`` of bytestrings defining features/capabilities of this
            instance.

        intents
            ``set`` of system strings indicating what this repo will be used
            for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
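
    # Editorial sketch (illustration only): with `devel.check-locks` enabled,
    # a store write performed without holding the lock is reported through
    # `ui.develwarn`, surfacing on stderr roughly as:
    #
    #   devel-warn: write with no lock: "00changelog.i" ...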

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
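
    # Editorial sketch (illustration only): since filtering is not recursive,
    # chaining views does not compose them; the last requested name wins:
    #
    #   served = repo.filtered(b'served')
    #   visible = served.filtered(b'visible')  # a 'visible' view of repo,
    #                                          # not 'visible of served'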

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race, see issue6303
        self.dirstate.prefetch_parents()
-       return self.store.changelog(txnutil.mayhavepending(self.root))
+       return self.store.changelog(
+           txnutil.mayhavepending(self.root),
+           concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
+       )
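
    # Editorial note: the `concurrencychecker` keyword above is the hook-up
    # point of this change; `revlogchecker.get_checker(self.ui, b'changelog')`
    # builds a ui-configured callable the changelog's revlog can use to
    # verify the on-disk file position before appending.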

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognize right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a
                    # recognizable exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
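
    # Editorial sketch (illustrative lookups accepted by __getitem__ above):
    #
    #   repo[None]     # working directory context
    #   repo[0]        # integer revision
    #   repo[b'.']     # first parent of the working directory
    #   repo[b'tip']   # tip changeset
    #   repo[binnode]  # 20-byte binary node
    #   repo[hexnode]  # 40-byte hex node (`binnode`/`hexnode` hypothetical)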

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
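
    # Editorial sketch (hypothetical call, relying on the %-formatting that
    # the docstring above points to via ``revsetlang.formatspec``):
    #
    #   heads = repo.revs(b'heads(%ld)', someintrevs)  # `someintrevs` assumed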
1889
1895
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

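    # Illustrative sketch (assuming a localrepository instance `repo`): a
    # hypothetical local alias temporarily shadows any user-defined alias of
    # the same name for the duration of the query.
    #
    #     revs = repo.anyrevs(
    #         [b'mine and draft()'],
    #         user=True,
    #         localalias={b'mine': b'author("alice")'},
    #     )
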
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

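    # Illustrative sketch: an extension firing a custom hook (the hook name
    # and arguments here are hypothetical). Any matching `[hooks]` entries in
    # the configuration would then run with the keyword arguments exposed as
    # HG_* environment variables.
    #
    #     repo.hook(b'myext-updated', throw=False, node=hex(newnode))
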
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

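    # Illustrative sketch of the tag accessors above (assuming a repository
    # with a single global tag ``v1.0``):
    #
    #     repo.tags()            # {b'v1.0': <node>, b'tip': <node>}
    #     repo.tagtype(b'v1.0')  # b'global'
    #     repo.tagslist()        # [(b'v1.0', <node>), (b'tip', <node>)]
    #     repo.nodetags(node)    # sorted tag names pointing at `node`
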
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

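    # Illustrative sketch: with ignoremissing=True a missing branch falls
    # through and yields None instead of raising RepoLookupError.
    #
    #     tip = repo.branchtip(b'default')
    #     maybe = repo.branchtip(b'no-such-branch', ignoremissing=True)
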
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

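    # Illustrative sketch: `known` maps candidate nodes to booleans, treating
    # filtered (e.g. obsolete-hidden) revisions as unknown.
    #
    #     flags = repo.known([node1, node2])  # e.g. [True, False]
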
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

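    # Illustrative sketch (assuming a file `foo` tracked at the tip
    # changeset):
    #
    #     fctx = repo.filectx(b'foo', changeid=b'tip')
    #     data = fctx.data()
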
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

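    # Illustrative sketch: `_loadfilter` reads pattern/command pairs from a
    # config section, mapping each pattern either to a registered data filter
    # or to a shell command. A hypothetical hgrc snippet for the `encode`
    # section might look like:
    #
    #     [encode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
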
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

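    # Illustrative sketch: the flags select how the file materializes in the
    # working directory -- b'l' writes a symlink, b'x' marks it executable.
    #
    #     repo.wwrite(b'script.sh', data, flags=b'x')
    #     repo.wwrite(b'link', b'target', flags=b'l')
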
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

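    # Illustrative sketch of the expected calling pattern (assuming a
    # localrepository instance `repo`): take the lock, open a transaction,
    # and close it on success so the journal files become undo files.
    #
    #     with repo.lock():
    #         tr = repo.transaction(b'my-operation')
    #         try:
    #             # ... append to revlogs, move bookmarks, etc. ...
    #             tr.close()
    #         finally:
    #             tr.release()
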
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
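        #
        # As an illustrative example (hex nodes shortened here purely for
        # readability), a tag ``v1.0`` moved from one node to another would
        # appear in tags.changes as two lines:
        #
        #   -M 3a2b1c... v1.0
        #   +M 9f8e7d... v1.0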
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

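    # Illustrative sketch: `recover` is what backs the `hg recover` command,
    # so an interrupted transaction is typically cleaned up from the CLI:
    #
    #     $ hg recover
    #     rolling back interrupted transaction
    #
    # or programmatically (assuming a localrepository instance `repo`):
    #
    #     repo.recover()
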
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

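    # Illustrative sketch: a full cache warm-up can be requested explicitly,
    # which is roughly what the `hg debugupdatecaches` debug command does
    # (assuming a localrepository instance `repo`):
    #
    #     with repo.lock():
    #         repo.updatecaches(full=True)
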
2730 def invalidatecaches(self):
2736 def invalidatecaches(self):
2731
2737
2732 if '_tagscache' in vars(self):
2738 if '_tagscache' in vars(self):
2733 # can't use delattr on proxy
2739 # can't use delattr on proxy
2734 del self.__dict__['_tagscache']
2740 del self.__dict__['_tagscache']
2735
2741
2736 self._branchcaches.clear()
2742 self._branchcaches.clear()
2737 self.invalidatevolatilesets()
2743 self.invalidatevolatilesets()
2738 self._sparsesignaturecache.clear()
2744 self._sparsesignaturecache.clear()
2739
2745
2740 def invalidatevolatilesets(self):
2746 def invalidatevolatilesets(self):
2741 self.filteredrevcache.clear()
2747 self.filteredrevcache.clear()
2742 obsolete.clearobscaches(self)
2748 obsolete.clearobscaches(self)
2743 self._quick_access_changeid_invalidate()
2749 self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
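        # Editorial note (not in the original source): the for/else above
        # means that when neither lock is currently held, the callback runs
        # immediately and synchronously, e.g. a hypothetical
        # repo._afterlock(lambda success: None) would fire right away.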

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l
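    # A minimal usage sketch (illustrative, not part of the original file):
    # callers that need both locks must take 'wlock' first, e.g.
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate the store and the working copy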

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()
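        # Editorial note: unlock() above is passed to self._lock() below as
        # the releasefn, so the dirstate is written out (or invalidated) when
        # the outermost wlock is released.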

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))
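    # Illustrative example (not from the original file): `hg commit foo`
    # where foo is neither tracked nor a directory with matched files would
    # reach the "file not tracked!" branch above.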

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
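    # A minimal calling sketch (illustrative, not part of the original file):
    #
    #     node = repo.commit(text=b'fix bug', user=b'alice')
    #
    # returns the new changeset's node, or None when there was nothing to
    # commit and ui.allowemptycommit is disabled.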

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
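    # Editorial note: for each (top, bottom) pair the loop above samples the
    # first-parent chain at exponentially growing distances from top
    # (i == 1, 2, 4, 8, ...), which is the shape the legacy 'between'
    # wire-protocol command expects.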

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
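    # Editorial note: typical pushkey namespaces are b'bookmarks' and
    # b'phases'; a prepushkey hook abort is reported to the user and turned
    # into a False return value rather than an exception.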

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])
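    # Editorial note: the returned path is repo-relative, which is what lets
    # the except-branch of commit() above print
    # "note: commit message saved in .hg/last-message.txt".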


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
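# Illustrative example (not from the original file):
# undoname(b'.hg/store/journal') == b'.hg/store/undo'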


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )
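    # Editorial note: the for/else above leaves `compengine` bound to the
    # first engine from format.revlog-compression that is actually present
    # in util.compengines; the Abort only triggers when none of them are.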

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
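# Illustrative example (not from the original file): with a hypothetical
# unknown option, filterknowncreateopts(ui, {b'lfs': True, b'frobnicate': 1})
# would return {b'frobnicate': 1}, which makes createrepository() abort.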


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )
3598
3604
3599 requirements = newreporequirements(ui, createopts=createopts)
3605 requirements = newreporequirements(ui, createopts=createopts)
3600 requirements -= checkrequirementscompat(ui, requirements)
3606 requirements -= checkrequirementscompat(ui, requirements)
3601
3607
3602 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3608 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3603
3609
3604 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3610 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3605 if hgvfs.exists():
3611 if hgvfs.exists():
3606 raise error.RepoError(_(b'repository %s already exists') % path)
3612 raise error.RepoError(_(b'repository %s already exists') % path)
3607
3613
3608 if b'sharedrepo' in createopts:
3614 if b'sharedrepo' in createopts:
3609 sharedpath = createopts[b'sharedrepo'].sharedpath
3615 sharedpath = createopts[b'sharedrepo'].sharedpath
3610
3616
3611 if createopts.get(b'sharedrelative'):
3617 if createopts.get(b'sharedrelative'):
3612 try:
3618 try:
3613 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3619 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3614 except (IOError, ValueError) as e:
3620 except (IOError, ValueError) as e:
3615 # ValueError is raised on Windows if the drive letters differ
3621 # ValueError is raised on Windows if the drive letters differ
3616 # on each path.
3622 # on each path.
3617 raise error.Abort(
3623 raise error.Abort(
3618 _(b'cannot calculate relative path'),
3624 _(b'cannot calculate relative path'),
3619 hint=stringutil.forcebytestr(e),
3625 hint=stringutil.forcebytestr(e),
3620 )
3626 )
3621
3627
3622 if not wdirvfs.exists():
3628 if not wdirvfs.exists():
3623 wdirvfs.makedirs()
3629 wdirvfs.makedirs()
3624
3630
3625 hgvfs.makedir(notindexed=True)
3631 hgvfs.makedir(notindexed=True)
3626 if b'sharedrepo' not in createopts:
3632 if b'sharedrepo' not in createopts:
3627 hgvfs.mkdir(b'cache')
3633 hgvfs.mkdir(b'cache')
3628 hgvfs.mkdir(b'wcache')
3634 hgvfs.mkdir(b'wcache')
3629
3635
3630 if b'store' in requirements and b'sharedrepo' not in createopts:
3636 if b'store' in requirements and b'sharedrepo' not in createopts:
3631 hgvfs.mkdir(b'store')
3637 hgvfs.mkdir(b'store')
3632
3638
3633 # We create an invalid changelog outside the store so very old
3639 # We create an invalid changelog outside the store so very old
3634 # Mercurial versions (which didn't know about the requirements
3640 # Mercurial versions (which didn't know about the requirements
3635 # file) encounter an error on reading the changelog. This
3641 # file) encounter an error on reading the changelog. This
3636 # effectively locks out old clients and prevents them from
3642 # effectively locks out old clients and prevents them from
3637 # mucking with a repo in an unknown format.
3643 # mucking with a repo in an unknown format.
3638 #
3644 #
3639 # The revlog header has version 65535, which won't be recognized by
3645 # The revlog header has version 65535, which won't be recognized by
3640 # such old clients.
3646 # such old clients.
3641 hgvfs.append(
3647 hgvfs.append(
3642 b'00changelog.i',
3648 b'00changelog.i',
3643 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3649 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3644 b'layout',
3650 b'layout',
3645 )
3651 )
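
# Illustrative sketch (not part of this change): how a revlog reader decodes
# the four-byte header that the dummy changelog above starts with. The high
# 16 bits are flags, the low 16 bits the format version, so b'\0\0\xFF\xFF'
# parses as version 65535 with no flags -- a version no client recognizes.
import struct

header = struct.unpack(b'>I', b'\0\0\xFF\xFF')[0]
assert header & 0xFFFF == 65535  # format version: unknown to every client
assert header & ~0xFFFF == 0     # flags: none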
3646
3652
3647 # Filter the requirements into working copy and store ones
3653 # Filter the requirements into working copy and store ones
3648 wcreq, storereq = scmutil.filterrequirements(requirements)
3654 wcreq, storereq = scmutil.filterrequirements(requirements)
3649 # write working copy ones
3655 # write working copy ones
3650 scmutil.writerequires(hgvfs, wcreq)
3656 scmutil.writerequires(hgvfs, wcreq)
3651 # If there are store requirements and the current repository
3657 # If there are store requirements and the current repository
3652 # is not a shared one, write the store requirements
3658 # is not a shared one, write the store requirements
3653 # For a new shared repository, we don't need to write the store
3659 # For a new shared repository, we don't need to write the store
3654 # requirements, as they are already present in the shared source's store
3660 # requirements, as they are already present in the shared source's store
3655 if storereq and b'sharedrepo' not in createopts:
3661 if storereq and b'sharedrepo' not in createopts:
3656 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3662 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3657 scmutil.writerequires(storevfs, storereq)
3663 scmutil.writerequires(storevfs, storereq)
3658
3664
3659 # Write out file telling readers where to find the shared store.
3665 # Write out file telling readers where to find the shared store.
3660 if b'sharedrepo' in createopts:
3666 if b'sharedrepo' in createopts:
3661 hgvfs.write(b'sharedpath', sharedpath)
3667 hgvfs.write(b'sharedpath', sharedpath)
3662
3668
3663 if createopts.get(b'shareditems'):
3669 if createopts.get(b'shareditems'):
3664 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3670 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3665 hgvfs.write(b'shared', shared)
3671 hgvfs.write(b'shared', shared)
3666
3672
3667
3673
3668 def poisonrepository(repo):
3674 def poisonrepository(repo):
3669 """Poison a repository instance so it can no longer be used."""
3675 """Poison a repository instance so it can no longer be used."""
3670 # Perform any cleanup on the instance.
3676 # Perform any cleanup on the instance.
3671 repo.close()
3677 repo.close()
3672
3678
3673 # Our strategy is to replace the type of the object with one that
3679 # Our strategy is to replace the type of the object with one that
3674 # has all attribute lookups result in error.
3680 # has all attribute lookups result in error.
3675 #
3681 #
3676 # But we have to allow the close() method because some constructors
3682 # But we have to allow the close() method because some constructors
3677 # of repos call close() on repo references.
3683 # of repos call close() on repo references.
3678 class poisonedrepository(object):
3684 class poisonedrepository(object):
3679 def __getattribute__(self, item):
3685 def __getattribute__(self, item):
3680 if item == 'close':
3686 if item == 'close':
3681 return object.__getattribute__(self, item)
3687 return object.__getattribute__(self, item)
3682
3688
3683 raise error.ProgrammingError(
3689 raise error.ProgrammingError(
3684 b'repo instances should not be used after unshare'
3690 b'repo instances should not be used after unshare'
3685 )
3691 )
3686
3692
3687 def close(self):
3693 def close(self):
3688 pass
3694 pass
3689
3695
3690 # We may have a repoview, which intercepts __setattr__. So be sure
3696 # We may have a repoview, which intercepts __setattr__. So be sure
3691 # we operate at the lowest level possible.
3697 # we operate at the lowest level possible.
3692 object.__setattr__(repo, '__class__', poisonedrepository)
3698 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,3087 +1,3110 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import zlib
22 import zlib
23
23
24 # import stuff from node for others to import from revlog
24 # import stuff from node for others to import from revlog
25 from .node import (
25 from .node import (
26 bin,
26 bin,
27 hex,
27 hex,
28 nullhex,
28 nullhex,
29 nullid,
29 nullid,
30 nullrev,
30 nullrev,
31 short,
31 short,
32 wdirfilenodeids,
32 wdirfilenodeids,
33 wdirhex,
33 wdirhex,
34 wdirid,
34 wdirid,
35 wdirrev,
35 wdirrev,
36 )
36 )
37 from .i18n import _
37 from .i18n import _
38 from .pycompat import getattr
38 from .pycompat import getattr
39 from .revlogutils.constants import (
39 from .revlogutils.constants import (
40 FLAG_GENERALDELTA,
40 FLAG_GENERALDELTA,
41 FLAG_INLINE_DATA,
41 FLAG_INLINE_DATA,
42 REVLOGV0,
42 REVLOGV0,
43 REVLOGV1,
43 REVLOGV1,
44 REVLOGV1_FLAGS,
44 REVLOGV1_FLAGS,
45 REVLOGV2,
45 REVLOGV2,
46 REVLOGV2_FLAGS,
46 REVLOGV2_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
48 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_FORMAT,
49 REVLOG_DEFAULT_VERSION,
49 REVLOG_DEFAULT_VERSION,
50 )
50 )
51 from .revlogutils.flagutil import (
51 from .revlogutils.flagutil import (
52 REVIDX_DEFAULT_FLAGS,
52 REVIDX_DEFAULT_FLAGS,
53 REVIDX_ELLIPSIS,
53 REVIDX_ELLIPSIS,
54 REVIDX_EXTSTORED,
54 REVIDX_EXTSTORED,
55 REVIDX_FLAGS_ORDER,
55 REVIDX_FLAGS_ORDER,
56 REVIDX_HASCOPIESINFO,
56 REVIDX_HASCOPIESINFO,
57 REVIDX_ISCENSORED,
57 REVIDX_ISCENSORED,
58 REVIDX_RAWTEXT_CHANGING_FLAGS,
58 REVIDX_RAWTEXT_CHANGING_FLAGS,
59 REVIDX_SIDEDATA,
59 REVIDX_SIDEDATA,
60 )
60 )
61 from .thirdparty import attr
61 from .thirdparty import attr
62 from . import (
62 from . import (
63 ancestor,
63 ancestor,
64 dagop,
64 dagop,
65 error,
65 error,
66 mdiff,
66 mdiff,
67 policy,
67 policy,
68 pycompat,
68 pycompat,
69 templatefilters,
69 templatefilters,
70 util,
70 util,
71 )
71 )
72 from .interfaces import (
72 from .interfaces import (
73 repository,
73 repository,
74 util as interfaceutil,
74 util as interfaceutil,
75 )
75 )
76 from .revlogutils import (
76 from .revlogutils import (
77 deltas as deltautil,
77 deltas as deltautil,
78 flagutil,
78 flagutil,
79 nodemap as nodemaputil,
79 nodemap as nodemaputil,
80 sidedata as sidedatautil,
80 sidedata as sidedatautil,
81 )
81 )
82 from .utils import (
82 from .utils import (
83 storageutil,
83 storageutil,
84 stringutil,
84 stringutil,
85 )
85 )
86
86
87 # blanked usage of all the names to prevent pyflakes complaints
87 # blanked usage of all the names to prevent pyflakes complaints
88 # We need these names available in the module for extensions.
88 # We need these names available in the module for extensions.
89 REVLOGV0
89 REVLOGV0
90 REVLOGV1
90 REVLOGV1
91 REVLOGV2
91 REVLOGV2
92 FLAG_INLINE_DATA
92 FLAG_INLINE_DATA
93 FLAG_GENERALDELTA
93 FLAG_GENERALDELTA
94 REVLOG_DEFAULT_FLAGS
94 REVLOG_DEFAULT_FLAGS
95 REVLOG_DEFAULT_FORMAT
95 REVLOG_DEFAULT_FORMAT
96 REVLOG_DEFAULT_VERSION
96 REVLOG_DEFAULT_VERSION
97 REVLOGV1_FLAGS
97 REVLOGV1_FLAGS
98 REVLOGV2_FLAGS
98 REVLOGV2_FLAGS
99 REVIDX_ISCENSORED
99 REVIDX_ISCENSORED
100 REVIDX_ELLIPSIS
100 REVIDX_ELLIPSIS
101 REVIDX_SIDEDATA
101 REVIDX_SIDEDATA
102 REVIDX_HASCOPIESINFO
102 REVIDX_HASCOPIESINFO
103 REVIDX_EXTSTORED
103 REVIDX_EXTSTORED
104 REVIDX_DEFAULT_FLAGS
104 REVIDX_DEFAULT_FLAGS
105 REVIDX_FLAGS_ORDER
105 REVIDX_FLAGS_ORDER
106 REVIDX_RAWTEXT_CHANGING_FLAGS
106 REVIDX_RAWTEXT_CHANGING_FLAGS
107
107
108 parsers = policy.importmod('parsers')
108 parsers = policy.importmod('parsers')
109 rustancestor = policy.importrust('ancestor')
109 rustancestor = policy.importrust('ancestor')
110 rustdagop = policy.importrust('dagop')
110 rustdagop = policy.importrust('dagop')
111 rustrevlog = policy.importrust('revlog')
111 rustrevlog = policy.importrust('revlog')
112
112
113 # Aliased for performance.
113 # Aliased for performance.
114 _zlibdecompress = zlib.decompress
114 _zlibdecompress = zlib.decompress
115
115
116 # max size of revlog with inline data
116 # max size of revlog with inline data
117 _maxinline = 131072
117 _maxinline = 131072
118 _chunksize = 1048576
118 _chunksize = 1048576
119
119
120 # Flag processors for REVIDX_ELLIPSIS.
120 # Flag processors for REVIDX_ELLIPSIS.
121 def ellipsisreadprocessor(rl, text):
121 def ellipsisreadprocessor(rl, text):
122 return text, False, {}
122 return text, False, {}
123
123
124
124
125 def ellipsiswriteprocessor(rl, text, sidedata):
125 def ellipsiswriteprocessor(rl, text, sidedata):
126 return text, False
126 return text, False
127
127
128
128
129 def ellipsisrawprocessor(rl, text):
129 def ellipsisrawprocessor(rl, text):
130 return False
130 return False
131
131
132
132
133 ellipsisprocessor = (
133 ellipsisprocessor = (
134 ellipsisreadprocessor,
134 ellipsisreadprocessor,
135 ellipsiswriteprocessor,
135 ellipsiswriteprocessor,
136 ellipsisrawprocessor,
136 ellipsisrawprocessor,
137 )
137 )
138
138
139
139
140 def getoffset(q):
140 def getoffset(q):
141 return int(q >> 16)
141 return int(q >> 16)
142
142
143
143
144 def gettype(q):
144 def gettype(q):
145 return int(q & 0xFFFF)
145 return int(q & 0xFFFF)
146
146
147
147
148 def offset_type(offset, type):
148 def offset_type(offset, type):
149 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
149 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
150 raise ValueError(b'unknown revlog index flags')
150 raise ValueError(b'unknown revlog index flags')
151 return int(int(offset) << 16 | type)
151 return int(int(offset) << 16 | type)
152
152
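# Illustrative sketch (not part of this change): offset_type() packs a
# 48-bit byte offset and 16 bits of flags into a single integer, and
# getoffset()/gettype() above undo it. The flag value here is hypothetical,
# chosen only for illustration.
FLAG = 1 << 15

packed = (4096 << 16) | FLAG
assert packed >> 16 == 4096      # what getoffset() returns
assert packed & 0xFFFF == FLAG   # what gettype() returns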
153
153
154 def _verify_revision(rl, skipflags, state, node):
154 def _verify_revision(rl, skipflags, state, node):
155 """Verify the integrity of the given revlog ``node`` while providing a hook
155 """Verify the integrity of the given revlog ``node`` while providing a hook
156 point for extensions to influence the operation."""
156 point for extensions to influence the operation."""
157 if skipflags:
157 if skipflags:
158 state[b'skipread'].add(node)
158 state[b'skipread'].add(node)
159 else:
159 else:
160 # Side-effect: read content and verify hash.
160 # Side-effect: read content and verify hash.
161 rl.revision(node)
161 rl.revision(node)
162
162
163
163
164 # True if a fast implementation for persistent-nodemap is available
164 # True if a fast implementation for persistent-nodemap is available
165 #
165 #
166 # We also consider the "pure" python implementation "fast" because people
166 # We also consider the "pure" python implementation "fast" because people
167 # using pure don't really have performance considerations (and a
167 # using pure don't really have performance considerations (and a
168 # wheelbarrow of other slowness sources)
168 # wheelbarrow of other slowness sources)
169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
170 parsers, 'BaseIndexObject'
170 parsers, 'BaseIndexObject'
171 )
171 )
172
172
173
173
174 @attr.s(slots=True, frozen=True)
174 @attr.s(slots=True, frozen=True)
175 class _revisioninfo(object):
175 class _revisioninfo(object):
176 """Information about a revision that allows building its fulltext
176 """Information about a revision that allows building its fulltext
177 node: expected hash of the revision
177 node: expected hash of the revision
178 p1, p2: parent revs of the revision
178 p1, p2: parent revs of the revision
179 btext: built text cache consisting of a one-element list
179 btext: built text cache consisting of a one-element list
180 cachedelta: (baserev, uncompressed_delta) or None
180 cachedelta: (baserev, uncompressed_delta) or None
181 flags: flags associated to the revision storage
181 flags: flags associated to the revision storage
182
182
183 One of btext[0] or cachedelta must be set.
183 One of btext[0] or cachedelta must be set.
184 """
184 """
185
185
186 node = attr.ib()
186 node = attr.ib()
187 p1 = attr.ib()
187 p1 = attr.ib()
188 p2 = attr.ib()
188 p2 = attr.ib()
189 btext = attr.ib()
189 btext = attr.ib()
190 textlen = attr.ib()
190 textlen = attr.ib()
191 cachedelta = attr.ib()
191 cachedelta = attr.ib()
192 flags = attr.ib()
192 flags = attr.ib()
193
193
194
194
195 @interfaceutil.implementer(repository.irevisiondelta)
195 @interfaceutil.implementer(repository.irevisiondelta)
196 @attr.s(slots=True)
196 @attr.s(slots=True)
197 class revlogrevisiondelta(object):
197 class revlogrevisiondelta(object):
198 node = attr.ib()
198 node = attr.ib()
199 p1node = attr.ib()
199 p1node = attr.ib()
200 p2node = attr.ib()
200 p2node = attr.ib()
201 basenode = attr.ib()
201 basenode = attr.ib()
202 flags = attr.ib()
202 flags = attr.ib()
203 baserevisionsize = attr.ib()
203 baserevisionsize = attr.ib()
204 revision = attr.ib()
204 revision = attr.ib()
205 delta = attr.ib()
205 delta = attr.ib()
206 linknode = attr.ib(default=None)
206 linknode = attr.ib(default=None)
207
207
208
208
209 @interfaceutil.implementer(repository.iverifyproblem)
209 @interfaceutil.implementer(repository.iverifyproblem)
210 @attr.s(frozen=True)
210 @attr.s(frozen=True)
211 class revlogproblem(object):
211 class revlogproblem(object):
212 warning = attr.ib(default=None)
212 warning = attr.ib(default=None)
213 error = attr.ib(default=None)
213 error = attr.ib(default=None)
214 node = attr.ib(default=None)
214 node = attr.ib(default=None)
215
215
216
216
217 # index v0:
217 # index v0:
218 # 4 bytes: offset
218 # 4 bytes: offset
219 # 4 bytes: compressed length
219 # 4 bytes: compressed length
220 # 4 bytes: base rev
220 # 4 bytes: base rev
221 # 4 bytes: link rev
221 # 4 bytes: link rev
222 # 20 bytes: parent 1 nodeid
222 # 20 bytes: parent 1 nodeid
223 # 20 bytes: parent 2 nodeid
223 # 20 bytes: parent 2 nodeid
224 # 20 bytes: nodeid
224 # 20 bytes: nodeid
225 indexformatv0 = struct.Struct(b">4l20s20s20s")
225 indexformatv0 = struct.Struct(b">4l20s20s20s")
226 indexformatv0_pack = indexformatv0.pack
226 indexformatv0_pack = indexformatv0.pack
227 indexformatv0_unpack = indexformatv0.unpack
227 indexformatv0_unpack = indexformatv0.unpack
228
228
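# Illustrative sketch (not part of this change): round-trip one v0 index
# entry through the struct documented above; 4 signed 32-bit ints plus
# three 20-byte nodeids make each entry 76 bytes. All values are dummies.
import struct

fmt_v0 = struct.Struct(b'>4l20s20s20s')
entry = (0, 120, 0, 0, b'\0' * 20, b'\0' * 20, b'\x11' * 20)
assert fmt_v0.size == 76
assert fmt_v0.unpack(fmt_v0.pack(*entry)) == entry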
229
229
230 class revlogoldindex(list):
230 class revlogoldindex(list):
231 @property
231 @property
232 def nodemap(self):
232 def nodemap(self):
233 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
233 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
234 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
234 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
235 return self._nodemap
235 return self._nodemap
236
236
237 @util.propertycache
237 @util.propertycache
238 def _nodemap(self):
238 def _nodemap(self):
239 nodemap = nodemaputil.NodeMap({nullid: nullrev})
239 nodemap = nodemaputil.NodeMap({nullid: nullrev})
240 for r in range(0, len(self)):
240 for r in range(0, len(self)):
241 n = self[r][7]
241 n = self[r][7]
242 nodemap[n] = r
242 nodemap[n] = r
243 return nodemap
243 return nodemap
244
244
245 def has_node(self, node):
245 def has_node(self, node):
246 """return True if the node exist in the index"""
246 """return True if the node exist in the index"""
247 return node in self._nodemap
247 return node in self._nodemap
248
248
249 def rev(self, node):
249 def rev(self, node):
250 """return a revision for a node
250 """return a revision for a node
251
251
252 If the node is unknown, raise a RevlogError"""
252 If the node is unknown, raise a RevlogError"""
253 return self._nodemap[node]
253 return self._nodemap[node]
254
254
255 def get_rev(self, node):
255 def get_rev(self, node):
256 """return a revision for a node
256 """return a revision for a node
257
257
258 If the node is unknown, return None"""
258 If the node is unknown, return None"""
259 return self._nodemap.get(node)
259 return self._nodemap.get(node)
260
260
261 def append(self, tup):
261 def append(self, tup):
262 self._nodemap[tup[7]] = len(self)
262 self._nodemap[tup[7]] = len(self)
263 super(revlogoldindex, self).append(tup)
263 super(revlogoldindex, self).append(tup)
264
264
265 def __delitem__(self, i):
265 def __delitem__(self, i):
266 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
266 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
267 raise ValueError(b"deleting slices only supports a:-1 with step 1")
267 raise ValueError(b"deleting slices only supports a:-1 with step 1")
268 for r in pycompat.xrange(i.start, len(self)):
268 for r in pycompat.xrange(i.start, len(self)):
269 del self._nodemap[self[r][7]]
269 del self._nodemap[self[r][7]]
270 super(revlogoldindex, self).__delitem__(i)
270 super(revlogoldindex, self).__delitem__(i)
271
271
272 def clearcaches(self):
272 def clearcaches(self):
273 self.__dict__.pop('_nodemap', None)
273 self.__dict__.pop('_nodemap', None)
274
274
275 def __getitem__(self, i):
275 def __getitem__(self, i):
276 if i == -1:
276 if i == -1:
277 return (0, 0, 0, -1, -1, -1, -1, nullid)
277 return (0, 0, 0, -1, -1, -1, -1, nullid)
278 return list.__getitem__(self, i)
278 return list.__getitem__(self, i)
279
279
280
280
281 class revlogoldio(object):
281 class revlogoldio(object):
282 def __init__(self):
282 def __init__(self):
283 self.size = indexformatv0.size
283 self.size = indexformatv0.size
284
284
285 def parseindex(self, data, inline):
285 def parseindex(self, data, inline):
286 s = self.size
286 s = self.size
287 index = []
287 index = []
288 nodemap = nodemaputil.NodeMap({nullid: nullrev})
288 nodemap = nodemaputil.NodeMap({nullid: nullrev})
289 n = off = 0
289 n = off = 0
290 l = len(data)
290 l = len(data)
291 while off + s <= l:
291 while off + s <= l:
292 cur = data[off : off + s]
292 cur = data[off : off + s]
293 off += s
293 off += s
294 e = indexformatv0_unpack(cur)
294 e = indexformatv0_unpack(cur)
295 # transform to revlogv1 format
295 # transform to revlogv1 format
296 e2 = (
296 e2 = (
297 offset_type(e[0], 0),
297 offset_type(e[0], 0),
298 e[1],
298 e[1],
299 -1,
299 -1,
300 e[2],
300 e[2],
301 e[3],
301 e[3],
302 nodemap.get(e[4], nullrev),
302 nodemap.get(e[4], nullrev),
303 nodemap.get(e[5], nullrev),
303 nodemap.get(e[5], nullrev),
304 e[6],
304 e[6],
305 )
305 )
306 index.append(e2)
306 index.append(e2)
307 nodemap[e[6]] = n
307 nodemap[e[6]] = n
308 n += 1
308 n += 1
309
309
310 index = revlogoldindex(index)
310 index = revlogoldindex(index)
311 return index, None
311 return index, None
312
312
313 def packentry(self, entry, node, version, rev):
313 def packentry(self, entry, node, version, rev):
314 if gettype(entry[0]):
314 if gettype(entry[0]):
315 raise error.RevlogError(
315 raise error.RevlogError(
316 _(b'index entry flags need revlog version 1')
316 _(b'index entry flags need revlog version 1')
317 )
317 )
318 e2 = (
318 e2 = (
319 getoffset(entry[0]),
319 getoffset(entry[0]),
320 entry[1],
320 entry[1],
321 entry[3],
321 entry[3],
322 entry[4],
322 entry[4],
323 node(entry[5]),
323 node(entry[5]),
324 node(entry[6]),
324 node(entry[6]),
325 entry[7],
325 entry[7],
326 )
326 )
327 return indexformatv0_pack(*e2)
327 return indexformatv0_pack(*e2)
328
328
329
329
330 # index ng:
330 # index ng:
331 # 6 bytes: offset
331 # 6 bytes: offset
332 # 2 bytes: flags
332 # 2 bytes: flags
333 # 4 bytes: compressed length
333 # 4 bytes: compressed length
334 # 4 bytes: uncompressed length
334 # 4 bytes: uncompressed length
335 # 4 bytes: base rev
335 # 4 bytes: base rev
336 # 4 bytes: link rev
336 # 4 bytes: link rev
337 # 4 bytes: parent 1 rev
337 # 4 bytes: parent 1 rev
338 # 4 bytes: parent 2 rev
338 # 4 bytes: parent 2 rev
339 # 32 bytes: nodeid
339 # 32 bytes: nodeid
340 indexformatng = struct.Struct(b">Qiiiiii20s12x")
340 indexformatng = struct.Struct(b">Qiiiiii20s12x")
341 indexformatng_pack = indexformatng.pack
341 indexformatng_pack = indexformatng.pack
342 versionformat = struct.Struct(b">I")
342 versionformat = struct.Struct(b">I")
343 versionformat_pack = versionformat.pack
343 versionformat_pack = versionformat.pack
344 versionformat_unpack = versionformat.unpack
344 versionformat_unpack = versionformat.unpack
345
345
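# Illustrative sketch (not part of this change): an "ng" index entry is
# 64 bytes (8 + 6*4 + 20 + 12 bytes of padding). As packentry() below does
# for rev 0, the first four bytes of the very first entry are overwritten
# with the version header; readers therefore treat rev 0's offset specially.
import struct

fmt_ng = struct.Struct(b'>Qiiiiii20s12x')
assert fmt_ng.size == 64

entry = (0, 11, 11, 0, 0, -1, -1, b'\x22' * 20)  # dummy rev 0
p = fmt_ng.pack(*entry)
p = struct.pack(b'>I', 1) + p[4:]  # stamp REVLOGV1, as packentry() does
assert len(p) == 64
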
346 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
346 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
347 # signed integer)
347 # signed integer)
348 _maxentrysize = 0x7FFFFFFF
348 _maxentrysize = 0x7FFFFFFF
349
349
350
350
351 class revlogio(object):
351 class revlogio(object):
352 def __init__(self):
352 def __init__(self):
353 self.size = indexformatng.size
353 self.size = indexformatng.size
354
354
355 def parseindex(self, data, inline):
355 def parseindex(self, data, inline):
356 # call the C implementation to parse the index data
356 # call the C implementation to parse the index data
357 index, cache = parsers.parse_index2(data, inline)
357 index, cache = parsers.parse_index2(data, inline)
358 return index, cache
358 return index, cache
359
359
360 def packentry(self, entry, node, version, rev):
360 def packentry(self, entry, node, version, rev):
361 p = indexformatng_pack(*entry)
361 p = indexformatng_pack(*entry)
362 if rev == 0:
362 if rev == 0:
363 p = versionformat_pack(version) + p[4:]
363 p = versionformat_pack(version) + p[4:]
364 return p
364 return p
365
365
366
366
367 NodemapRevlogIO = None
367 NodemapRevlogIO = None
368
368
369 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
369 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
370
370
371 class NodemapRevlogIO(revlogio):
371 class NodemapRevlogIO(revlogio):
372 """A debug oriented IO class that return a PersistentNodeMapIndexObject
372 """A debug oriented IO class that return a PersistentNodeMapIndexObject
373
373
374 The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
374 The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
375 """
375 """
376
376
377 def parseindex(self, data, inline):
377 def parseindex(self, data, inline):
378 index, cache = parsers.parse_index_devel_nodemap(data, inline)
378 index, cache = parsers.parse_index_devel_nodemap(data, inline)
379 return index, cache
379 return index, cache
380
380
381
381
382 class rustrevlogio(revlogio):
382 class rustrevlogio(revlogio):
383 def parseindex(self, data, inline):
383 def parseindex(self, data, inline):
384 index, cache = super(rustrevlogio, self).parseindex(data, inline)
384 index, cache = super(rustrevlogio, self).parseindex(data, inline)
385 return rustrevlog.MixedIndex(index), cache
385 return rustrevlog.MixedIndex(index), cache
386
386
387
387
388 class revlog(object):
388 class revlog(object):
389 """
389 """
390 the underlying revision storage object
390 the underlying revision storage object
391
391
392 A revlog consists of two parts, an index and the revision data.
392 A revlog consists of two parts, an index and the revision data.
393
393
394 The index is a file with a fixed record size containing
394 The index is a file with a fixed record size containing
395 information on each revision, including its nodeid (hash), the
395 information on each revision, including its nodeid (hash), the
396 nodeids of its parents, the position and offset of its data within
396 nodeids of its parents, the position and offset of its data within
397 the data file, and the revision it's based on. Finally, each entry
397 the data file, and the revision it's based on. Finally, each entry
398 contains a linkrev entry that can serve as a pointer to external
398 contains a linkrev entry that can serve as a pointer to external
399 data.
399 data.
400
400
401 The revision data itself is a linear collection of data chunks.
401 The revision data itself is a linear collection of data chunks.
402 Each chunk represents a revision and is usually represented as a
402 Each chunk represents a revision and is usually represented as a
403 delta against the previous chunk. To bound lookup time, runs of
403 delta against the previous chunk. To bound lookup time, runs of
404 deltas are limited to about 2 times the length of the original
404 deltas are limited to about 2 times the length of the original
405 version data. This makes retrieval of a version proportional to
405 version data. This makes retrieval of a version proportional to
406 its size, or O(1) relative to the number of revisions.
406 its size, or O(1) relative to the number of revisions.
407
407
408 Both pieces of the revlog are written to in an append-only
408 Both pieces of the revlog are written to in an append-only
409 fashion, which means we never need to rewrite a file to insert or
409 fashion, which means we never need to rewrite a file to insert or
410 remove data, and can use some simple techniques to avoid the need
410 remove data, and can use some simple techniques to avoid the need
411 for locking while reading.
411 for locking while reading.
412
412
413 If checkambig, indexfile is opened with checkambig=True at
413 If checkambig, indexfile is opened with checkambig=True at
414 writing, to avoid file stat ambiguity.
414 writing, to avoid file stat ambiguity.
415
415
416 If mmaplargeindex is True, and an mmapindexthreshold is set, the
416 If mmaplargeindex is True, and an mmapindexthreshold is set, the
417 index will be mmapped rather than read if it is larger than the
417 index will be mmapped rather than read if it is larger than the
418 configured threshold.
418 configured threshold.
419
419
420 If censorable is True, the revlog can have censored revisions.
420 If censorable is True, the revlog can have censored revisions.
421
421
422 If `upperboundcomp` is not None, this is the expected maximal gain from
422 If `upperboundcomp` is not None, this is the expected maximal gain from
423 compression for the data content.
423 compression for the data content.
424
425 `concurrencychecker` is an optional function that receives 3 arguments: a
426 file handle, a filename, and an expected position. It should check whether
427 the current position in the file handle is valid, and log, warn, or fail
428 (by raising) when it is not.
424 """
429 """
425
430
426 _flagserrorclass = error.RevlogError
431 _flagserrorclass = error.RevlogError
427
432
428 def __init__(
433 def __init__(
429 self,
434 self,
430 opener,
435 opener,
431 indexfile,
436 indexfile,
432 datafile=None,
437 datafile=None,
433 checkambig=False,
438 checkambig=False,
434 mmaplargeindex=False,
439 mmaplargeindex=False,
435 censorable=False,
440 censorable=False,
436 upperboundcomp=None,
441 upperboundcomp=None,
437 persistentnodemap=False,
442 persistentnodemap=False,
443 concurrencychecker=None,
438 ):
444 ):
439 """
445 """
440 create a revlog object
446 create a revlog object
441
447
442 opener is a function that abstracts the file opening operation
448 opener is a function that abstracts the file opening operation
443 and can be used to implement COW semantics or the like.
449 and can be used to implement COW semantics or the like.
444
450
445 """
451 """
446 self.upperboundcomp = upperboundcomp
452 self.upperboundcomp = upperboundcomp
447 self.indexfile = indexfile
453 self.indexfile = indexfile
448 self.datafile = datafile or (indexfile[:-2] + b".d")
454 self.datafile = datafile or (indexfile[:-2] + b".d")
449 self.nodemap_file = None
455 self.nodemap_file = None
450 if persistentnodemap:
456 if persistentnodemap:
451 self.nodemap_file = nodemaputil.get_nodemap_file(
457 self.nodemap_file = nodemaputil.get_nodemap_file(
452 opener, self.indexfile
458 opener, self.indexfile
453 )
459 )
454
460
455 self.opener = opener
461 self.opener = opener
456 # When True, indexfile is opened with checkambig=True at writing, to
462 # When True, indexfile is opened with checkambig=True at writing, to
457 # avoid file stat ambiguity.
463 # avoid file stat ambiguity.
458 self._checkambig = checkambig
464 self._checkambig = checkambig
459 self._mmaplargeindex = mmaplargeindex
465 self._mmaplargeindex = mmaplargeindex
460 self._censorable = censorable
466 self._censorable = censorable
461 # 3-tuple of (node, rev, text) for a raw revision.
467 # 3-tuple of (node, rev, text) for a raw revision.
462 self._revisioncache = None
468 self._revisioncache = None
463 # Maps rev to chain base rev.
469 # Maps rev to chain base rev.
464 self._chainbasecache = util.lrucachedict(100)
470 self._chainbasecache = util.lrucachedict(100)
465 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
471 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
466 self._chunkcache = (0, b'')
472 self._chunkcache = (0, b'')
467 # How much data to read and cache into the raw revlog data cache.
473 # How much data to read and cache into the raw revlog data cache.
468 self._chunkcachesize = 65536
474 self._chunkcachesize = 65536
469 self._maxchainlen = None
475 self._maxchainlen = None
470 self._deltabothparents = True
476 self._deltabothparents = True
471 self.index = None
477 self.index = None
472 self._nodemap_docket = None
478 self._nodemap_docket = None
473 # Mapping of partial identifiers to full nodes.
479 # Mapping of partial identifiers to full nodes.
474 self._pcache = {}
480 self._pcache = {}
475 # Mapping of revision integer to full node.
481 # Mapping of revision integer to full node.
476 self._compengine = b'zlib'
482 self._compengine = b'zlib'
477 self._compengineopts = {}
483 self._compengineopts = {}
478 self._maxdeltachainspan = -1
484 self._maxdeltachainspan = -1
479 self._withsparseread = False
485 self._withsparseread = False
480 self._sparserevlog = False
486 self._sparserevlog = False
481 self._srdensitythreshold = 0.50
487 self._srdensitythreshold = 0.50
482 self._srmingapsize = 262144
488 self._srmingapsize = 262144
483
489
484 # Make copy of flag processors so each revlog instance can support
490 # Make copy of flag processors so each revlog instance can support
485 # custom flags.
491 # custom flags.
486 self._flagprocessors = dict(flagutil.flagprocessors)
492 self._flagprocessors = dict(flagutil.flagprocessors)
487
493
488 # 2-tuple of file handles being used for active writing.
494 # 2-tuple of file handles being used for active writing.
489 self._writinghandles = None
495 self._writinghandles = None
490
496
491 self._loadindex()
497 self._loadindex()
492
498
499 self._concurrencychecker = concurrencychecker
500
493 def _loadindex(self):
501 def _loadindex(self):
494 mmapindexthreshold = None
502 mmapindexthreshold = None
495 opts = self.opener.options
503 opts = self.opener.options
496
504
497 if b'revlogv2' in opts:
505 if b'revlogv2' in opts:
498 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
506 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
499 elif b'revlogv1' in opts:
507 elif b'revlogv1' in opts:
500 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
508 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
501 if b'generaldelta' in opts:
509 if b'generaldelta' in opts:
502 newversionflags |= FLAG_GENERALDELTA
510 newversionflags |= FLAG_GENERALDELTA
503 elif b'revlogv0' in self.opener.options:
511 elif b'revlogv0' in self.opener.options:
504 newversionflags = REVLOGV0
512 newversionflags = REVLOGV0
505 else:
513 else:
506 newversionflags = REVLOG_DEFAULT_VERSION
514 newversionflags = REVLOG_DEFAULT_VERSION
507
515
508 if b'chunkcachesize' in opts:
516 if b'chunkcachesize' in opts:
509 self._chunkcachesize = opts[b'chunkcachesize']
517 self._chunkcachesize = opts[b'chunkcachesize']
510 if b'maxchainlen' in opts:
518 if b'maxchainlen' in opts:
511 self._maxchainlen = opts[b'maxchainlen']
519 self._maxchainlen = opts[b'maxchainlen']
512 if b'deltabothparents' in opts:
520 if b'deltabothparents' in opts:
513 self._deltabothparents = opts[b'deltabothparents']
521 self._deltabothparents = opts[b'deltabothparents']
514 self._lazydelta = bool(opts.get(b'lazydelta', True))
522 self._lazydelta = bool(opts.get(b'lazydelta', True))
515 self._lazydeltabase = False
523 self._lazydeltabase = False
516 if self._lazydelta:
524 if self._lazydelta:
517 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
525 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
518 if b'compengine' in opts:
526 if b'compengine' in opts:
519 self._compengine = opts[b'compengine']
527 self._compengine = opts[b'compengine']
520 if b'zlib.level' in opts:
528 if b'zlib.level' in opts:
521 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
529 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
522 if b'zstd.level' in opts:
530 if b'zstd.level' in opts:
523 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
531 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
524 if b'maxdeltachainspan' in opts:
532 if b'maxdeltachainspan' in opts:
525 self._maxdeltachainspan = opts[b'maxdeltachainspan']
533 self._maxdeltachainspan = opts[b'maxdeltachainspan']
526 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
534 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
527 mmapindexthreshold = opts[b'mmapindexthreshold']
535 mmapindexthreshold = opts[b'mmapindexthreshold']
528 self.hassidedata = bool(opts.get(b'side-data', False))
536 self.hassidedata = bool(opts.get(b'side-data', False))
529 if self.hassidedata:
537 if self.hassidedata:
530 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
538 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
531 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
539 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
532 withsparseread = bool(opts.get(b'with-sparse-read', False))
540 withsparseread = bool(opts.get(b'with-sparse-read', False))
533 # sparse-revlog forces sparse-read
541 # sparse-revlog forces sparse-read
534 self._withsparseread = self._sparserevlog or withsparseread
542 self._withsparseread = self._sparserevlog or withsparseread
535 if b'sparse-read-density-threshold' in opts:
543 if b'sparse-read-density-threshold' in opts:
536 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
544 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
537 if b'sparse-read-min-gap-size' in opts:
545 if b'sparse-read-min-gap-size' in opts:
538 self._srmingapsize = opts[b'sparse-read-min-gap-size']
546 self._srmingapsize = opts[b'sparse-read-min-gap-size']
539 if opts.get(b'enableellipsis'):
547 if opts.get(b'enableellipsis'):
540 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
548 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
541
549
542 # revlog v0 doesn't have flag processors
550 # revlog v0 doesn't have flag processors
543 for flag, processor in pycompat.iteritems(
551 for flag, processor in pycompat.iteritems(
544 opts.get(b'flagprocessors', {})
552 opts.get(b'flagprocessors', {})
545 ):
553 ):
546 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
554 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
547
555
548 if self._chunkcachesize <= 0:
556 if self._chunkcachesize <= 0:
549 raise error.RevlogError(
557 raise error.RevlogError(
550 _(b'revlog chunk cache size %r is not greater than 0')
558 _(b'revlog chunk cache size %r is not greater than 0')
551 % self._chunkcachesize
559 % self._chunkcachesize
552 )
560 )
553 elif self._chunkcachesize & (self._chunkcachesize - 1):
561 elif self._chunkcachesize & (self._chunkcachesize - 1):
554 raise error.RevlogError(
562 raise error.RevlogError(
555 _(b'revlog chunk cache size %r is not a power of 2')
563 _(b'revlog chunk cache size %r is not a power of 2')
556 % self._chunkcachesize
564 % self._chunkcachesize
557 )
565 )
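
# Illustrative sketch of the bit trick above (not part of this change):
# for n > 0, n & (n - 1) clears the lowest set bit, so the result is zero
# exactly when n is a power of two.
for n in (1, 2, 65536):
    assert n & (n - 1) == 0
for n in (3, 6, 65535):
    assert n & (n - 1) != 0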
558
566
559 indexdata = b''
567 indexdata = b''
560 self._initempty = True
568 self._initempty = True
561 try:
569 try:
562 with self._indexfp() as f:
570 with self._indexfp() as f:
563 if (
571 if (
564 mmapindexthreshold is not None
572 mmapindexthreshold is not None
565 and self.opener.fstat(f).st_size >= mmapindexthreshold
573 and self.opener.fstat(f).st_size >= mmapindexthreshold
566 ):
574 ):
567 # TODO: should .close() to release resources without
575 # TODO: should .close() to release resources without
568 # relying on Python GC
576 # relying on Python GC
569 indexdata = util.buffer(util.mmapread(f))
577 indexdata = util.buffer(util.mmapread(f))
570 else:
578 else:
571 indexdata = f.read()
579 indexdata = f.read()
572 if len(indexdata) > 0:
580 if len(indexdata) > 0:
573 versionflags = versionformat_unpack(indexdata[:4])[0]
581 versionflags = versionformat_unpack(indexdata[:4])[0]
574 self._initempty = False
582 self._initempty = False
575 else:
583 else:
576 versionflags = newversionflags
584 versionflags = newversionflags
577 except IOError as inst:
585 except IOError as inst:
578 if inst.errno != errno.ENOENT:
586 if inst.errno != errno.ENOENT:
579 raise
587 raise
580
588
581 versionflags = newversionflags
589 versionflags = newversionflags
582
590
583 self.version = versionflags
591 self.version = versionflags
584
592
585 flags = versionflags & ~0xFFFF
593 flags = versionflags & ~0xFFFF
586 fmt = versionflags & 0xFFFF
594 fmt = versionflags & 0xFFFF
587
595
588 if fmt == REVLOGV0:
596 if fmt == REVLOGV0:
589 if flags:
597 if flags:
590 raise error.RevlogError(
598 raise error.RevlogError(
591 _(b'unknown flags (%#04x) in version %d revlog %s')
599 _(b'unknown flags (%#04x) in version %d revlog %s')
592 % (flags >> 16, fmt, self.indexfile)
600 % (flags >> 16, fmt, self.indexfile)
593 )
601 )
594
602
595 self._inline = False
603 self._inline = False
596 self._generaldelta = False
604 self._generaldelta = False
597
605
598 elif fmt == REVLOGV1:
606 elif fmt == REVLOGV1:
599 if flags & ~REVLOGV1_FLAGS:
607 if flags & ~REVLOGV1_FLAGS:
600 raise error.RevlogError(
608 raise error.RevlogError(
601 _(b'unknown flags (%#04x) in version %d revlog %s')
609 _(b'unknown flags (%#04x) in version %d revlog %s')
602 % (flags >> 16, fmt, self.indexfile)
610 % (flags >> 16, fmt, self.indexfile)
603 )
611 )
604
612
605 self._inline = versionflags & FLAG_INLINE_DATA
613 self._inline = versionflags & FLAG_INLINE_DATA
606 self._generaldelta = versionflags & FLAG_GENERALDELTA
614 self._generaldelta = versionflags & FLAG_GENERALDELTA
607
615
608 elif fmt == REVLOGV2:
616 elif fmt == REVLOGV2:
609 if flags & ~REVLOGV2_FLAGS:
617 if flags & ~REVLOGV2_FLAGS:
610 raise error.RevlogError(
618 raise error.RevlogError(
611 _(b'unknown flags (%#04x) in version %d revlog %s')
619 _(b'unknown flags (%#04x) in version %d revlog %s')
612 % (flags >> 16, fmt, self.indexfile)
620 % (flags >> 16, fmt, self.indexfile)
613 )
621 )
614
622
615 self._inline = versionflags & FLAG_INLINE_DATA
623 self._inline = versionflags & FLAG_INLINE_DATA
616 # generaldelta implied by version 2 revlogs.
624 # generaldelta implied by version 2 revlogs.
617 self._generaldelta = True
625 self._generaldelta = True
618
626
619 else:
627 else:
620 raise error.RevlogError(
628 raise error.RevlogError(
621 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
629 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
622 )
630 )
623 # sparse-revlog can't be on without general-delta (issue6056)
631 # sparse-revlog can't be on without general-delta (issue6056)
624 if not self._generaldelta:
632 if not self._generaldelta:
625 self._sparserevlog = False
633 self._sparserevlog = False
626
634
627 self._storedeltachains = True
635 self._storedeltachains = True
628
636
629 devel_nodemap = (
637 devel_nodemap = (
630 self.nodemap_file
638 self.nodemap_file
631 and opts.get(b'devel-force-nodemap', False)
639 and opts.get(b'devel-force-nodemap', False)
632 and NodemapRevlogIO is not None
640 and NodemapRevlogIO is not None
633 )
641 )
634
642
635 use_rust_index = False
643 use_rust_index = False
636 if rustrevlog is not None:
644 if rustrevlog is not None:
637 if self.nodemap_file is not None:
645 if self.nodemap_file is not None:
638 use_rust_index = True
646 use_rust_index = True
639 else:
647 else:
640 use_rust_index = self.opener.options.get(b'rust.index')
648 use_rust_index = self.opener.options.get(b'rust.index')
641
649
642 self._io = revlogio()
650 self._io = revlogio()
643 if self.version == REVLOGV0:
651 if self.version == REVLOGV0:
644 self._io = revlogoldio()
652 self._io = revlogoldio()
645 elif devel_nodemap:
653 elif devel_nodemap:
646 self._io = NodemapRevlogIO()
654 self._io = NodemapRevlogIO()
647 elif use_rust_index:
655 elif use_rust_index:
648 self._io = rustrevlogio()
656 self._io = rustrevlogio()
649 try:
657 try:
650 d = self._io.parseindex(indexdata, self._inline)
658 d = self._io.parseindex(indexdata, self._inline)
651 index, _chunkcache = d
659 index, _chunkcache = d
652 use_nodemap = (
660 use_nodemap = (
653 not self._inline
661 not self._inline
654 and self.nodemap_file is not None
662 and self.nodemap_file is not None
655 and util.safehasattr(index, 'update_nodemap_data')
663 and util.safehasattr(index, 'update_nodemap_data')
656 )
664 )
657 if use_nodemap:
665 if use_nodemap:
658 nodemap_data = nodemaputil.persisted_data(self)
666 nodemap_data = nodemaputil.persisted_data(self)
659 if nodemap_data is not None:
667 if nodemap_data is not None:
660 docket = nodemap_data[0]
668 docket = nodemap_data[0]
661 if (
669 if (
662 len(d[0]) > docket.tip_rev
670 len(d[0]) > docket.tip_rev
663 and d[0][docket.tip_rev][7] == docket.tip_node
671 and d[0][docket.tip_rev][7] == docket.tip_node
664 ):
672 ):
665 # no changelog tampering
673 # no changelog tampering
666 self._nodemap_docket = docket
674 self._nodemap_docket = docket
667 index.update_nodemap_data(*nodemap_data)
675 index.update_nodemap_data(*nodemap_data)
668 except (ValueError, IndexError):
676 except (ValueError, IndexError):
669 raise error.RevlogError(
677 raise error.RevlogError(
670 _(b"index %s is corrupted") % self.indexfile
678 _(b"index %s is corrupted") % self.indexfile
671 )
679 )
672 self.index, self._chunkcache = d
680 self.index, self._chunkcache = d
673 if not self._chunkcache:
681 if not self._chunkcache:
674 self._chunkclear()
682 self._chunkclear()
675 # revnum -> (chain-length, sum-delta-length)
683 # revnum -> (chain-length, sum-delta-length)
676 self._chaininfocache = util.lrucachedict(500)
684 self._chaininfocache = util.lrucachedict(500)
677 # revlog header -> revlog compressor
685 # revlog header -> revlog compressor
678 self._decompressors = {}
686 self._decompressors = {}
679
687
680 @util.propertycache
688 @util.propertycache
681 def _compressor(self):
689 def _compressor(self):
682 engine = util.compengines[self._compengine]
690 engine = util.compengines[self._compengine]
683 return engine.revlogcompressor(self._compengineopts)
691 return engine.revlogcompressor(self._compengineopts)
684
692
685 def _indexfp(self, mode=b'r'):
693 def _indexfp(self, mode=b'r'):
686 """file object for the revlog's index file"""
694 """file object for the revlog's index file"""
687 args = {'mode': mode}
695 args = {'mode': mode}
688 if mode != b'r':
696 if mode != b'r':
689 args['checkambig'] = self._checkambig
697 args['checkambig'] = self._checkambig
690 if mode == b'w':
698 if mode == b'w':
691 args['atomictemp'] = True
699 args['atomictemp'] = True
692 return self.opener(self.indexfile, **args)
700 return self.opener(self.indexfile, **args)
693
701
694 def _datafp(self, mode=b'r'):
702 def _datafp(self, mode=b'r'):
695 """file object for the revlog's data file"""
703 """file object for the revlog's data file"""
696 return self.opener(self.datafile, mode=mode)
704 return self.opener(self.datafile, mode=mode)
697
705
698 @contextlib.contextmanager
706 @contextlib.contextmanager
699 def _datareadfp(self, existingfp=None):
707 def _datareadfp(self, existingfp=None):
700 """file object suitable to read data"""
708 """file object suitable to read data"""
701 # Use explicit file handle, if given.
709 # Use explicit file handle, if given.
702 if existingfp is not None:
710 if existingfp is not None:
703 yield existingfp
711 yield existingfp
704
712
705 # Use a file handle being actively used for writes, if available.
713 # Use a file handle being actively used for writes, if available.
706 # There is some danger to doing this because reads will seek the
714 # There is some danger to doing this because reads will seek the
707 # file. However, _writeentry() performs a SEEK_END before all writes,
715 # file. However, _writeentry() performs a SEEK_END before all writes,
708 # so we should be safe.
716 # so we should be safe.
709 elif self._writinghandles:
717 elif self._writinghandles:
710 if self._inline:
718 if self._inline:
711 yield self._writinghandles[0]
719 yield self._writinghandles[0]
712 else:
720 else:
713 yield self._writinghandles[1]
721 yield self._writinghandles[1]
714
722
715 # Otherwise open a new file handle.
723 # Otherwise open a new file handle.
716 else:
724 else:
717 if self._inline:
725 if self._inline:
718 func = self._indexfp
726 func = self._indexfp
719 else:
727 else:
720 func = self._datafp
728 func = self._datafp
721 with func() as fp:
729 with func() as fp:
722 yield fp
730 yield fp
723
731
724 def tiprev(self):
732 def tiprev(self):
725 return len(self.index) - 1
733 return len(self.index) - 1
726
734
727 def tip(self):
735 def tip(self):
728 return self.node(self.tiprev())
736 return self.node(self.tiprev())
729
737
730 def __contains__(self, rev):
738 def __contains__(self, rev):
731 return 0 <= rev < len(self)
739 return 0 <= rev < len(self)
732
740
733 def __len__(self):
741 def __len__(self):
734 return len(self.index)
742 return len(self.index)
735
743
736 def __iter__(self):
744 def __iter__(self):
737 return iter(pycompat.xrange(len(self)))
745 return iter(pycompat.xrange(len(self)))
738
746
739 def revs(self, start=0, stop=None):
747 def revs(self, start=0, stop=None):
740 """iterate over all rev in this revlog (from start to stop)"""
748 """iterate over all rev in this revlog (from start to stop)"""
741 return storageutil.iterrevs(len(self), start=start, stop=stop)
749 return storageutil.iterrevs(len(self), start=start, stop=stop)
742
750
743 @property
751 @property
744 def nodemap(self):
752 def nodemap(self):
745 msg = (
753 msg = (
746 b"revlog.nodemap is deprecated, "
754 b"revlog.nodemap is deprecated, "
747 b"use revlog.index.[has_node|rev|get_rev]"
755 b"use revlog.index.[has_node|rev|get_rev]"
748 )
756 )
749 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
757 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
750 return self.index.nodemap
758 return self.index.nodemap
751
759
752 @property
760 @property
753 def _nodecache(self):
761 def _nodecache(self):
754 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
762 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
755 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
763 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
756 return self.index.nodemap
764 return self.index.nodemap
757
765
758 def hasnode(self, node):
766 def hasnode(self, node):
759 try:
767 try:
760 self.rev(node)
768 self.rev(node)
761 return True
769 return True
762 except KeyError:
770 except KeyError:
763 return False
771 return False
764
772
765 def candelta(self, baserev, rev):
773 def candelta(self, baserev, rev):
766 """whether two revisions (baserev, rev) can be delta-ed or not"""
774 """whether two revisions (baserev, rev) can be delta-ed or not"""
767 # Disable delta if either rev requires a content-changing flag
775 # Disable delta if either rev requires a content-changing flag
768 # processor (ex. LFS). This is because such a flag processor can alter
776 # processor (ex. LFS). This is because such a flag processor can alter
769 # the rawtext content that the delta will be based on, and two clients
777 # the rawtext content that the delta will be based on, and two clients
770 # could have the same revlog node with different flags (i.e. different
778 # could have the same revlog node with different flags (i.e. different
771 # rawtext contents) and the delta could be incompatible.
779 # rawtext contents) and the delta could be incompatible.
772 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
780 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
773 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
781 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
774 ):
782 ):
775 return False
783 return False
776 return True
784 return True
777
785
778 def update_caches(self, transaction):
786 def update_caches(self, transaction):
779 if self.nodemap_file is not None:
787 if self.nodemap_file is not None:
780 if transaction is None:
788 if transaction is None:
781 nodemaputil.update_persistent_nodemap(self)
789 nodemaputil.update_persistent_nodemap(self)
782 else:
790 else:
783 nodemaputil.setup_persistent_nodemap(transaction, self)
791 nodemaputil.setup_persistent_nodemap(transaction, self)
784
792
785 def clearcaches(self):
793 def clearcaches(self):
786 self._revisioncache = None
794 self._revisioncache = None
787 self._chainbasecache.clear()
795 self._chainbasecache.clear()
788 self._chunkcache = (0, b'')
796 self._chunkcache = (0, b'')
789 self._pcache = {}
797 self._pcache = {}
790 self._nodemap_docket = None
798 self._nodemap_docket = None
791 self.index.clearcaches()
799 self.index.clearcaches()
792 # The python code is the one responsible for validating the docket, so we
800 # The python code is the one responsible for validating the docket, so we
793 # end up having to refresh it here.
801 # end up having to refresh it here.
794 use_nodemap = (
802 use_nodemap = (
795 not self._inline
803 not self._inline
796 and self.nodemap_file is not None
804 and self.nodemap_file is not None
797 and util.safehasattr(self.index, 'update_nodemap_data')
805 and util.safehasattr(self.index, 'update_nodemap_data')
798 )
806 )
799 if use_nodemap:
807 if use_nodemap:
800 nodemap_data = nodemaputil.persisted_data(self)
808 nodemap_data = nodemaputil.persisted_data(self)
801 if nodemap_data is not None:
809 if nodemap_data is not None:
802 self._nodemap_docket = nodemap_data[0]
810 self._nodemap_docket = nodemap_data[0]
803 self.index.update_nodemap_data(*nodemap_data)
811 self.index.update_nodemap_data(*nodemap_data)
804
812
805 def rev(self, node):
813 def rev(self, node):
806 try:
814 try:
807 return self.index.rev(node)
815 return self.index.rev(node)
808 except TypeError:
816 except TypeError:
809 raise
817 raise
810 except error.RevlogError:
818 except error.RevlogError:
811 # parsers.c radix tree lookup failed
819 # parsers.c radix tree lookup failed
812 if node == wdirid or node in wdirfilenodeids:
820 if node == wdirid or node in wdirfilenodeids:
813 raise error.WdirUnsupported
821 raise error.WdirUnsupported
814 raise error.LookupError(node, self.indexfile, _(b'no node'))
822 raise error.LookupError(node, self.indexfile, _(b'no node'))
815
823
816 # Accessors for index entries.
824 # Accessors for index entries.
817
825
818 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
826 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
819 # are flags.
827 # are flags.
820 def start(self, rev):
828 def start(self, rev):
821 return int(self.index[rev][0] >> 16)
829 return int(self.index[rev][0] >> 16)
822
830
823 def flags(self, rev):
831 def flags(self, rev):
824 return self.index[rev][0] & 0xFFFF
832 return self.index[rev][0] & 0xFFFF
825
833
826 def length(self, rev):
834 def length(self, rev):
827 return self.index[rev][1]
835 return self.index[rev][1]
828
836
829 def rawsize(self, rev):
837 def rawsize(self, rev):
830 """return the length of the uncompressed text for a given revision"""
838 """return the length of the uncompressed text for a given revision"""
831 l = self.index[rev][2]
839 l = self.index[rev][2]
832 if l >= 0:
840 if l >= 0:
833 return l
841 return l
834
842
835 t = self.rawdata(rev)
843 t = self.rawdata(rev)
836 return len(t)
844 return len(t)
837
845
838 def size(self, rev):
846 def size(self, rev):
839 """length of non-raw text (processed by a "read" flag processor)"""
847 """length of non-raw text (processed by a "read" flag processor)"""
840 # fast path: if no "read" flag processor could change the content,
848 # fast path: if no "read" flag processor could change the content,
841 # size is rawsize. note: ELLIPSIS is known to not change the content.
849 # size is rawsize. note: ELLIPSIS is known to not change the content.
842 flags = self.flags(rev)
850 flags = self.flags(rev)
843 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
851 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
844 return self.rawsize(rev)
852 return self.rawsize(rev)
845
853
846 return len(self.revision(rev, raw=False))
854 return len(self.revision(rev, raw=False))
847
855
def chainbase(self, rev):
    base = self._chainbasecache.get(rev)
    if base is not None:
        return base

    index = self.index
    iterrev = rev
    base = index[iterrev][3]
    while base != iterrev:
        iterrev = base
        base = index[iterrev][3]

    self._chainbasecache[rev] = base
    return base

def linkrev(self, rev):
    return self.index[rev][4]

def parentrevs(self, rev):
    try:
        entry = self.index[rev]
    except IndexError:
        if rev == wdirrev:
            raise error.WdirUnsupported
        raise

    return entry[5], entry[6]

# fast parentrevs(rev) where rev isn't filtered
_uncheckedparentrevs = parentrevs

def node(self, rev):
    try:
        return self.index[rev][7]
    except IndexError:
        if rev == wdirrev:
            raise error.WdirUnsupported
        raise

# Derived from index values.

def end(self, rev):
    return self.start(rev) + self.length(rev)

def parents(self, node):
    i = self.index
    d = i[self.rev(node)]
    return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline

def chainlen(self, rev):
    return self._chaininfo(rev)[0]

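# _chaininfo() computes a (chain length, total compressed delta size)
# pair for rev's delta chain, reusing any cached result found for an
# intermediate revision along the way.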
def _chaininfo(self, rev):
    chaininfocache = self._chaininfocache
    if rev in chaininfocache:
        return chaininfocache[rev]
    index = self.index
    generaldelta = self._generaldelta
    iterrev = rev
    e = index[iterrev]
    clen = 0
    compresseddeltalen = 0
    while iterrev != e[3]:
        clen += 1
        compresseddeltalen += e[1]
        if generaldelta:
            iterrev = e[3]
        else:
            iterrev -= 1
        if iterrev in chaininfocache:
            t = chaininfocache[iterrev]
            clen += t[0]
            compresseddeltalen += t[1]
            break
        e = index[iterrev]
    else:
        # Add text length of base since decompressing that also takes
        # work. For cache hits the length is already included.
        compresseddeltalen += e[1]
    r = (clen, compresseddeltalen)
    chaininfocache[rev] = r
    return r

def _deltachain(self, rev, stoprev=None):
    """Obtain the delta chain for a revision.

    ``stoprev`` specifies a revision to stop at. If not specified, we
    stop at the base of the chain.

    Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
    revs in ascending order and ``stopped`` is a bool indicating whether
    ``stoprev`` was hit.
    """
    # Try C implementation.
    try:
        return self.index.deltachain(rev, stoprev, self._generaldelta)
    except AttributeError:
        pass

    chain = []

    # Alias to prevent attribute lookup in tight loop.
    index = self.index
    generaldelta = self._generaldelta

    iterrev = rev
    e = index[iterrev]
    while iterrev != e[3] and iterrev != stoprev:
        chain.append(iterrev)
        if generaldelta:
            iterrev = e[3]
        else:
            iterrev -= 1
        e = index[iterrev]

    if iterrev == stoprev:
        stopped = True
    else:
        chain.append(iterrev)
        stopped = False

    chain.reverse()
    return chain, stopped

def ancestors(self, revs, stoprev=0, inclusive=False):
    """Generate the ancestors of 'revs' in reverse revision order.
    Does not generate revs lower than stoprev.

    See the documentation for ancestor.lazyancestors for more details."""

    # first, make sure start revisions aren't filtered
    revs = list(revs)
    checkrev = self.node
    for r in revs:
        checkrev(r)
    # and we're sure ancestors aren't filtered as well

    if rustancestor is not None:
        lazyancestors = rustancestor.LazyAncestors
        arg = self.index
    else:
        lazyancestors = ancestor.lazyancestors
        arg = self._uncheckedparentrevs
    return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

def descendants(self, revs):
    return dagop.descendantrevs(revs, self.revs, self.parentrevs)

def findcommonmissing(self, common=None, heads=None):
    """Return a tuple of the ancestors of common and the ancestors of heads
    that are not ancestors of common. In revset terminology, we return the
    tuple:

    ::common, (::heads) - (::common)

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    # we want the ancestors, but inclusive
    class lazyset(object):
        def __init__(self, lazyvalues):
            self.addedvalues = set()
            self.lazyvalues = lazyvalues

        def __contains__(self, value):
            return value in self.addedvalues or value in self.lazyvalues

        def __iter__(self):
            added = self.addedvalues
            for r in added:
                yield r
            for r in self.lazyvalues:
                if r not in added:
                    yield r

        def add(self, value):
            self.addedvalues.add(value)

        def update(self, values):
            self.addedvalues.update(values)

    has = lazyset(self.ancestors(common))
    has.add(nullrev)
    has.update(common)

    # take all ancestors from heads that aren't in has
    missing = set()
    visit = collections.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        else:
            missing.add(r)
            for p in self.parentrevs(r):
                if p not in has:
                    visit.append(p)
    missing = list(missing)
    missing.sort()
    return has, [self.node(miss) for miss in missing]

def incrementalmissingrevs(self, common=None):
    """Return an object that can be used to incrementally compute the
    revision numbers of the ancestors of arbitrary sets that are not
    ancestors of common. This is an ancestor.incrementalmissingancestors
    object.

    'common' is a list of revision numbers. If common is not supplied, uses
    nullrev.
    """
    if common is None:
        common = [nullrev]

    if rustancestor is not None:
        return rustancestor.MissingAncestors(self.index, common)
    return ancestor.incrementalmissingancestors(self.parentrevs, common)

def findmissingrevs(self, common=None, heads=None):
    """Return the revision numbers of the ancestors of heads that
    are not ancestors of common.

    More specifically, return a list of revision numbers corresponding to
    nodes N such that every N satisfies the following constraints:

    1. N is an ancestor of some node in 'heads'
    2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of revision numbers. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullrev."""
    if common is None:
        common = [nullrev]
    if heads is None:
        heads = self.headrevs()

    inc = self.incrementalmissingrevs(common=common)
    return inc.missingancestors(heads)

def findmissing(self, common=None, heads=None):
    """Return the ancestors of heads that are not ancestors of common.

    More specifically, return a list of nodes N such that every N
    satisfies the following constraints:

    1. N is an ancestor of some node in 'heads'
    2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    inc = self.incrementalmissingrevs(common=common)
    return [self.node(r) for r in inc.missingancestors(heads)]

def nodesbetween(self, roots=None, heads=None):
    """Return a topological path from 'roots' to 'heads'.

    Return a tuple (nodes, outroots, outheads) where 'nodes' is a
    topologically sorted list of all nodes N that satisfy both of
    these constraints:

    1. N is a descendant of some node in 'roots'
    2. N is an ancestor of some node in 'heads'

    Every node is considered to be both a descendant and an ancestor
    of itself, so every reachable node in 'roots' and 'heads' will be
    included in 'nodes'.

    'outroots' is the list of reachable nodes in 'roots', i.e., the
    subset of 'roots' that is returned in 'nodes'. Likewise,
    'outheads' is the subset of 'heads' that is also in 'nodes'.

    'roots' and 'heads' are both lists of node IDs. If 'roots' is
    unspecified, uses nullid as the only root. If 'heads' is
    unspecified, uses list of all of the revlog's heads."""
    nonodes = ([], [], [])
    if roots is not None:
        roots = list(roots)
        if not roots:
            return nonodes
        lowestrev = min([self.rev(n) for n in roots])
    else:
        roots = [nullid]  # Everybody's a descendant of nullid
        lowestrev = nullrev
    if (lowestrev == nullrev) and (heads is None):
        # We want _all_ the nodes!
        return ([self.node(r) for r in self], [nullid], list(self.heads()))
    if heads is None:
        # All nodes are ancestors, so the latest ancestor is the last
        # node.
        highestrev = len(self) - 1
        # Set ancestors to None to signal that every node is an ancestor.
        ancestors = None
        # Set heads to an empty dictionary for later discovery of heads
        heads = {}
    else:
        heads = list(heads)
        if not heads:
            return nonodes
        ancestors = set()
        # Turn heads into a dictionary so we can remove 'fake' heads.
        # Also, later we will be using it to filter out the heads we can't
        # find from roots.
        heads = dict.fromkeys(heads, False)
        # Start at the top and keep marking parents until we're done.
        nodestotag = set(heads)
        # Remember where the top was so we can use it as a limit later.
        highestrev = max([self.rev(n) for n in nodestotag])
        while nodestotag:
            # grab a node to tag
            n = nodestotag.pop()
            # Never tag nullid
            if n == nullid:
                continue
            # A node's revision number represents its place in a
            # topologically sorted list of nodes.
            r = self.rev(n)
            if r >= lowestrev:
                if n not in ancestors:
                    # If we are possibly a descendant of one of the roots
                    # and we haven't already been marked as an ancestor
                    ancestors.add(n)  # Mark as ancestor
                    # Add non-nullid parents to list of nodes to tag.
                    nodestotag.update(
                        [p for p in self.parents(n) if p != nullid]
                    )
                elif n in heads:  # We've seen it before, is it a fake head?
                    # So it is, real heads should not be the ancestors of
                    # any other heads.
                    heads.pop(n)
        if not ancestors:
            return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > nullrev:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [root for root in roots if root in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(root) for root in roots])
            else:
                # No more roots? Return empty list
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = nullrev
            roots = [nullid]
    # Transform our roots list into a set.
    descendants = set(roots)
    # Also, keep the original roots so we can filter out roots that aren't
    # 'real' roots (i.e. are descended from other roots).
    roots = descendants.copy()
    # Our topologically sorted list of output nodes.
    orderedout = []
    # Don't start at nullid since we don't want nullid in our output list,
    # and if nullid shows up in descendants, empty parents will look like
    # they're descendants.
    for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
        n = self.node(r)
        isdescendant = False
        if lowestrev == nullrev:  # Everybody is a descendant of nullid
            isdescendant = True
        elif n in descendants:
            # n is already a descendant
            isdescendant = True
            # This check only needs to be done here because all the roots
            # will start being marked as descendants before the loop.
            if n in roots:
                # If n was a root, check if it's a 'real' root.
                p = tuple(self.parents(n))
                # If any of its parents are descendants, it's not a root.
                if (p[0] in descendants) or (p[1] in descendants):
                    roots.remove(n)
        else:
            p = tuple(self.parents(n))
            # A node is a descendant if either of its parents are
            # descendants. (We seeded the dependents list with the roots
            # up there, remember?)
            if (p[0] in descendants) or (p[1] in descendants):
                descendants.add(n)
                isdescendant = True
        if isdescendant and ((ancestors is None) or (n in ancestors)):
            # Only include nodes that are both descendants and ancestors.
            orderedout.append(n)
            if (ancestors is not None) and (n in heads):
                # We're trying to figure out which heads are reachable
                # from roots.
                # Mark this head as having been reached
                heads[n] = True
            elif ancestors is None:
                # Otherwise, we're trying to discover the heads.
                # Assume this is a head because if it isn't, the next step
                # will eventually remove it.
                heads[n] = True
                # But, obviously its parents aren't.
                for p in self.parents(n):
                    heads.pop(p, None)
    heads = [head for head, flag in pycompat.iteritems(heads) if flag]
    roots = list(roots)
    assert orderedout
    assert roots
    assert heads
    return (orderedout, roots, heads)

def headrevs(self, revs=None):
    if revs is None:
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()
    if rustdagop is not None:
        return rustdagop.headrevs(self.index, revs)
    return dagop.headrevs(revs, self._uncheckedparentrevs)

def computephases(self, roots):
    return self.index.computephasesmapsets(roots)

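# Pure-Python fallback used by headrevs() when the compiled index does
# not provide a native headrevs() implementation.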
def _headrevs(self):
    count = len(self)
    if not count:
        return [nullrev]
    # we won't iter over filtered rev so nobody is a head at start
    ishead = [0] * (count + 1)
    index = self.index
    for r in self:
        ishead[r] = 1  # I may be a head
        e = index[r]
        ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
    return [r for r, val in enumerate(ishead) if val]

def heads(self, start=None, stop=None):
    """return the list of all nodes that have no children

    if start is specified, only heads that are descendants of
    start will be returned
    if stop is specified, it will consider all the revs from stop
    as if they had no children
    """
    if start is None and stop is None:
        if not len(self):
            return [nullid]
        return [self.node(r) for r in self.headrevs()]

    if start is None:
        start = nullrev
    else:
        start = self.rev(start)

    stoprevs = {self.rev(n) for n in stop or []}

    revs = dagop.headrevssubset(
        self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
    )

    return [self.node(rev) for rev in revs]

def children(self, node):
    """find the children of a given node"""
    c = []
    p = self.rev(node)
    for r in self.revs(start=p + 1):
        prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
        if prevs:
            for pr in prevs:
                if pr == p:
                    c.append(self.node(r))
        elif p == nullrev:
            c.append(self.node(r))
    return c

def commonancestorsheads(self, a, b):
    """calculate all the heads of the common ancestors of nodes a and b"""
    a, b = self.rev(a), self.rev(b)
    ancs = self._commonancestorsheads(a, b)
    return pycompat.maplist(self.node, ancs)

def _commonancestorsheads(self, *revs):
    """calculate all the heads of the common ancestors of revs"""
    try:
        ancs = self.index.commonancestorsheads(*revs)
    except (AttributeError, OverflowError):  # C implementation failed
        ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
    return ancs

def isancestor(self, a, b):
    """return True if node a is an ancestor of node b

    A revision is considered an ancestor of itself."""
    a, b = self.rev(a), self.rev(b)
    return self.isancestorrev(a, b)

def isancestorrev(self, a, b):
    """return True if revision a is an ancestor of revision b

    A revision is considered an ancestor of itself.

    The implementation of this is trivial but the use of
    reachableroots is not."""
    if a == nullrev:
        return True
    elif a == b:
        return True
    elif a > b:
        return False
    return bool(self.reachableroots(a, [b], [a], includepath=False))

def reachableroots(self, minroot, heads, roots, includepath=False):
    """return (heads(::(<roots> and <roots>::<heads>)))

    If includepath is True, return (<roots>::<heads>)."""
    try:
        return self.index.reachableroots2(
            minroot, heads, roots, includepath
        )
    except AttributeError:
        return dagop._reachablerootspure(
            self.parentrevs, minroot, roots, heads, includepath
        )

def ancestor(self, a, b):
    """calculate the "best" common ancestor of nodes a and b"""

    a, b = self.rev(a), self.rev(b)
    try:
        ancs = self.index.ancestors(a, b)
    except (AttributeError, OverflowError):
        ancs = ancestor.ancestors(self.parentrevs, a, b)
    if ancs:
        # choose a consistent winner when there's a tie
        return min(map(self.node, ancs))
    return nullid

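# _match() resolves identifiers that are already unambiguous: an integer
# revision, a 20-byte binary node, a decimal revision string (possibly
# negative), or a full 40-character hex nodeid; it falls through (None)
# when nothing matches.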
def _match(self, id):
    if isinstance(id, int):
        # rev
        return self.node(id)
    if len(id) == 20:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            self.rev(node)  # quick search the index
            return node
        except error.LookupError:
            pass  # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if b"%d" % rev != id:
            raise ValueError
        if rev < 0:
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 40:
        try:
            # a full hex nodeid?
            node = bin(id)
            self.rev(node)
            return node
        except (TypeError, error.LookupError):
            pass

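# _partialmatch() resolves an abbreviated hex nodeid prefix, preferring
# the C radix-tree lookup and falling back to a linear scan of the index;
# prefixes of the working-directory pseudo-node ('fff...') need special
# handling throughout.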
def _partialmatch(self, id):
    # we don't care about wdirfilenodeids as they should always be full hash
    maybewdir = wdirhex.startswith(id)
    try:
        partial = self.index.partialmatch(id)
        if partial and self.hasnode(partial):
            if maybewdir:
                # single 'ff...' match in radix tree, ambiguous with wdir
                raise error.RevlogError
            return partial
        if maybewdir:
            # no 'ff...' match in radix tree, wdir identified
            raise error.WdirUnsupported
        return None
    except error.RevlogError:
        # parsers.c radix tree lookup gave multiple matches
        # fast path: for unfiltered changelog, radix tree is accurate
        if not getattr(self, 'filteredrevs', None):
            raise error.AmbiguousPrefixLookupError(
                id, self.indexfile, _(b'ambiguous identifier')
            )
        # fall through to slow path that filters hidden revisions
    except (AttributeError, ValueError):
        # we are pure python, or key was too short to search radix tree
        pass

    if id in self._pcache:
        return self._pcache[id]

    if len(id) <= 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            prefix = bin(id[: l * 2])
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [
                n for n in nl if hex(n).startswith(id) and self.hasnode(n)
            ]
            if nullhex.startswith(id):
                nl.append(nullid)
            if len(nl) > 0:
                if len(nl) == 1 and not maybewdir:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _(b'ambiguous identifier')
                )
            if maybewdir:
                raise error.WdirUnsupported
            return None
        except TypeError:
            pass

def lookup(self, id):
    """locate a node based on:
    - revision number or str(revision number)
    - nodeid or subset of hex nodeid
    """
    n = self._match(id)
    if n is not None:
        return n
    n = self._partialmatch(id)
    if n:
        return n

    raise error.LookupError(id, self.indexfile, _(b'no match found'))

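# shortest() first tries the C index's shortest() and falls back to
# probing successively longer prefixes with _partialmatch() when no
# native implementation exists or the revlog is filtered.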
def shortest(self, node, minlength=1):
    """Find the shortest unambiguous prefix that matches node."""

    def isvalid(prefix):
        try:
            matchednode = self._partialmatch(prefix)
        except error.AmbiguousPrefixLookupError:
            return False
        except error.WdirUnsupported:
            # single 'ff...' match
            return True
        if matchednode is None:
            raise error.LookupError(node, self.indexfile, _(b'no node'))
        return True

    def maybewdir(prefix):
        return all(c == b'f' for c in pycompat.iterbytestr(prefix))

    hexnode = hex(node)

    def disambiguate(hexnode, minlength):
        """Disambiguate against wdirid."""
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if not maybewdir(prefix):
                return prefix

    if not getattr(self, 'filteredrevs', None):
        try:
            length = max(self.index.shortest(node), minlength)
            return disambiguate(hexnode, length)
        except error.RevlogError:
            if node != wdirid:
                raise error.LookupError(node, self.indexfile, _(b'no node'))
        except AttributeError:
            # Fall through to pure code
            pass

    if node == wdirid:
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return prefix

    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        if isvalid(prefix):
            return disambiguate(hexnode, length)

def cmp(self, node, text):
    """compare text with a given file revision

    returns True if text is different than what is stored.
    """
    p1, p2 = self.parents(node)
    return storageutil.hashrevisionsha1(text, p1, p2) != node

def _cachesegment(self, offset, data):
    """Add a segment to the revlog cache.

    Accepts an absolute offset and the data that is at that location.
    """
    o, d = self._chunkcache
    # try to add to existing cache
    if o + len(d) == offset and len(d) + len(data) < _chunksize:
        self._chunkcache = o, d + data
    else:
        self._chunkcache = offset, data

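# Note: the window arithmetic in _readsegment() below assumes that
# _chunkcachesize is a power of two, so ``offset & ~(cachesize - 1)``
# rounds down to a cache-aligned boundary.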
def _readsegment(self, offset, length, df=None):
    """Load a segment of raw data from the revlog.

    Accepts an absolute offset, length to read, and an optional existing
    file handle to read from.

    If an existing file handle is passed, it will be seeked and the
    original seek position will NOT be restored.

    Returns a str or buffer of raw byte data.

    Raises if the requested number of bytes could not be read.
    """
    # Cache data both forward and backward around the requested
    # data, in a fixed size window. This helps speed up operations
    # involving reading the revlog backwards.
    cachesize = self._chunkcachesize
    realoffset = offset & ~(cachesize - 1)
    reallength = (
        (offset + length + cachesize) & ~(cachesize - 1)
    ) - realoffset
    with self._datareadfp(df) as df:
        df.seek(realoffset)
        d = df.read(reallength)

    self._cachesegment(realoffset, d)
    if offset != realoffset or reallength != length:
        startoffset = offset - realoffset
        if len(d) - startoffset < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from '
                    b'offset %d, got %d'
                )
                % (
                    self.indexfile if self._inline else self.datafile,
                    length,
                    realoffset,
                    len(d) - startoffset,
                )
            )

        return util.buffer(d, startoffset, length)

    if len(d) < length:
        raise error.RevlogError(
            _(
                b'partial read of revlog %s; expected %d bytes from offset '
                b'%d, got %d'
            )
            % (
                self.indexfile if self._inline else self.datafile,
                length,
                offset,
                len(d),
            )
        )

    return d

def _getsegment(self, offset, length, df=None):
    """Obtain a segment of raw data from the revlog.

    Accepts an absolute offset, length of bytes to obtain, and an
    optional file handle to the already-opened revlog. If the file
    handle is used, its original seek position will not be preserved.

    Requests for data may be returned from a cache.

    Returns a str or a buffer instance of raw byte data.
    """
    o, d = self._chunkcache
    l = len(d)

    # is it in the cache?
    cachestart = offset - o
    cacheend = cachestart + length
    if cachestart >= 0 and cacheend <= l:
        if cachestart == 0 and cacheend == l:
            return d  # avoid a copy
        return util.buffer(d, cachestart, cacheend - cachestart)

    return self._readsegment(offset, length, df=df)

def _getsegmentforrevs(self, startrev, endrev, df=None):
    """Obtain a segment of raw data corresponding to a range of revisions.

    Accepts the start and end revisions and an optional already-open
    file handle to be used for reading. If the file handle is read, its
    seek position will not be preserved.

    Requests for data may be satisfied by a cache.

    Returns a 2-tuple of (offset, data) for the requested range of
    revisions. Offset is the integer offset from the beginning of the
    revlog and data is a str or buffer of the raw byte data.

    Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
    to determine where each revision's data begins and ends.
    """
    # Inlined self.start(startrev) & self.end(endrev) for perf reasons
    # (functions are expensive).
    index = self.index
    istart = index[startrev]
    start = int(istart[0] >> 16)
    if startrev == endrev:
        end = start + istart[1]
    else:
        iend = index[endrev]
        end = int(iend[0] >> 16) + iend[1]

    if self._inline:
        start += (startrev + 1) * self._io.size
        end += (endrev + 1) * self._io.size
    length = end - start

    return start, self._getsegment(start, length, df=df)

def _chunk(self, rev, df=None):
    """Obtain a single decompressed chunk for a revision.

    Accepts an integer revision and an optional already-open file handle
    to be used for reading. If used, the seek position of the file will not
    be preserved.

    Returns a str holding uncompressed data for the requested revision.
    """
    return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

def _chunks(self, revs, df=None, targetsize=None):
    """Obtain decompressed chunks for the specified revisions.

    Accepts an iterable of numeric revisions that are assumed to be in
    ascending order. Also accepts an optional already-open file handle
    to be used for reading. If used, the seek position of the file will
    not be preserved.

    This function is similar to calling ``self._chunk()`` multiple times,
    but is faster.

    Returns a list with decompressed data for each requested revision.
    """
    if not revs:
        return []
    start = self.start
    length = self.length
    inline = self._inline
    iosize = self._io.size
    buffer = util.buffer

    l = []
    ladd = l.append

    if not self._withsparseread:
        slicedchunks = (revs,)
    else:
        slicedchunks = deltautil.slicechunk(
            self, revs, targetsize=targetsize
        )

    for revschunk in slicedchunks:
        firstrev = revschunk[0]
        # Skip trailing revisions with empty diff
        for lastrev in revschunk[::-1]:
            if length(lastrev) != 0:
                break

        try:
            offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revschunk]

        decomp = self.decompress
        for rev in revschunk:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

    return l

1772     def _chunkclear(self):
1773         """Clear the raw chunk cache."""
1774         self._chunkcache = (0, b'')
1775
1776     def deltaparent(self, rev):
1777         """return deltaparent of the given revision"""
1778         base = self.index[rev][3]
1779         if base == rev:
1780             return nullrev
1781         elif self._generaldelta:
1782             return base
1783         else:
1784             return rev - 1
1785
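Following deltaparent() transitively is what produces a delta chain. A standalone sketch against a toy index (hypothetical data, not the revlog API):

    NULLREV = -1
    deltaparents = {0: NULLREV, 1: 0, 2: 1, 3: NULLREV, 4: 3}   # toy data

    def toy_chain(rev):
        # Collect the revisions whose chunks must be applied, base first.
        chain = []
        while rev != NULLREV:
            chain.append(rev)
            rev = deltaparents[rev]
        chain.reverse()
        return chain

    assert toy_chain(2) == [0, 1, 2]   # full text 0, then two deltas
    assert toy_chain(4) == [3, 4]
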
1786     def issnapshot(self, rev):
1787         """tells whether rev is a snapshot"""
1788         if not self._sparserevlog:
1789             return self.deltaparent(rev) == nullrev
1790         elif util.safehasattr(self.index, b'issnapshot'):
1791             # directly assign the method to cache the testing and access
1792             self.issnapshot = self.index.issnapshot
1793             return self.issnapshot(rev)
1794         if rev == nullrev:
1795             return True
1796         entry = self.index[rev]
1797         base = entry[3]
1798         if base == rev:
1799             return True
1800         if base == nullrev:
1801             return True
1802         p1 = entry[5]
1803         p2 = entry[6]
1804         if base == p1 or base == p2:
1805             return False
1806         return self.issnapshot(base)
1807
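For illustration, the sparse-revlog rules above can be restated as a tiny standalone function over a toy index (hypothetical entries, same base/p1/p2 tuple layout as used above):

    NULLREV = -1
    # rev -> (deltabase, p1, p2), hypothetical sparse-revlog entries.
    entries = {
        0: (0, NULLREV, NULLREV),   # stored against itself: full snapshot
        1: (0, 0, NULLREV),         # delta against its parent: not a snapshot
        2: (0, 1, NULLREV),         # delta against a non-parent snapshot:
    }                               # an intermediate snapshot

    def toy_issnapshot(rev):
        if rev == NULLREV:
            return True
        base, p1, p2 = entries[rev]
        if base == rev or base == NULLREV:
            return True
        if base in (p1, p2):
            return False
        return toy_issnapshot(base)

    assert [toy_issnapshot(r) for r in (0, 1, 2)] == [True, False, True]
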
1808     def snapshotdepth(self, rev):
1809         """number of snapshots in the chain before this one"""
1810         if not self.issnapshot(rev):
1811             raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1812         return len(self._deltachain(rev)[0]) - 1
1813
1814     def revdiff(self, rev1, rev2):
1815         """return or calculate a delta between two revisions
1816
1817         The delta calculated is in binary form and is intended to be written to
1818         revlog data directly. So this function needs raw revision data.
1819         """
1820         if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1821             return bytes(self._chunk(rev2))
1822
1823         return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1824
1825     def _processflags(self, text, flags, operation, raw=False):
1826         """deprecated entry point to access flag processors"""
1827         msg = b'_processflag(...) use the specialized variant'
1828         util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1829         if raw:
1830             return text, flagutil.processflagsraw(self, text, flags)
1831         elif operation == b'read':
1832             return flagutil.processflagsread(self, text, flags)
1833         else:  # write operation
1834             return flagutil.processflagswrite(self, text, flags, None)
1835
1836     def revision(self, nodeorrev, _df=None, raw=False):
1837         """return an uncompressed revision of a given node or revision
1838         number.
1839
1840         _df - an existing file handle to read from. (internal-only)
1841         raw - an optional argument specifying if the revision data is to be
1842         treated as raw data when applying flag transforms. 'raw' should be set
1843         to True when generating changegroups or in debug commands.
1844         """
1845         if raw:
1846             msg = (
1847                 b'revlog.revision(..., raw=True) is deprecated, '
1848                 b'use revlog.rawdata(...)'
1849             )
1850             util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1851         return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1852
1853     def sidedata(self, nodeorrev, _df=None):
1854         """a map of extra data related to the changeset but not part of the hash
1855
1856         This function currently returns a dictionary. However, a more advanced
1857         mapping object will likely be used in the future for more
1858         efficient/lazy code.
1859         """
1860         return self._revisiondata(nodeorrev, _df)[1]
1861
1862     def _revisiondata(self, nodeorrev, _df=None, raw=False):
1863         # deal with <nodeorrev> argument type
1864         if isinstance(nodeorrev, int):
1865             rev = nodeorrev
1866             node = self.node(rev)
1867         else:
1868             node = nodeorrev
1869             rev = None
1870
1871         # fast path the special `nullid` rev
1872         if node == nullid:
1873             return b"", {}
1874
1875         # ``rawtext`` is the text as stored inside the revlog. Might be the
1876         # revision or might need to be processed to retrieve the revision.
1877         rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1878
1879         if raw and validated:
1880             # if we don't want to process the raw text and that raw
1881             # text is cached, we can exit early.
1882             return rawtext, {}
1883         if rev is None:
1884             rev = self.rev(node)
1885         # the revlog's flag for this revision
1886         # (usually alter its state or content)
1887         flags = self.flags(rev)
1888
1889         if validated and flags == REVIDX_DEFAULT_FLAGS:
1890             # no extra flags set, no flag processor runs, text = rawtext
1891             return rawtext, {}
1892
1893         sidedata = {}
1894         if raw:
1895             validatehash = flagutil.processflagsraw(self, rawtext, flags)
1896             text = rawtext
1897         else:
1898             try:
1899                 r = flagutil.processflagsread(self, rawtext, flags)
1900             except error.SidedataHashError as exc:
1901                 msg = _(b"integrity check failed on %s:%s sidedata key %d")
1902                 msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
1903                 raise error.RevlogError(msg)
1904             text, validatehash, sidedata = r
1905         if validatehash:
1906             self.checkhash(text, node, rev=rev)
1907         if not validated:
1908             self._revisioncache = (node, rev, rawtext)
1909
1910         return text, sidedata
1911
1912     def _rawtext(self, node, rev, _df=None):
1913         """return the possibly unvalidated rawtext for a revision
1914
1915         returns (rev, rawtext, validated)
1916         """
1917
1918         # revision in the cache (could be useful to apply delta)
1919         cachedrev = None
1920         # An intermediate text to apply deltas to
1921         basetext = None
1922
1923         # Check if we have the entry in cache
1924         # The cache entry looks like (node, rev, rawtext)
1925         if self._revisioncache:
1926             if self._revisioncache[0] == node:
1927                 return (rev, self._revisioncache[2], True)
1928             cachedrev = self._revisioncache[1]
1929
1930         if rev is None:
1931             rev = self.rev(node)
1932
1933         chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1934         if stopped:
1935             basetext = self._revisioncache[2]
1936
1937         # drop cache to save memory, the caller is expected to
1938         # update self._revisioncache after validating the text
1939         self._revisioncache = None
1940
1941         targetsize = None
1942         rawsize = self.index[rev][2]
1943         if 0 <= rawsize:
1944             targetsize = 4 * rawsize
1945
1946         bins = self._chunks(chain, df=_df, targetsize=targetsize)
1947         if basetext is None:
1948             basetext = bytes(bins[0])
1949             bins = bins[1:]
1950
1951         rawtext = mdiff.patches(basetext, bins)
1952         del basetext  # let us have a chance to free memory early
1953         return (rev, rawtext, False)
1954
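One detail worth illustrating: when the cached revision sits on the chain, the walk stops there and the cached text becomes the delta base (the `stopped` branch above). A standalone sketch with a toy index (hypothetical data):

    NULLREV = -1
    deltaparents = {0: NULLREV, 1: 0, 2: 1}   # hypothetical toy index

    def toy_deltachain(rev, stoprev=None):
        chain, stopped = [], False
        while rev != NULLREV:
            if rev == stoprev:
                stopped = True          # cached text will serve as the base
                break
            chain.append(rev)
            rev = deltaparents[rev]
        chain.reverse()
        return chain, stopped

    assert toy_deltachain(2) == ([0, 1, 2], False)      # nothing cached
    assert toy_deltachain(2, stoprev=1) == ([2], True)  # rev 1 cached
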
1955     def rawdata(self, nodeorrev, _df=None):
1956         """return the uncompressed raw data of a given node or revision number.
1957
1958         _df - an existing file handle to read from. (internal-only)
1959         """
1960         return self._revisiondata(nodeorrev, _df, raw=True)[0]
1961
1962     def hash(self, text, p1, p2):
1963         """Compute a node hash.
1964
1965         Available as a function so that subclasses can replace the hash
1966         as needed.
1967         """
1968         return storageutil.hashrevisionsha1(text, p1, p2)
1969
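For reference, a sketch of the node hash that storageutil.hashrevisionsha1 is generally understood to compute; this is written from memory rather than taken from the code shown here, so treat it as an assumption: the two parent nodes are concatenated in sorted order ahead of the text, which makes the node independent of parent order.

    import hashlib

    def toy_hashrevision(text, p1, p2):
        # Parents sorted before hashing: the node is symmetric in its parents
        # (assumed convention, not quoted from the listing above).
        a, b = sorted((p1, p2))
        return hashlib.sha1(a + b + text).digest()

    p1, p2 = b'\x11' * 20, b'\x22' * 20
    assert toy_hashrevision(b'data', p1, p2) == toy_hashrevision(b'data', p2, p1)
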
1970     def checkhash(self, text, node, p1=None, p2=None, rev=None):
1971         """Check node hash integrity.
1972
1973         Available as a function so that subclasses can extend hash mismatch
1974         behaviors as needed.
1975         """
1976         try:
1977             if p1 is None and p2 is None:
1978                 p1, p2 = self.parents(node)
1979             if node != self.hash(text, p1, p2):
1980                 # Clear the revision cache on hash failure. The revision cache
1981                 # only stores the raw revision and clearing the cache does have
1982                 # the side-effect that we won't have a cache hit when the raw
1983                 # revision data is accessed. But this case should be rare and
1984                 # it is extra work to teach the cache about the hash
1985                 # verification state.
1986                 if self._revisioncache and self._revisioncache[0] == node:
1987                     self._revisioncache = None
1988
1989                 revornode = rev
1990                 if revornode is None:
1991                     revornode = templatefilters.short(hex(node))
1992                 raise error.RevlogError(
1993                     _(b"integrity check failed on %s:%s")
1994                     % (self.indexfile, pycompat.bytestr(revornode))
1995                 )
1996         except error.RevlogError:
1997             if self._censorable and storageutil.iscensoredtext(text):
1998                 raise error.CensoredNodeError(self.indexfile, node, text)
1999             raise
2000
2001     def _enforceinlinesize(self, tr, fp=None):
2002         """Check if the revlog is too big for inline and convert if so.
2003
2004         This should be called after revisions are added to the revlog. If the
2005         revlog has grown too large to be an inline revlog, it will convert it
2006         to use multiple index and data files.
2007         """
2008         tiprev = len(self) - 1
2009         if (
2010             not self._inline
2011             or (self.start(tiprev) + self.length(tiprev)) < _maxinline
2012         ):
2013             return
2014
2015         troffset = tr.findoffset(self.indexfile)
2016         if troffset is None:
2017             raise error.RevlogError(
2018                 _(b"%s not found in the transaction") % self.indexfile
2019             )
2020         trindex = 0
2021         tr.add(self.datafile, 0)
2022
2023         if fp:
2024             fp.flush()
2025             fp.close()
2026             # We can't use the cached file handle after close(). So prevent
2027             # its usage.
2028             self._writinghandles = None
2029
2030         with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
2031             for r in self:
2032                 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
2033                 if troffset <= self.start(r):
2034                     trindex = r
2035
2036         with self._indexfp(b'w') as fp:
2037             self.version &= ~FLAG_INLINE_DATA
2038             self._inline = False
2039             io = self._io
2040             for i in self:
2041                 e = io.packentry(self.index[i], self.node, self.version, i)
2042                 fp.write(e)
2043
2044             # the temp file replaces the real index when we exit the context
2045             # manager
2046
2047         tr.replace(self.indexfile, trindex * self._io.size)
2048         nodemaputil.setup_persistent_nodemap(tr, self)
2049         self._chunkclear()
2050
2051     def _nodeduplicatecallback(self, transaction, node):
2052         """called when trying to add a node already stored."""
2053
2054     def addrevision(
2055         self,
2056         text,
2057         transaction,
2058         link,
2059         p1,
2060         p2,
2061         cachedelta=None,
2062         node=None,
2063         flags=REVIDX_DEFAULT_FLAGS,
2064         deltacomputer=None,
2065         sidedata=None,
2066     ):
2067         """add a revision to the log
2068
2069         text - the revision data to add
2070         transaction - the transaction object used for rollback
2071         link - the linkrev data to add
2072         p1, p2 - the parent nodeids of the revision
2073         cachedelta - an optional precomputed delta
2074         node - nodeid of revision; typically node is not specified, and it is
2075         computed by default as hash(text, p1, p2), however subclasses might
2076         use a different hashing method (and override checkhash() in that case)
2077         flags - the known flags to set on the revision
2078         deltacomputer - an optional deltacomputer instance shared between
2079         multiple calls
2080         """
2081         if link == nullrev:
2082             raise error.RevlogError(
2083                 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2084             )
2085
2086         if sidedata is None:
2087             sidedata = {}
2088             flags = flags & ~REVIDX_SIDEDATA
2089         elif not self.hassidedata:
2090             raise error.ProgrammingError(
2091                 _(b"trying to add sidedata to a revlog that doesn't support them")
2092             )
2093         else:
2094             flags |= REVIDX_SIDEDATA
2095
2096         if flags:
2097             node = node or self.hash(text, p1, p2)
2098
2099         rawtext, validatehash = flagutil.processflagswrite(
2100             self, text, flags, sidedata=sidedata
2101         )
2102
2103         # If the flag processor modifies the revision data, ignore any provided
2104         # cachedelta.
2105         if rawtext != text:
2106             cachedelta = None
2107
2108         if len(rawtext) > _maxentrysize:
2109             raise error.RevlogError(
2110                 _(
2111                     b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2112                 )
2113                 % (self.indexfile, len(rawtext))
2114             )
2115
2116         node = node or self.hash(rawtext, p1, p2)
2117         rev = self.index.get_rev(node)
2118         if rev is not None:
2119             return rev
2120
2121         if validatehash:
2122             self.checkhash(rawtext, node, p1=p1, p2=p2)
2123
2124         return self.addrawrevision(
2125             rawtext,
2126             transaction,
2127             link,
2128             p1,
2129             p2,
2130             node,
2131             flags,
2132             cachedelta=cachedelta,
2133             deltacomputer=deltacomputer,
2134         )
2135
2136     def addrawrevision(
2137         self,
2138         rawtext,
2139         transaction,
2140         link,
2141         p1,
2142         p2,
2143         node,
2144         flags,
2145         cachedelta=None,
2146         deltacomputer=None,
2147     ):
2148         """add a raw revision with known flags, node and parents
2149         useful when reusing a revision not stored in this revlog (e.g. received
2150         over the wire, or read from an external bundle).
2151         """
2152         dfh = None
2153         if not self._inline:
2154             dfh = self._datafp(b"a+")
2155         ifh = self._indexfp(b"a+")
2156         try:
2157             return self._addrevision(
2158                 node,
2159                 rawtext,
2160                 transaction,
2161                 link,
2162                 p1,
2163                 p2,
2164                 flags,
2165                 cachedelta,
2166                 ifh,
2167                 dfh,
2168                 deltacomputer=deltacomputer,
2169             )
2170         finally:
2171             if dfh:
2172                 dfh.close()
2173             ifh.close()
2174
2175     def compress(self, data):
2176         """Generate a possibly-compressed representation of data."""
2177         if not data:
2178             return b'', data
2179
2180         compressed = self._compressor.compress(data)
2181
2182         if compressed:
2183             # The revlog compressor added the header in the returned data.
2184             return b'', compressed
2185
2186         if data[0:1] == b'\0':
2187             return b'', data
2188         return b'u', data
2189
2190     def decompress(self, data):
2191         """Decompress a revlog chunk.
2192
2193         The chunk is expected to begin with a header identifying the
2194         format type so it can be routed to an appropriate decompressor.
2195         """
2196         if not data:
2197             return data
2198
2199         # Revlogs are read much more frequently than they are written and many
2200         # chunks only take microseconds to decompress, so performance is
2201         # important here.
2202         #
2203         # We can make a few assumptions about revlogs:
2204         #
2205         # 1) the majority of chunks will be compressed (as opposed to inline
2206         #    raw data).
2207         # 2) decompressing *any* data will likely be at least 10x slower than
2208         #    returning raw inline data.
2209         # 3) we want to prioritize common and officially supported compression
2210         #    engines
2211         #
2212         # It follows that we want to optimize for "decompress compressed data
2213         # when encoded with common and officially supported compression engines"
2214         # case over "raw data" and "data encoded by less common or non-official
2215         # compression engines." That is why we have the inline lookup first
2216         # followed by the compengines lookup.
2217         #
2218         # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2219         # compressed chunks. And this matters for changelog and manifest reads.
2220         t = data[0:1]
2221
2222         if t == b'x':
2223             try:
2224                 return _zlibdecompress(data)
2225             except zlib.error as e:
2226                 raise error.RevlogError(
2227                     _(b'revlog decompress error: %s')
2228                     % stringutil.forcebytestr(e)
2229                 )
2230         # '\0' is more common than 'u' so it goes first.
2231         elif t == b'\0':
2232             return data
2233         elif t == b'u':
2234             return util.buffer(data, 1)
2235
2236         try:
2237             compressor = self._decompressors[t]
2238         except KeyError:
2239             try:
2240                 engine = util.compengines.forrevlogheader(t)
2241                 compressor = engine.revlogcompressor(self._compengineopts)
2242                 self._decompressors[t] = compressor
2243             except KeyError:
2244                 raise error.RevlogError(_(b'unknown compression type %r') % t)
2245
2246         return compressor.decompress(data)
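The one-byte header convention is easiest to see as a round trip. A standalone toy version follows; it deviates from the real compress() by using a size comparison where the revlog asks its compressor whether compression paid off, and only the dispatch on the first byte is the point:

    import zlib

    def toy_compress(data):
        if not data:
            return b'', data
        compressed = zlib.compress(data)
        if len(compressed) < len(data):
            return b'', compressed       # zlib output identifies itself: b'x'
        if data[0:1] == b'\0':
            return b'', data             # already self-identifying
        return b'u', data                # stored verbatim behind a b'u' tag

    def toy_decompress(blob):
        t = blob[0:1]
        if t == b'x':
            return zlib.decompress(blob)
        if t == b'\0':
            return blob
        if t == b'u':
            return blob[1:]
        raise ValueError('unknown compression type %r' % t)

    for payload in (b'z' * 100, b'short', b'\0binary'):
        header, stored = toy_compress(payload)
        assert toy_decompress(header + stored) == payload
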
2248     def _addrevision(
2249         self,
2250         node,
2251         rawtext,
2252         transaction,
2253         link,
2254         p1,
2255         p2,
2256         flags,
2257         cachedelta,
2258         ifh,
2259         dfh,
2260         alwayscache=False,
2261         deltacomputer=None,
2262     ):
2263         """internal function to add revisions to the log
2264
2265         see addrevision for argument descriptions.
2266
2267         note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2268
2269         if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2270         be used.
2271
2272         invariants:
2273         - rawtext is optional (can be None); if not set, cachedelta must be set.
2274           if both are set, they must correspond to each other.
2275         """
2276         if node == nullid:
2277             raise error.RevlogError(
2278                 _(b"%s: attempt to add null revision") % self.indexfile
2279             )
2280         if node == wdirid or node in wdirfilenodeids:
2281             raise error.RevlogError(
2282                 _(b"%s: attempt to add wdir revision") % self.indexfile
2283             )
2284
2285         if self._inline:
2286             fh = ifh
2287         else:
2288             fh = dfh
2289
2290         btext = [rawtext]
2291
2292         curr = len(self)
2293         prev = curr - 1
2294         offset = self.end(prev)
2295
2296         if self._concurrencychecker:
2297             if self._inline:
2298                 # offset is "as if" it were in the .d file, so we need to add on
2299                 # the size of the entry metadata.
2300                 self._concurrencychecker(
2301                     ifh, self.indexfile, offset + curr * self._io.size
2302                 )
2303             else:
2304                 # Entries in the .i are a consistent size.
2305                 self._concurrencychecker(
2306                     ifh, self.indexfile, curr * self._io.size
2307                 )
2308                 self._concurrencychecker(dfh, self.datafile, offset)
2309
2310         p1r, p2r = self.rev(p1), self.rev(p2)
2311
2312         # full versions are inserted when the needed deltas
2313         # become comparable to the uncompressed text
2314         if rawtext is None:
2315             # need rawtext size, before changed by flag processors, which is
2316             # the non-raw size. use revlog explicitly to avoid filelog's extra
2317             # logic that might remove metadata size.
2318             textlen = mdiff.patchedsize(
2319                 revlog.size(self, cachedelta[0]), cachedelta[1]
2320             )
2321         else:
2322             textlen = len(rawtext)
2323
2324         if deltacomputer is None:
2325             deltacomputer = deltautil.deltacomputer(self)
2326
2327         revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2328
2329         deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2330
2331         e = (
2332             offset_type(offset, flags),
2333             deltainfo.deltalen,
2334             textlen,
2335             deltainfo.base,
2336             link,
2337             p1r,
2338             p2r,
2339             node,
2340         )
2341         self.index.append(e)
2342
2343         entry = self._io.packentry(e, self.node, self.version, curr)
2344         self._writeentry(
2345             transaction, ifh, dfh, entry, deltainfo.data, link, offset
2346         )
2347
2348         rawtext = btext[0]
2349
2350         if alwayscache and rawtext is None:
2351             rawtext = deltacomputer.buildtext(revinfo, fh)
2352
2353         if type(rawtext) == bytes:  # only accept immutable objects
2354             self._revisioncache = (node, curr, rawtext)
2355         self._chainbasecache[curr] = deltainfo.chainbase
2356         return curr
2357
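The expected positions asserted by the concurrency checker in _addrevision() reduce to simple arithmetic; a standalone sketch with hypothetical sizes:

    # Standalone sketch of the expected file positions checked above.
    iosize = 64      # hypothetical size of one packed index entry
    curr = 10        # revisions already in the revlog
    offset = 4096    # self.end(prev): end of the last revision's data

    # Split revlog: .i holds fixed-size entries, .d holds the data.
    expected_index = curr * iosize
    expected_data = offset

    # Inline revlog: entry metadata and data share the .i file, so the
    # data offset is shifted by the metadata of every existing entry.
    expected_inline = offset + curr * iosize

    assert (expected_index, expected_data, expected_inline) == (640, 4096, 4736)
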
2358     def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2359         # Files opened in a+ mode have inconsistent behavior on various
2360         # platforms. Windows requires that a file positioning call be made
2361         # when the file handle transitions between reads and writes. See
2362         # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2363         # platforms, Python or the platform itself can be buggy. Some versions
2364         # of Solaris have been observed to not append at the end of the file
2365         # if the file was seeked to before the end. See issue4943 for more.
2366         #
2367         # We work around this issue by inserting a seek() before writing.
2368         # Note: This is likely not necessary on Python 3. However, because
2369         # the file handle is reused for reads and may be seeked there, we need
2370         # to be careful before changing this.
2371         ifh.seek(0, os.SEEK_END)
2372         if dfh:
2373             dfh.seek(0, os.SEEK_END)
2374
2375         curr = len(self) - 1
2376         if not self._inline:
2377             transaction.add(self.datafile, offset)
2378             transaction.add(self.indexfile, curr * len(entry))
2379             if data[0]:
2380                 dfh.write(data[0])
2381             dfh.write(data[1])
2382             ifh.write(entry)
2383         else:
2384             offset += curr * self._io.size
2385             transaction.add(self.indexfile, offset)
2386             ifh.write(entry)
2387             ifh.write(data[0])
2388             ifh.write(data[1])
2389             self._enforceinlinesize(transaction, ifh)
2390         nodemaputil.setup_persistent_nodemap(transaction, self)
2391
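The defensive reposition that _writeentry() performs can be demonstrated in isolation; a standalone sketch of the pattern (seek to EOF before appending on a handle that has also been used for reads):

    import os
    import tempfile

    # A handle used for both reads and writes may be positioned mid-file;
    # seeking to EOF before each write avoids relying on append mode.
    with tempfile.TemporaryFile() as fh:
        fh.write(b'existing')
        fh.seek(0)
        fh.read(4)                   # handle now sits mid-file
        fh.seek(0, os.SEEK_END)      # the defensive reposition
        fh.write(b'+new')
        fh.seek(0)
        assert fh.read() == b'existing+new'
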
2392     def addgroup(
2393         self,
2394         deltas,
2395         linkmapper,
2396         transaction,
2397         alwayscache=False,
2398         addrevisioncb=None,
2399         duplicaterevisioncb=None,
2400     ):
2401         """
2402         add a delta group
2403
2404         given a set of deltas, add them to the revision log. the
2405         first delta is against its parent, which should be in our
2406         log, the rest are against the previous delta.
2407
2408         If ``addrevisioncb`` is defined, it will be called with arguments of
2409         this revlog and the node that was added.
2410         """
2411
2412         if self._writinghandles:
2413             raise error.ProgrammingError(b'cannot nest addgroup() calls')
2414
2415         r = len(self)
2416         end = 0
2417         if r:
2418             end = self.end(r - 1)
2419         ifh = self._indexfp(b"a+")
2420         isize = r * self._io.size
2421         if self._inline:
2422             transaction.add(self.indexfile, end + isize)
2423             dfh = None
2424         else:
2425             transaction.add(self.indexfile, isize)
2426             transaction.add(self.datafile, end)
2427             dfh = self._datafp(b"a+")
2428
2429         def flush():
2430             if dfh:
2431                 dfh.flush()
2432             ifh.flush()
2433
2434         self._writinghandles = (ifh, dfh)
2435         empty = True
2436
2437         try:
2438             deltacomputer = deltautil.deltacomputer(self)
2439             # loop through our set of deltas
2440             for data in deltas:
2441                 node, p1, p2, linknode, deltabase, delta, flags = data
2442                 link = linkmapper(linknode)
2443                 flags = flags or REVIDX_DEFAULT_FLAGS
2444
2445                 rev = self.index.get_rev(node)
2446                 if rev is not None:
2447                     # this can happen if two branches make the same change
2448                     self._nodeduplicatecallback(transaction, rev)
2449                     if duplicaterevisioncb:
2450                         duplicaterevisioncb(self, rev)
2451                     empty = False
2452                     continue
2453
2454                 for p in (p1, p2):
2455                     if not self.index.has_node(p):
2456                         raise error.LookupError(
2457                             p, self.indexfile, _(b'unknown parent')
2458                         )
2459
2460                 if not self.index.has_node(deltabase):
2461                     raise error.LookupError(
2462                         deltabase, self.indexfile, _(b'unknown delta base')
2463                     )
2464
2465                 baserev = self.rev(deltabase)
2466
2467                 if baserev != nullrev and self.iscensored(baserev):
2468                     # if base is censored, delta must be full replacement in a
2469                     # single patch operation
2470                     hlen = struct.calcsize(b">lll")
2471                     oldlen = self.rawsize(baserev)
2472                     newlen = len(delta) - hlen
2473                     if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2474                         raise error.CensoredBaseError(
2475                             self.indexfile, self.node(baserev)
2476                         )
2477
2478                 if not flags and self._peek_iscensored(baserev, delta, flush):
2479                     flags |= REVIDX_ISCENSORED
2480
2481                 # We assume consumers of addrevisioncb will want to retrieve
2482                 # the added revision, which will require a call to
2483                 # revision(). revision() will fast path if there is a cache
2484                 # hit. So, we tell _addrevision() to always cache in this case.
2485                 # We're only using addgroup() in the context of changegroup
2486                 # generation so the revision data can always be handled as raw
2487                 # by the flagprocessor.
2488                 rev = self._addrevision(
2489                     node,
2490                     None,
2491                     transaction,
2492                     link,
2493                     p1,
2494                     p2,
2495                     flags,
2496                     (baserev, delta),
2497                     ifh,
2498                     dfh,
2499                     alwayscache=alwayscache,
2500                     deltacomputer=deltacomputer,
2501                 )
2502
2503                 if addrevisioncb:
2504                     addrevisioncb(self, rev)
2505                 empty = False
2506
2507                 if not dfh and not self._inline:
2508                     # addrevision switched from inline to conventional
2509                     # reopen the index
2510                     ifh.close()
2511                     dfh = self._datafp(b"a+")
2512                     ifh = self._indexfp(b"a+")
2513                     self._writinghandles = (ifh, dfh)
2514         finally:
2515             self._writinghandles = None
2516
2517         if dfh:
2518             dfh.close()
2519         ifh.close()
2520         return not empty
2521
2522     def iscensored(self, rev):
2523         """Check if a file revision is censored."""
2524         if not self._censorable:
2525             return False
2526
2527         return self.flags(rev) & REVIDX_ISCENSORED
2528
2529     def _peek_iscensored(self, baserev, delta, flush):
2530         """Quickly check if a delta produces a censored revision."""
2531         if not self._censorable:
2532             return False
2533
2534         return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2535
2536     def getstrippoint(self, minlink):
2537         """find the minimum rev that must be stripped to strip the linkrev
2538
2539         Returns a tuple containing the minimum rev and a set of all revs that
2540         have linkrevs that will be broken by this strip.
2541         """
2542         return storageutil.resolvestripinfo(
2543             minlink,
2544             len(self) - 1,
2545             self.headrevs(),
2546             self.linkrev,
2547             self.parentrevs,
2548         )
2549
2550     def strip(self, minlink, transaction):
2551         """truncate the revlog on the first revision with a linkrev >= minlink
2552
2553         This function is called when we're stripping revision minlink and
2554         its descendants from the repository.
2555
2556         We have to remove all revisions with linkrev >= minlink, because
2557         the equivalent changelog revisions will be renumbered after the
2558         strip.
2559
2560         So we truncate the revlog on the first of these revisions, and
2561         trust that the caller has saved the revisions that shouldn't be
2562         removed and that it'll re-add them after this truncation.
2563         """
2564         if len(self) == 0:
2565             return
2566
2567         rev, _ = self.getstrippoint(minlink)
2568         if rev == len(self):
2569             return
2570
2571         # first truncate the files on disk
2572         end = self.start(rev)
2573         if not self._inline:
2574             transaction.add(self.datafile, end)
2575             end = rev * self._io.size
2576         else:
2577             end += rev * self._io.size
2578
2579         transaction.add(self.indexfile, end)
2580
2581         # then reset internal state in memory to forget those revisions
2582         self._revisioncache = None
2583         self._chaininfocache = util.lrucachedict(500)
2584         self._chunkclear()
2585
2586         del self.index[rev:-1]
2587
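The truncation offsets computed by strip() follow the same inline-versus-split distinction as elsewhere; a standalone sketch with hypothetical sizes:

    # Standalone sketch of the truncation offsets computed in strip().
    iosize = 64             # hypothetical index entry size
    rev = 7                 # first revision to remove
    datastart = 10000       # self.start(rev), hypothetical

    # Split revlog: truncate .d at the stripped revision's data, and .i
    # after a whole number of fixed-size entries.
    d_truncate = datastart
    i_truncate = rev * iosize

    # Inline revlog: data and entries share the .i file, so both terms
    # add up, exactly as in the 'else' branch above.
    i_truncate_inline = datastart + rev * iosize

    assert (d_truncate, i_truncate, i_truncate_inline) == (10000, 448, 10448)
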
2565 def checksize(self):
2588 def checksize(self):
2566 """Check size of index and data files
2589 """Check size of index and data files
2567
2590
2568 return a (dd, di) tuple.
2591 return a (dd, di) tuple.
2569 - dd: extra bytes for the "data" file
2592 - dd: extra bytes for the "data" file
2570 - di: extra bytes for the "index" file
2593 - di: extra bytes for the "index" file
2571
2594
2572 A healthy revlog will return (0, 0).
2595 A healthy revlog will return (0, 0).
2573 """
2596 """
2574 expected = 0
2597 expected = 0
2575 if len(self):
2598 if len(self):
2576 expected = max(0, self.end(len(self) - 1))
2599 expected = max(0, self.end(len(self) - 1))
2577
2600
2578 try:
2601 try:
2579 with self._datafp() as f:
2602 with self._datafp() as f:
2580 f.seek(0, io.SEEK_END)
2603 f.seek(0, io.SEEK_END)
2581 actual = f.tell()
2604 actual = f.tell()
2582 dd = actual - expected
2605 dd = actual - expected
2583 except IOError as inst:
2606 except IOError as inst:
2584 if inst.errno != errno.ENOENT:
2607 if inst.errno != errno.ENOENT:
2585 raise
2608 raise
2586 dd = 0
2609 dd = 0
2587
2610
2588 try:
2611 try:
2589 f = self.opener(self.indexfile)
2612 f = self.opener(self.indexfile)
2590 f.seek(0, io.SEEK_END)
2613 f.seek(0, io.SEEK_END)
2591 actual = f.tell()
2614 actual = f.tell()
2592 f.close()
2615 f.close()
2593 s = self._io.size
2616 s = self._io.size
2594 i = max(0, actual // s)
2617 i = max(0, actual // s)
2595 di = actual - (i * s)
2618 di = actual - (i * s)
2596 if self._inline:
2619 if self._inline:
2597 databytes = 0
2620 databytes = 0
2598 for r in self:
2621 for r in self:
2599 databytes += max(0, self.length(r))
2622 databytes += max(0, self.length(r))
2600 dd = 0
2623 dd = 0
2601 di = actual - len(self) * s - databytes
2624 di = actual - len(self) * s - databytes
2602 except IOError as inst:
2625 except IOError as inst:
2603 if inst.errno != errno.ENOENT:
2626 if inst.errno != errno.ENOENT:
2604 raise
2627 raise
2605 di = 0
2628 di = 0
2606
2629
2607 return (dd, di)
2630 return (dd, di)
2608
2631
2609 def files(self):
2632 def files(self):
2610 res = [self.indexfile]
2633 res = [self.indexfile]
2611 if not self._inline:
2634 if not self._inline:
2612 res.append(self.datafile)
2635 res.append(self.datafile)
2613 return res
2636 return res
2614
2637
2615 def emitrevisions(
2638 def emitrevisions(
2616 self,
2639 self,
2617 nodes,
2640 nodes,
2618 nodesorder=None,
2641 nodesorder=None,
2619 revisiondata=False,
2642 revisiondata=False,
2620 assumehaveparentrevisions=False,
2643 assumehaveparentrevisions=False,
2621 deltamode=repository.CG_DELTAMODE_STD,
2644 deltamode=repository.CG_DELTAMODE_STD,
2622 ):
2645 ):
2623 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2646 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2624 raise error.ProgrammingError(
2647 raise error.ProgrammingError(
2625 b'unhandled value for nodesorder: %s' % nodesorder
2648 b'unhandled value for nodesorder: %s' % nodesorder
2626 )
2649 )
2627
2650
2628 if nodesorder is None and not self._generaldelta:
2651 if nodesorder is None and not self._generaldelta:
2629 nodesorder = b'storage'
2652 nodesorder = b'storage'
2630
2653
2631 if (
2654 if (
2632 not self._storedeltachains
2655 not self._storedeltachains
2633 and deltamode != repository.CG_DELTAMODE_PREV
2656 and deltamode != repository.CG_DELTAMODE_PREV
2634 ):
2657 ):
2635 deltamode = repository.CG_DELTAMODE_FULL
2658 deltamode = repository.CG_DELTAMODE_FULL
2636
2659
2637 return storageutil.emitrevisions(
2660 return storageutil.emitrevisions(
2638 self,
2661 self,
2639 nodes,
2662 nodes,
2640 nodesorder,
2663 nodesorder,
2641 revlogrevisiondelta,
2664 revlogrevisiondelta,
2642 deltaparentfn=self.deltaparent,
2665 deltaparentfn=self.deltaparent,
2643 candeltafn=self.candelta,
2666 candeltafn=self.candelta,
2644 rawsizefn=self.rawsize,
2667 rawsizefn=self.rawsize,
2645 revdifffn=self.revdiff,
2668 revdifffn=self.revdiff,
2646 flagsfn=self.flags,
2669 flagsfn=self.flags,
2647 deltamode=deltamode,
2670 deltamode=deltamode,
2648 revisiondata=revisiondata,
2671 revisiondata=revisiondata,
2649 assumehaveparentrevisions=assumehaveparentrevisions,
2672 assumehaveparentrevisions=assumehaveparentrevisions,
2650 )
2673 )
2651
2674
2652 DELTAREUSEALWAYS = b'always'
2675 DELTAREUSEALWAYS = b'always'
2653 DELTAREUSESAMEREVS = b'samerevs'
2676 DELTAREUSESAMEREVS = b'samerevs'
2654 DELTAREUSENEVER = b'never'
2677 DELTAREUSENEVER = b'never'
2655
2678
2656 DELTAREUSEFULLADD = b'fulladd'
2679 DELTAREUSEFULLADD = b'fulladd'
2657
2680
2658 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedatacompanion=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta
        encoding differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).
        DELTAREUSEFULLADD
           Revisions will be re-added as if they were new content. This is
           slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
           e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. If None, the destination revlog's current setting is kept.

        If not None, `sidedatacompanion` is a callable that accepts two
        arguments:

            (srcrevlog, rev)

        and returns a quintet that controls changes to sidedata content from
        the old revision to the new clone result:

            (dropall, filterout, update, new_flags, dropped_flags)

        * if `dropall` is True, all sidedata should be dropped
        * `filterout` is a set of sidedata keys that should be dropped
        * `update` is a mapping of additional/new key -> value
        * `new_flags` is a bitfield of new flags that the revision should get
        * `dropped_flags` is a bitfield of flags that the revision should no
          longer have
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedatacompanion,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

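As a reading aid, a self-contained sketch (not Mercurial's API) of the save/override/restore pattern above, with each policy reduced to the pair of lazy-delta booleans it pins; the try/finally in `clone` plays the role of `restore()` here:

    POLICY_FLAGS = {
        b'always':   (True, True),    # (_lazydelta, _lazydeltabase)
        b'samerevs': (True, False),
        b'never':    (False, False),
    }

    def apply_policy(dest, policy):
        """Apply a delta-reuse policy; return a callback that undoes it."""
        old = (dest._lazydelta, dest._lazydeltabase)
        if policy in POLICY_FLAGS:
            dest._lazydelta, dest._lazydeltabase = POLICY_FLAGS[policy]
        def restore():
            dest._lazydelta, dest._lazydeltabase = old
        return restore

    class _FakeRevlog:
        _lazydelta = True
        _lazydeltabase = True

    rl = _FakeRevlog()
    restore = apply_policy(rl, b'never')
    assert (rl._lazydelta, rl._lazydeltabase) == (False, False)
    restore()
    assert (rl._lazydelta, rl._lazydeltabase) == (True, True)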
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedatacompanion,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            sidedataactions = (False, [], {}, 0, 0)
            if sidedatacompanion is not None:
                sidedataactions = sidedatacompanion(self, rev)

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
                dropall = sidedataactions[0]
                filterout = sidedataactions[1]
                update = sidedataactions[2]
                new_flags = sidedataactions[3]
                dropped_flags = sidedataactions[4]
                text, sidedata = self._revisiondata(rev)
                if dropall:
                    sidedata = {}
                for key in filterout:
                    sidedata.pop(key, None)
                sidedata.update(update)
                if not sidedata:
                    sidedata = None

                flags |= new_flags
                flags &= ~dropped_flags

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.rawdata(rev)

                ifh = destrevlog.opener(
                    destrevlog.indexfile, b'a+', checkambig=False
                )
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, b'a+')
                try:
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        ifh,
                        dfh,
                        deltacomputer=deltacomputer,
                    )
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

            if addrevisioncb:
                addrevisioncb(self, rev, node)

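The two bit operations applied to `flags` above are easy to misread, so here they are standalone, with invented flag bits (not real revlog flags): new flags are OR-ed in, dropped flags are masked out.

    EXAMPLE_A = 1 << 12  # made-up bit for illustration
    EXAMPLE_B = 1 << 13  # made-up bit for illustration

    flags = EXAMPLE_B
    flags |= EXAMPLE_A   # grant a flag
    flags &= ~EXAMPLE_B  # revoke a flag
    assert flags == EXAMPLE_A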
    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs') % self.version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self.indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self.datafile, location=b'store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

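The copy-then-swap strategy in the comment above generalizes beyond revlogs. A minimal, hedged sketch using plain files (the `.bak` copy stands in for `tr.addbackup`, which is transaction-aware in the real code):

    import os
    import shutil

    def rewrite_with_swap(path, transform):
        """Build the replacement beside the original, back up, then swap."""
        tmp = path + '.tmpcensored'  # suffix mirrors the code above
        with open(path, 'rb') as src, open(tmp, 'wb') as dst:
            dst.write(transform(src.read()))
        shutil.copy2(path, path + '.bak')  # stand-in for tr.addbackup()
        os.rename(tmp, path)  # atomic replacement on POSIX filesystems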
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.indexfile, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

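The "LM" rows in the table above refer to the filelog metadata header. A simplified, self-contained sketch of that b'\x01\n' framing (the real helpers, with more edge cases, are `storageutil.packmeta`/`parsemeta`):

    def split_filelog_meta(rawtext):
        """Split an optional b'\x01\n'-delimited metadata header off rawtext."""
        if rawtext.startswith(b'\x01\n'):
            end = rawtext.index(b'\x01\n', 2)  # closing delimiter
            return rawtext[2:end], rawtext[end + 2 :]
        return None, rawtext

    meta, text = split_filelog_meta(b'\x01\ncopy: a\n\x01\nhello')
    assert meta == b'copy: a\n' and text == b'hello'
    assert len(b'\x01\ncopy: a\n\x01\nhello') - len(text) == 12  # this is LM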
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d
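For shape only, a standalone sketch of the opt-in result pattern `storageinfo` uses; computing a value only when its flag is requested keeps rarely-needed statistics free:

    def storage_stats(sizes, revisionscount=False, trackedsize=False):
        d = {}
        if revisionscount:
            d[b'revisionscount'] = len(sizes)
        if trackedsize:
            d[b'trackedsize'] = sum(sizes)
        return d

    assert storage_stats([3, 5], trackedsize=True) == {b'trackedsize': 8}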
@@ -1,750 +1,754 b''
# store.py - repository store handling for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import stat

from .i18n import _
from .pycompat import getattr
from .node import hex
from . import (
    changelog,
    error,
    manifest,
    policy,
    pycompat,
    util,
    vfs as vfsmod,
)
from .utils import hashutil

parsers = policy.importmod('parsers')
# how many bytes should be read from fncache in one read
# It is done to prevent loading large fncache files into memory
fncache_chunksize = 10 ** 6


def _matchtrackedpath(path, matcher):
    """parses a fncache entry and returns whether the entry is tracking a path
    matched by matcher or not.

    If matcher is None, returns True"""

    if matcher is None:
        return True
    path = decodedir(path)
    if path.startswith(b'data/'):
        return matcher(path[len(b'data/') : -len(b'.i')])
    elif path.startswith(b'meta/'):
        return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])

    raise error.ProgrammingError(b"cannot decode path %s" % path)


# This avoids a collision between a file named foo and a dir named
# foo.i or foo.d
def _encodedir(path):
    """
    >>> _encodedir(b'data/foo.i')
    'data/foo.i'
    >>> _encodedir(b'data/foo.i/bla.i')
    'data/foo.i.hg/bla.i'
    >>> _encodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i.hg.hg/bla.i'
    >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
    'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
    """
    return (
        path.replace(b".hg/", b".hg.hg/")
        .replace(b".i/", b".i.hg/")
        .replace(b".d/", b".d.hg/")
    )


encodedir = getattr(parsers, 'encodedir', _encodedir)


def decodedir(path):
    """
    >>> decodedir(b'data/foo.i')
    'data/foo.i'
    >>> decodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir(b'data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    """
    if b".hg/" not in path:
        return path
    return (
        path.replace(b".d.hg/", b".d/")
        .replace(b".i.hg/", b".i/")
        .replace(b".hg.hg/", b".hg/")
    )


def _reserved():
    """characters that are problematic for filesystems

    * ascii escapes (0..31)
    * ascii hi (126..255)
    * windows specials

    these characters will be escaped by encodefunctions
    """
    winreserved = [ord(x) for x in u'\\:*?"<>|']
    for x in range(32):
        yield x
    for x in range(126, 256):
        yield x
    for x in winreserved:
        yield x


def _buildencodefun():
    """
    >>> enc, dec = _buildencodefun()

    >>> enc(b'nothing/special.txt')
    'nothing/special.txt'
    >>> dec(b'nothing/special.txt')
    'nothing/special.txt'

    >>> enc(b'HELLO')
    '_h_e_l_l_o'
    >>> dec(b'_h_e_l_l_o')
    'HELLO'

    >>> enc(b'hello:world?')
    'hello~3aworld~3f'
    >>> dec(b'hello~3aworld~3f')
    'hello:world?'

    >>> enc(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    >>> dec(b'the~07quick~adshot')
    'the\\x07quick\\xadshot'
    """
    e = b'_'
    xchr = pycompat.bytechr
    asciistr = list(map(xchr, range(127)))
    capitals = list(range(ord(b"A"), ord(b"Z") + 1))

    cmap = {x: x for x in asciistr}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    for x in capitals + [ord(e)]:
        cmap[xchr(x)] = e + xchr(x).lower()

    dmap = {}
    for k, v in pycompat.iteritems(cmap):
        dmap[v] = k

    def decode(s):
        i = 0
        while i < len(s):
            for l in pycompat.xrange(1, 4):
                try:
                    yield dmap[s[i : i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError

    return (
        lambda s: b''.join(
            [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
        ),
        lambda s: b''.join(list(decode(s))),
    )


_encodefname, _decodefname = _buildencodefun()


def encodefilename(s):
    """
    >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    """
    return _encodefname(encodedir(s))


def decodefilename(s):
    """
    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    """
    return decodedir(_decodefname(s))


def _buildlowerencodefun():
    """
    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    """
    xchr = pycompat.bytechr
    cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    for x in range(ord(b"A"), ord(b"Z") + 1):
        cmap[xchr(x)] = xchr(x).lower()

    def lowerencode(s):
        return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])

    return lowerencode


lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()

# Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
_winres3 = (b'aux', b'con', b'prn', b'nul')  # length 3
_winres4 = (b'com', b'lpt')  # length 4 (with trailing 1..9)


def _auxencode(path, dotencode):
    """
    Encodes filenames containing names reserved by Windows or which end in
    period or space. Does not touch other single reserved characters c.
    Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
    Additionally encodes space or period at the beginning, if dotencode is
    True. Parameter path is assumed to be all lowercase.
    A segment only needs encoding if a reserved name appears as a
    basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
    doesn't need encoding.

    >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
    >>> _auxencode(s.split(b'/'), True)
    ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
    >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
    >>> _auxencode(s.split(b'/'), False)
    ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
    >>> _auxencode([b'foo. '], True)
    ['foo.~20']
    >>> _auxencode([b' .foo'], True)
    ['~20.foo']
    """
    for i, n in enumerate(path):
        if not n:
            continue
        if dotencode and n[0] in b'. ':
            n = b"~%02x" % ord(n[0:1]) + n[1:]
            path[i] = n
        else:
            l = n.find(b'.')
            if l == -1:
                l = len(n)
            if (l == 3 and n[:3] in _winres3) or (
                l == 4
                and n[3:4] <= b'9'
                and n[3:4] >= b'1'
                and n[:3] in _winres4
            ):
                # encode third letter ('aux' -> 'au~78')
                ec = b"~%02x" % ord(n[2:3])
                n = n[0:2] + ec + n[3:]
                path[i] = n
        if n[-1] in b'. ':
            # encode last period or space ('foo...' -> 'foo..~2e')
            path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
    return path


_maxstorepathlen = 120
_dirprefixlen = 8
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4


def _hashencode(path, dotencode):
    digest = hex(hashutil.sha1(path).digest())
    le = lowerencode(path[5:]).split(b'/')  # skips prefix 'data/' or 'meta/'
    parts = _auxencode(le, dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        d = p[:_dirprefixlen]
        if d[-1] in b'. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + b'_'
        if sdirslen == 0:
            t = len(d)
        else:
            t = sdirslen + 1 + len(d)
            if t > _maxshortdirslen:
                break
        sdirs.append(d)
        sdirslen = t
    dirs = b'/'.join(sdirs)
    if len(dirs) > 0:
        dirs += b'/'
    res = b'dh/' + dirs + digest + ext
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        filler = basename[:spaceleft]
        res = b'dh/' + dirs + filler + digest + ext
    return res


def _hybridencode(path, dotencode):
    """encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see _auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than _maxstorepathlen, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to _dirprefixlen characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _maxshortdirslen.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    """
    path = encodedir(path)
    ef = _encodefname(path).split(b'/')
    res = b'/'.join(_auxencode(ef, dotencode))
    if len(res) > _maxstorepathlen:
        res = _hashencode(path, dotencode)
    return res


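A self-contained sketch, independent of Mercurial's exact rules, of the "hash when too long" fallback described above: keep a readable prefix of the basename, then guarantee a length bound with a digest while preserving the extension. The constant mirrors _maxstorepathlen; everything else is simplified.

    import hashlib

    _MAXLEN = 120  # mirrors _maxstorepathlen above

    def shorten(path):
        if len(path) <= _MAXLEN:
            return path
        digest = hashlib.sha1(path.encode()).hexdigest()
        stem, dot, ext = path.rpartition('.')
        keep = _MAXLEN - len(digest) - len(ext) - 1
        return path[:keep] + digest + dot + ext

    assert len(shorten('x' * 500 + '.i')) <= _MAXLEN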
def _pathencode(path):
    de = encodedir(path)
    if len(path) > _maxstorepathlen:
        return _hashencode(de, True)
    ef = _encodefname(de).split(b'/')
    res = b'/'.join(_auxencode(ef, True))
    if len(res) > _maxstorepathlen:
        return _hashencode(de, True)
    return res


_pathencode = getattr(parsers, 'pathencode', _pathencode)


def _plainhybridencode(f):
    return _hybridencode(f, False)


def _calcmode(vfs):
    try:
        # files in .hg/ will be created using this mode
        mode = vfs.stat().st_mode
        # avoid some useless chmods
        if (0o777 & ~util.umask) == (0o777 & mode):
            mode = None
    except OSError:
        mode = None
    return mode


_data = [
    b'bookmarks',
    b'narrowspec',
    b'data',
    b'meta',
    b'00manifest.d',
    b'00manifest.i',
    b'00changelog.d',
    b'00changelog.i',
    b'phaseroots',
    b'obsstore',
    b'requires',
]

REVLOG_FILES_EXT = (b'.i', b'.d', b'.n', b'.nd')


def isrevlog(f, kind, st):
    if kind != stat.S_IFREG:
        return False
    return f.endswith(REVLOG_FILES_EXT)


class basicstore(object):
    '''base class for local repository stores'''

    def __init__(self, path, vfstype):
        vfs = vfstype(path)
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = vfsmod.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        return self.path + b'/' + encodedir(f)

    def _walk(self, relpath, recurse, filefilter=isrevlog):
        '''yields (unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += b'/' + relpath
        striplen = len(self.path) + 1
        l = []
        if self.rawvfs.isdir(path):
            visit = [path]
            readdir = self.rawvfs.readdir
            while visit:
                p = visit.pop()
                for f, kind, st in readdir(p, stat=True):
                    fp = p + b'/' + f
                    if filefilter(f, kind, st):
                        n = util.pconvert(fp[striplen:])
                        l.append((decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        l.sort()
        return l

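As an aside, the stack-based traversal in _walk above can be shown standalone; a hedged sketch using os.scandir, yielding (relative name, size) for files that pass a filter:

    import os
    import stat

    def walk(root, keep):
        out = []
        visit = [root]
        while visit:
            p = visit.pop()
            for entry in os.scandir(p):
                st = entry.stat(follow_symlinks=False)
                if stat.S_ISDIR(st.st_mode):
                    visit.append(entry.path)
                elif keep(entry.name):
                    out.append((os.path.relpath(entry.path, root), st.st_size))
        return sorted(out)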
-    def changelog(self, trypending):
-        return changelog.changelog(self.vfs, trypending=trypending)
+    def changelog(self, trypending, concurrencychecker=None):
+        return changelog.changelog(
+            self.vfs,
+            trypending=trypending,
+            concurrencychecker=concurrencychecker,
+        )

    def manifestlog(self, repo, storenarrowmatch):
        rootstore = manifest.manifestrevlog(self.vfs)
        return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)

    def datafiles(self, matcher=None):
        return self._walk(b'data', True) + self._walk(b'meta', True)

    def topfiles(self):
        # yield manifest before changelog
        return reversed(self._walk(b'', False))

    def walk(self, matcher=None):
        """yields (unencoded, encoded, size)

        if a matcher is passed, only storage files of tracked paths that
        match the matcher are yielded
        """
        # yield data files first
        for x in self.datafiles(matcher):
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        return _data

    def write(self, tr):
        pass

    def invalidatecaches(self):
        pass

    def markremoved(self, fn):
        pass

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # file?
        if self.vfs.exists(path + b".i"):
            return True
        # dir?
        if not path.endswith(b"/"):
            path = path + b"/"
        return self.vfs.exists(path)


class encodedstore(basicstore):
    def __init__(self, path, vfstype):
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = vfsmod.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self, matcher=None):
        for a, b, size in super(encodedstore, self).datafiles():
            try:
                a = decodefilename(a)
            except KeyError:
                a = None
            if a is not None and not _matchtrackedpath(a, matcher):
                continue
            yield a, b, size

    def join(self, f):
        return self.path + b'/' + encodefilename(f)

    def copylist(self):
        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]


class fncache(object):
    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        self.entries = None
        self._dirty = False
        # set of new additions to fncache
        self.addls = set()

    def ensureloaded(self, warn=None):
        """read the fncache file if not already read.

        If the file on disk is corrupted, raise. If warn is provided,
        warn and keep going instead."""
        if self.entries is None:
            self._load(warn)

    def _load(self, warn=None):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs(b'fncache', mode=b'rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return

        self.entries = set()
        chunk = b''
        for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
            chunk += c
            try:
                p = chunk.rindex(b'\n')
                self.entries.update(decodedir(chunk[: p + 1]).splitlines())
                chunk = chunk[p + 1 :]
            except ValueError:
                # substring '\n' not found, maybe the entry is bigger than the
                # chunksize, so let's keep iterating
                pass

        if chunk:
            msg = _(b"fncache does not end with a newline")
            if warn:
                warn(msg + b'\n')
            else:
                raise error.Abort(
                    msg,
                    hint=_(
                        b"use 'hg debugrebuildfncache' to "
                        b"rebuild the fncache"
                    ),
                )
        self._checkentries(fp, warn)
        fp.close()

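The chunked-read pattern in _load above is worth isolating; a standalone sketch that reads fixed-size blocks, splits on the last newline seen so far, and carries the remainder into the next iteration:

    import functools
    import io

    def iter_lines_chunked(fp, chunksize=4):
        buf = b''
        for block in iter(functools.partial(fp.read, chunksize), b''):
            buf += block
            try:
                p = buf.rindex(b'\n')
            except ValueError:
                continue  # no newline yet; keep accumulating
            yield from buf[: p + 1].splitlines()
            buf = buf[p + 1 :]
        assert not buf, 'input did not end with a newline'

    fp = io.BytesIO(b'one\ntwo\nthree\n')
    assert list(iter_lines_chunked(fp)) == [b'one', b'two', b'three']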
569 def _checkentries(self, fp, warn):
573 def _checkentries(self, fp, warn):
570 """ make sure there is no empty string in entries """
574 """ make sure there is no empty string in entries """
571 if b'' in self.entries:
575 if b'' in self.entries:
572 fp.seek(0)
576 fp.seek(0)
573 for n, line in enumerate(util.iterfile(fp)):
577 for n, line in enumerate(util.iterfile(fp)):
574 if not line.rstrip(b'\n'):
578 if not line.rstrip(b'\n'):
575 t = _(b'invalid entry in fncache, line %d') % (n + 1)
579 t = _(b'invalid entry in fncache, line %d') % (n + 1)
576 if warn:
580 if warn:
577 warn(t + b'\n')
581 warn(t + b'\n')
578 else:
582 else:
579 raise error.Abort(t)
583 raise error.Abort(t)
580
584
581 def write(self, tr):
585 def write(self, tr):
582 if self._dirty:
586 if self._dirty:
583 assert self.entries is not None
587 assert self.entries is not None
584 self.entries = self.entries | self.addls
588 self.entries = self.entries | self.addls
585 self.addls = set()
589 self.addls = set()
586 tr.addbackup(b'fncache')
590 tr.addbackup(b'fncache')
587 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
591 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
588 if self.entries:
592 if self.entries:
589 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
593 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
590 fp.close()
594 fp.close()
591 self._dirty = False
595 self._dirty = False
592 if self.addls:
596 if self.addls:
593 # if we have just new entries, let's append them to the fncache
597 # if we have just new entries, let's append them to the fncache
594 tr.addbackup(b'fncache')
598 tr.addbackup(b'fncache')
595 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
599 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
596 if self.addls:
600 if self.addls:
597 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
601 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
598 fp.close()
602 fp.close()
599 self.entries = None
603 self.entries = None
600 self.addls = set()
604 self.addls = set()
601
605
602 def add(self, fn):
606 def add(self, fn):
603 if self.entries is None:
607 if self.entries is None:
604 self._load()
608 self._load()
605 if fn not in self.entries:
609 if fn not in self.entries:
606 self.addls.add(fn)
610 self.addls.add(fn)
607
611
608 def remove(self, fn):
612 def remove(self, fn):
609 if self.entries is None:
613 if self.entries is None:
610 self._load()
614 self._load()
611 if fn in self.addls:
615 if fn in self.addls:
612 self.addls.remove(fn)
616 self.addls.remove(fn)
613 return
617 return
614 try:
618 try:
615 self.entries.remove(fn)
619 self.entries.remove(fn)
616 self._dirty = True
620 self._dirty = True
617 except KeyError:
621 except KeyError:
618 pass
622 pass
619
623
620 def __contains__(self, fn):
624 def __contains__(self, fn):
621 if fn in self.addls:
625 if fn in self.addls:
622 return True
626 return True
623 if self.entries is None:
627 if self.entries is None:
624 self._load()
628 self._load()
625 return fn in self.entries
629 return fn in self.entries
626
630
627 def __iter__(self):
631 def __iter__(self):
628 if self.entries is None:
632 if self.entries is None:
629 self._load()
633 self._load()
630 return iter(self.entries | self.addls)
634 return iter(self.entries | self.addls)
631
635
632
636
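The fncache class above keeps two sets: entries, loaded lazily from the on-disk file, and addls, additions that have not been persisted yet. Removals mark the cache dirty and force a full rewrite, while pure additions can be flushed with a cheap append. Below is a minimal, self-contained sketch of that pattern; the names are illustrative only, not Mercurial's API, and the transaction/backup and path-encoding handling is deliberately omitted.

import os


class PendingSetCache:
    """Persisted set of names with cheap appends (illustrative only)."""

    def __init__(self, path):
        self.path = path     # backing file, one entry per line
        self.entries = None  # main set, loaded lazily from disk
        self.addls = set()   # additions not yet written out
        self._dirty = False  # a removal happened, full rewrite needed

    def _load(self):
        if os.path.exists(self.path):
            with open(self.path) as fp:
                self.entries = set(fp.read().splitlines())
        else:
            self.entries = set()

    def add(self, name):
        if self.entries is None:
            self._load()
        if name not in self.entries:
            self.addls.add(name)

    def remove(self, name):
        if self.entries is None:
            self._load()
        if name in self.addls:
            self.addls.discard(name)
        elif name in self.entries:
            self.entries.discard(name)
            self._dirty = True

    def write(self):
        if self._dirty:
            # something was removed: merge and rewrite the whole file
            self.entries |= self.addls
            self.addls = set()
            with open(self.path, 'w') as fp:
                fp.writelines(e + '\n' for e in sorted(self.entries))
            self._dirty = False
        if self.addls:
            # only new entries: append them and drop the in-memory set so
            # the next lookup reloads the merged state from disk
            with open(self.path, 'a') as fp:
                fp.writelines(e + '\n' for e in sorted(self.addls))
            self.entries = None
            self.addls = set()

Usage would be along the lines of c = PendingSetCache('fncache'); c.add('data/foo.i'); c.write(), after which a second write() is a no-op until more changes arrive. As in fncache.write above, the append path invalidates the in-memory set so the next lookup reloads the merged state from disk.
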
class _fncachevfs(vfsmod.proxyvfs):
    def __init__(self, vfs, fnc, encode):
        vfsmod.proxyvfs.__init__(self, vfs)
        self.fncache = fnc
        self.encode = encode

    def __call__(self, path, mode=b'r', *args, **kw):
        encoded = self.encode(path)
        if mode not in (b'r', b'rb') and (
            path.startswith(b'data/') or path.startswith(b'meta/')
        ):
            # do not trigger a fncache load when adding a file that already is
            # known to exist.
            notload = self.fncache.entries is None and self.vfs.exists(encoded)
            if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
                # when appending to an existing file, if the file has size zero,
                # it should be considered as missing. Such zero-size files are
                # the result of truncation when a transaction is aborted.
                notload = False
            if not notload:
                self.fncache.add(path)
        return self.vfs(encoded, mode, *args, **kw)

    def join(self, path):
        if path:
            return self.vfs.join(self.encode(path))
        else:
            return self.vfs.join(path)

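_fncachevfs above implements the interception: any write-mode open under data/ or meta/ records the unencoded path in the fncache before delegating to the wrapped opener, so a new revlog file cannot appear on disk without being registered. A stripped-down sketch of such a wrapper follows; the names are hypothetical, and the real class additionally handles path encoding and the zero-size append case shown above.

class RegisteringOpener:
    """Record paths opened for writing under tracked prefixes."""

    def __init__(self, opener, registry, prefixes=('data/', 'meta/')):
        self.opener = opener      # callable(path, mode) -> file object
        self.registry = registry  # anything with an add(path) method
        self.prefixes = prefixes

    def __call__(self, path, mode='r'):
        if mode not in ('r', 'rb') and path.startswith(self.prefixes):
            # queue the unencoded path before delegating the actual open
            self.registry.add(path)
        return self.opener(path, mode)

Wired up as, say, RegisteringOpener(open, PendingSetCache('fncache')), a call like opener('data/foo.i', 'ab') queues data/foo.i for the next write().
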
class fncachestore(basicstore):
    def __init__(self, path, vfstype, dotencode):
        if dotencode:
            encode = _pathencode
        else:
            encode = _plainhybridencode
        self.encode = encode
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.pathsep = self.path + b'/'
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        fnc = fncache(vfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(vfs, fnc, encode)
        self.opener = self.vfs

    def join(self, f):
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        return self.rawvfs.stat(path).st_size

    def datafiles(self, matcher=None):
        for f in sorted(self.fncache):
            if not _matchtrackedpath(f, matcher):
                continue
            ef = self.encode(f)
            try:
                yield f, ef, self.getsize(ef)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise

    def copylist(self):
        d = (
            b'bookmarks',
            b'narrowspec',
            b'data',
            b'meta',
            b'dh',
            b'fncache',
            b'phaseroots',
            b'obsstore',
            b'00manifest.d',
            b'00manifest.i',
            b'00changelog.d',
            b'00changelog.i',
            b'requires',
        )
        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]

    def write(self, tr):
        self.fncache.write(tr)

    def invalidatecaches(self):
        self.fncache.entries = None
        self.fncache.addls = set()

    def markremoved(self, fn):
        self.fncache.remove(fn)

    def _exists(self, f):
        ef = self.encode(f)
        try:
            self.getsize(ef)
            return True
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent entry
            return False

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # check for files (exact match)
        e = path + b'.i'
        if e in self.fncache and self._exists(e):
            return True
        # now check for directories (prefix match)
        if not path.endswith(b'/'):
            path += b'/'
        for e in self.fncache:
            if e.startswith(path) and self._exists(e):
                return True
        return False
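
The __contains__ method closing the class treats a path as present either as a file (an exact data/<path>.i entry) or as a directory (a prefix match on data/<path>/). The same rule in isolation, against a plain set of names, is shown below; the helper is hypothetical, and the real method additionally verifies each candidate on disk via _exists.

def store_contains(entries, path):
    """Mirror fncachestore.__contains__ against a plain set of names."""
    path = 'data/' + path
    # exact file match: the revlog index of the path itself
    if path + '.i' in entries:
        return True
    # directory match: any entry strictly below the path
    if not path.endswith('/'):
        path += '/'
    return any(e.startswith(path) for e in entries)


assert store_contains({'data/foo.i'}, 'foo')          # tracked file
assert store_contains({'data/dir/foo.i'}, 'dir')      # tracked directory
assert not store_contains({'data/dir/foo.i'}, 'foo')  # neither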