commitctx: extract the function in a dedicated module...
marmoute
r45759:ae5c1a3b default
@@ -0,0 +1,215 @@
# commit.py - function to perform commit
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
)

from . import (
    metadata,
    phases,
    scmutil,
    subrepoutil,
)


def commitctx(repo, ctx, error=False, origctx=None):
    """Add a new revision to the target repository.
    Revision information is passed via the context argument.

    ctx.files() should list all files involved in this commit, i.e.
    modified/added/removed files. On merge, it may be wider than the
    ctx.files() to be committed, since any file nodes derived directly
    from p1 or p2 are excluded from the committed ctx.files().

    origctx is for convert to work around the problem that bug
    fixes to the files list in changesets change hashes. For
    convert to be the identity, it can pass an origctx and this
    function will use the same files list when it makes sense to
    do so.
    """
    repo = repo.unfiltered()

    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    if repo.filecopiesmode == b'changeset-sidedata':
        writechangesetcopy = True
        writefilecopymeta = True
        writecopiesto = None
    else:
        writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
        writefilecopymeta = writecopiesto != b'changeset-only'
        writechangesetcopy = writecopiesto in (
            b'changeset-only',
            b'compatibility',
        )
    p1copies, p2copies = None, None
    if writechangesetcopy:
        p1copies = ctx.p1copies()
        p2copies = ctx.p2copies()
    filesadded, filesremoved = None, None
    with repo.lock(), repo.transaction(b"commit") as tr:
        trp = weakref.proxy(tr)

        if ctx.manifestnode():
            # reuse an existing manifest revision
            repo.ui.debug(b'reusing known manifest\n')
            mn = ctx.manifestnode()
            files = ctx.files()
            if writechangesetcopy:
                filesadded = ctx.filesadded()
                filesremoved = ctx.filesremoved()
        elif not ctx.files():
            repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
            mn = p1.manifestnode()
            files = []
        else:
            m1ctx = p1.manifestctx()
            m2ctx = p2.manifestctx()
            mctx = m1ctx.copy()

            m = mctx.read()
            m1 = m1ctx.read()
            m2 = m2ctx.read()

            # check in files
            added = []
            filesadded = []
            removed = list(ctx.removed())
            touched = []
            linkrev = len(repo)
            repo.ui.note(_(b"committing files:\n"))
            uipathfn = scmutil.getuipathfn(repo)
            for f in sorted(ctx.modified() + ctx.added()):
                repo.ui.note(uipathfn(f) + b"\n")
                try:
                    fctx = ctx[f]
                    if fctx is None:
                        removed.append(f)
                    else:
                        added.append(f)
                        m[f], is_touched = repo._filecommit(
                            fctx, m1, m2, linkrev, trp, writefilecopymeta,
                        )
                        if is_touched:
                            touched.append(f)
                            if writechangesetcopy and is_touched == 'added':
                                filesadded.append(f)
                        m.setflag(f, fctx.flags())
                except OSError:
                    repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
                    raise
                except IOError as inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        repo.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                    raise

            # update manifest
            removed = [f for f in removed if f in m1 or f in m2]
            drop = sorted([f for f in removed if f in m])
            for f in drop:
                del m[f]
            if p2.rev() != nullrev:
                rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
                removed = [f for f in removed if not rf(f)]

            touched.extend(removed)

            if writechangesetcopy:
                filesremoved = removed

            files = touched
            md = None
            if not files:
                # if no "files" actually changed in terms of the changelog,
                # try hard to detect unmodified manifest entry so that the
                # exact same commit can be reproduced later on convert.
                md = m1.diff(m, scmutil.matchfiles(repo, ctx.files()))
            if not files and md:
                repo.ui.debug(
                    b'not reusing manifest (no file change in '
                    b'changelog, but manifest differs)\n'
                )
            if files or md:
                repo.ui.note(_(b"committing manifest\n"))
                # we're using narrowmatch here since it's already applied at
                # other stages (such as dirstate.walk), so we're already
                # ignoring things outside of narrowspec in most cases. The
                # one case where we might have files outside the narrowspec
                # at this point is merges, and we already error out in the
                # case where the merge has files outside of the narrowspec,
                # so this is safe.
                mn = mctx.write(
                    trp,
                    linkrev,
                    p1.manifestnode(),
                    p2.manifestnode(),
                    added,
                    drop,
                    match=repo.narrowmatch(),
                )
            else:
                repo.ui.debug(
                    b'reusing manifest from p1 (listed files '
                    b'actually unchanged)\n'
                )
                mn = p1.manifestnode()

        if writecopiesto == b'changeset-only':
            # If writing only to changeset extras, use None to indicate that
            # no entry should be written. If writing to both, write an empty
            # entry to prevent the reader from falling back to reading
            # filelogs.
            p1copies = p1copies or None
            p2copies = p2copies or None
            filesadded = filesadded or None
            filesremoved = filesremoved or None

        if origctx and origctx.manifestnode() == mn:
            files = origctx.files()

        # update changelog
        repo.ui.note(_(b"committing changelog\n"))
        repo.changelog.delayupdate(tr)
        n = repo.changelog.add(
            mn,
            files,
            ctx.description(),
            trp,
            p1.node(),
            p2.node(),
            user,
            ctx.date(),
            ctx.extra().copy(),
            p1copies,
            p2copies,
            filesadded,
            filesremoved,
        )
        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
        repo.hook(
            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
        )
        # put the new commit in the proper phase
        targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
        if targetphase:
            # retracting the phase boundary does not alter parent changesets;
            # if a parent has a higher phase, the resulting phase will
            # be compliant anyway
            #
            # if the minimal phase was 0, we don't need to retract anything
            phases.registernew(repo, tr, targetphase, [n])
        return n
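For orientation, here is a minimal sketch of how the extracted function is reached after this change. The wrapper body is an assumption (only the new `commit` import is visible in the localrepo.py hunk below); the real method on localrepository may differ in detail:

    # assumed thin wrapper left behind in localrepo.py (illustrative)
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)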
@@ -1,3806 +1,3612 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
+    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
-    metadata,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

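For context, these filecache descriptors declare repository properties whose cached value is tied to a backing file's stat data and recomputed when that file changes. A minimal sketch of the pattern as it would appear on localrepository, assuming a `_bookmarks`-style property (illustrative, mirroring usage elsewhere in this file):

    # sketch: cached property backed by .hg/bookmarks (a 'plain' vfs path);
    # the cached value is dropped when the file's stat information changes
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)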
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository uses the persistent nodemap for the changelog and the manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
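A hedged sketch of how an extension registers one of these functions; the extension module and requirement string are hypothetical:

    # in a hypothetical extension module
    from mercurial import localrepo

    def featuresetup(ui, supported):
        # declare that this extension knows how to open such repos
        supported.add(b'exp-myext-requirement')

    def extsetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)

Because gathersupportedrequirements() below only runs entries whose defining module belongs to a loaded extension, the registration is inert unless the extension is actually enabled for the repository's ui.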

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

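A minimal sketch of the monkeypatching the docstring invites, assuming a hypothetical extension that reads an extra per-repo config file:

    # hypothetical: also honor .hg/myext.rc in addition to .hg/hgrc
    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            ui.readconfig(hgvfs.join(b'myext.rc'), root=wdirvfs.base)
            loaded = True
        except IOError:
            pass
        return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)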
697
697
698 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
698 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
699 """Perform additional actions after .hg/hgrc is loaded.
699 """Perform additional actions after .hg/hgrc is loaded.
700
700
701 This function is called during repository loading immediately after
701 This function is called during repository loading immediately after
702 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
702 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
703
703
704 The function can be used to validate configs, automatically add
704 The function can be used to validate configs, automatically add
705 options (including extensions) based on requirements, etc.
705 options (including extensions) based on requirements, etc.
706 """
706 """
707
707
708 # Map of requirements to list of extensions to load automatically when
708 # Map of requirements to list of extensions to load automatically when
709 # requirement is present.
709 # requirement is present.
710 autoextensions = {
710 autoextensions = {
711 b'git': [b'git'],
711 b'git': [b'git'],
712 b'largefiles': [b'largefiles'],
712 b'largefiles': [b'largefiles'],
713 b'lfs': [b'lfs'],
713 b'lfs': [b'lfs'],
714 }
714 }
715
715
716 for requirement, names in sorted(autoextensions.items()):
716 for requirement, names in sorted(autoextensions.items()):
717 if requirement not in requirements:
717 if requirement not in requirements:
718 continue
718 continue
719
719
720 for name in names:
720 for name in names:
721 if not ui.hasconfig(b'extensions', name):
721 if not ui.hasconfig(b'extensions', name):
722 ui.setconfig(b'extensions', name, b'', source=b'autoload')
722 ui.setconfig(b'extensions', name, b'', source=b'autoload')
723
723
724
724
725 def gathersupportedrequirements(ui):
725 def gathersupportedrequirements(ui):
726 """Determine the complete set of recognized requirements."""
726 """Determine the complete set of recognized requirements."""
727 # Start with all requirements supported by this file.
727 # Start with all requirements supported by this file.
728 supported = set(localrepository._basesupported)
728 supported = set(localrepository._basesupported)
729
729
730 # Execute ``featuresetupfuncs`` entries if they belong to an extension
730 # Execute ``featuresetupfuncs`` entries if they belong to an extension
731 # relevant to this ui instance.
731 # relevant to this ui instance.
732 modules = {m.__name__ for n, m in extensions.extensions(ui)}
732 modules = {m.__name__ for n, m in extensions.extensions(ui)}
733
733
734 for fn in featuresetupfuncs:
734 for fn in featuresetupfuncs:
735 if fn.__module__ in modules:
735 if fn.__module__ in modules:
736 fn(ui, supported)
736 fn(ui, supported)
737
737
738 # Add derived requirements from registered compression engines.
738 # Add derived requirements from registered compression engines.
739 for name in util.compengines:
739 for name in util.compengines:
740 engine = util.compengines[name]
740 engine = util.compengines[name]
741 if engine.available() and engine.revlogheader():
741 if engine.available() and engine.revlogheader():
742 supported.add(b'exp-compression-%s' % name)
742 supported.add(b'exp-compression-%s' % name)
743 if engine.name() == b'zstd':
743 if engine.name() == b'zstd':
744 supported.add(b'revlog-compression-zstd')
744 supported.add(b'revlog-compression-zstd')
745
745
746 return supported
746 return supported
747
747
748
748
749 def ensurerequirementsrecognized(requirements, supported):
749 def ensurerequirementsrecognized(requirements, supported):
750 """Validate that a set of local requirements is recognized.
750 """Validate that a set of local requirements is recognized.
751
751
752 Receives a set of requirements. Raises an ``error.RepoError`` if there
752 Receives a set of requirements. Raises an ``error.RepoError`` if there
753 exists any requirement in that set that currently loaded code doesn't
753 exists any requirement in that set that currently loaded code doesn't
754 recognize.
754 recognize.
755
755
756 Returns a set of supported requirements.
756 Returns a set of supported requirements.
757 """
757 """
758 missing = set()
758 missing = set()
759
759
760 for requirement in requirements:
760 for requirement in requirements:
761 if requirement in supported:
761 if requirement in supported:
762 continue
762 continue
763
763
764 if not requirement or not requirement[0:1].isalnum():
764 if not requirement or not requirement[0:1].isalnum():
765 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
765 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
766
766
767 missing.add(requirement)
767 missing.add(requirement)
768
768
769 if missing:
769 if missing:
770 raise error.RequirementError(
770 raise error.RequirementError(
771 _(b'repository requires features unknown to this Mercurial: %s')
771 _(b'repository requires features unknown to this Mercurial: %s')
772 % b' '.join(sorted(missing)),
772 % b' '.join(sorted(missing)),
773 hint=_(
773 hint=_(
774 b'see https://mercurial-scm.org/wiki/MissingRequirement '
774 b'see https://mercurial-scm.org/wiki/MissingRequirement '
775 b'for more information'
775 b'for more information'
776 ),
776 ),
777 )
777 )
778
778
779
779
780 def ensurerequirementscompatible(ui, requirements):
780 def ensurerequirementscompatible(ui, requirements):
781 """Validates that a set of recognized requirements is mutually compatible.
781 """Validates that a set of recognized requirements is mutually compatible.
782
782
783 Some requirements may not be compatible with others or require
783 Some requirements may not be compatible with others or require
784 config options that aren't enabled. This function is called during
784 config options that aren't enabled. This function is called during
785 repository opening to ensure that the set of requirements needed
785 repository opening to ensure that the set of requirements needed
786 to open a repository is sane and compatible with config options.
786 to open a repository is sane and compatible with config options.
787
787
788 Extensions can monkeypatch this function to perform additional
788 Extensions can monkeypatch this function to perform additional
789 checking.
789 checking.
790
790
791 ``error.RepoError`` should be raised on failure.
791 ``error.RepoError`` should be raised on failure.
792 """
792 """
793 if b'exp-sparse' in requirements and not sparse.enabled:
793 if b'exp-sparse' in requirements and not sparse.enabled:
794 raise error.RepoError(
794 raise error.RepoError(
795 _(
795 _(
796 b'repository is using sparse feature but '
796 b'repository is using sparse feature but '
797 b'sparse is not enabled; enable the '
797 b'sparse is not enabled; enable the '
798 b'"sparse" extensions to access'
798 b'"sparse" extensions to access'
799 )
799 )
800 )
800 )
801
801
802
802
803 def makestore(requirements, path, vfstype):
803 def makestore(requirements, path, vfstype):
804 """Construct a storage object for a repository."""
804 """Construct a storage object for a repository."""
805 if b'store' in requirements:
805 if b'store' in requirements:
806 if b'fncache' in requirements:
806 if b'fncache' in requirements:
807 return storemod.fncachestore(
807 return storemod.fncachestore(
808 path, vfstype, b'dotencode' in requirements
808 path, vfstype, b'dotencode' in requirements
809 )
809 )
810
810
811 return storemod.encodedstore(path, vfstype)
811 return storemod.encodedstore(path, vfstype)
812
812
813 return storemod.basicstore(path, vfstype)
813 return storemod.basicstore(path, vfstype)
814
814
815
815
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
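    # For example, b'revlog-compression-zstd'.split(b'-', 2) yields
    # [b'revlog', b'compression', b'zstd'], so the engine becomes b'zstd'.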

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options

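# Illustrative result of the resolution above: a repository created with
# modern defaults typically ends up with options such as b'revlogv1',
# b'generaldelta' and b'sparse-revlog' set to True, plus the sparse-read
# tuning values.
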
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
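# Why the lambda indirection above matters (illustrative): if an extension
# rebinds ``localrepo.makemain`` to a wrapper, ``lambda: makemain`` resolves
# the name at call time and picks up the wrapper, whereas capturing
# ``makemain`` directly would freeze the original function at import time.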


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
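    # Illustrative behavior of the checkvfs ward above: opening b'bookmarks'
    # with mode b'w' while no wlock is held triggers the 'write with no
    # wlock' develwarn, whereas b'requires' is exempted via _wlockfreeprefix.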

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False
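    # Sketch of _checknested above (illustrative): with ctx.substate
    # containing b'sub', _checknested(root + b'/sub') returns True, while for
    # root + b'/sub/inner' the decision is delegated to the subrepo via
    # sub.checknested(b'inner').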

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks`
        # in (4), the changelog file has already diverged from the content
        # used for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

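    # e.g. repo[b'null'], repo[nullrev] and repo[nullid] all resolve through
    # the table above to (nullrev, nullid) without touching the changelog.
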
    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

1678 def __contains__(self, changeid):
1678 def __contains__(self, changeid):
1679 """True if the given changeid exists
1679 """True if the given changeid exists
1680
1680
1681 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1681 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1682 specified.
1682 specified.
1683 """
1683 """
1684 try:
1684 try:
1685 self[changeid]
1685 self[changeid]
1686 return True
1686 return True
1687 except error.RepoLookupError:
1687 except error.RepoLookupError:
1688 return False
1688 return False
1689
1689
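    # Illustrative sketch (not part of the original source): ``__getitem__``
    # and ``__contains__`` together give dict-like revision lookup on a repo
    # object. Assuming ``repo`` is an already-loaded localrepository and
    # ``some_node`` a hypothetical 20-byte binary node, the accepted changeid
    # forms include:
    #
    #     repo[None]        # workingctx for the working directory
    #     repo[0]           # integer revision number
    #     repo[b'tip']      # fast-pathed symbolic name
    #     repo[b'.']        # first parent of the working directory
    #     repo[some_node]   # 20-byte binary node
    #     repo[b'a' * 40]   # 40-byte hex node (made-up example value)
    #
    #     if some_node in repo:   # __contains__ swallows RepoLookupError
    #         ctx = repo[some_node]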
    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
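    # Illustrative sketch (not part of the original source): ``revs()``
    # accepts revset %-formatting and returns integer revisions, while
    # ``set()`` yields changectx objects for the same query. ``some_rev`` is
    # a hypothetical revision number.
    #
    #     for r in repo.revs(b'ancestors(%d)', some_rev):
    #         ...  # r is an integer revision
    #
    #     for ctx in repo.set(b'draft() and ancestors(%d)', some_rev):
    #         ...  # ctx is a changectx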
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
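    # Illustrative sketch (not part of the original source): ``anyrevs()``
    # unions several revsets and can expand user aliases. The alias name
    # ``mine`` and its definition are made up for the example.
    #
    #     revs = repo.anyrevs(
    #         [b'mine', b'draft()'],
    #         user=True,
    #         localalias={b'mine': b'author(alice)'},
    #     )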
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
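    # Illustrative sketch (not part of the original source): an extension
    # that registered a custom hook could fire it through this method. The
    # hook name ``myext-update`` and the ``node`` argument are made-up
    # examples; keyword arguments become HG_* environment variables for
    # shell hooks.
    #
    #     repo.hook(b'myext-update', throw=False, node=hex(some_node))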
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node
    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)
    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
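    # Illustrative sketch (not part of the original source): a custom data
    # filter receives the file data, the parameter string left over from the
    # matching [encode]/[decode] config entry, and keyword arguments (ui,
    # repo, filename) as passed by _filter() above. The filter name
    # ``upper`` and the config line are made up for the example.
    #
    #     def upper(s, params, **kwargs):
    #         return s.upper()
    #
    #     repo.adddatafilter(b'upper', upper)
    #     # with ``[encode] **.txt = upper`` in the config, wread() would
    #     # then route matching files through this filter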
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
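        # Illustrative sketch (not part of the original source): a hook could
        # consume ``.hg/changes/tags.changes`` by splitting each line on the
        # first two spaces; ``tags_changes_path`` is a made-up variable for
        # the example.
        #
        #     with open(tags_changes_path, 'rb') as fp:
        #         for line in fp:
        #             action, hexnode, tagname = (
        #                 line.rstrip(b'\n').split(b' ', 2)
        #             )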
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if the transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
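    # Illustrative sketch (not part of the original source): a caller that
    # already holds the store lock can attach its own callbacks to the
    # transaction returned above, just as transaction() itself does. The
    # description, the ``99-myext`` category names, and the callbacks are
    # made up for the example.
    #
    #     tr = repo.transaction(b'my-operation')
    #     tr.addfinalize(b'99-myext', lambda tr2: do_something(tr2))
    #     tr.addpostclose(b'99-myext', lambda tr2: warm_my_cache(tr2))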
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

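    # A minimal sketch (not part of the original source) of how an extension
    # might augment this logic, assuming the standard extensions.wrapfunction()
    # API; 'warm_my_cache' is a hypothetical extension helper.
    #
    #   def wrapped(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def extended(tr):
    #           updater(tr)              # keep the default cache refresh
    #           warm_my_cache(repo, tr)  # then add extension-specific warming
    #       return extended
    #
    #   extensions.wrapfunction(
    #       localrepo.localrepository, '_buildcacheupdater', wrapped
    #   )
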
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

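    # Usage sketch, assuming an already-open 'repo': a maintenance script can
    # force even the lazily-loaded caches to be written out.
    #
    #   repo.updatecaches(full=True)  # warms changelog/manifest caches, the
    #                                 # revbranchcache and tags, then writes a
    #                                 # branchmap for every filter level
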
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but a
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

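    # A sketch of deferring work until the repository is fully unlocked,
    # mirroring how the 'commit' hook is scheduled further down;
    # 'notify_mirror' is a hypothetical callback.
    #
    #   def notify_mirror(success):
    #       if success:
    #           pass  # e.g. ping a replication service
    #
    #   repo._afterlock(notify_mirror)  # runs at outermost lock release, or
    #                                   # immediately when no lock is held
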
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

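    # A sketch of the documented acquisition order -- 'wlock' strictly before
    # 'lock' -- for an operation touching both the working copy and the store;
    # assumes an already-open 'repo'.
    #
    #   with repo.wlock():      # non-store parts of .hg first
    #       with repo.lock():   # then .hg/store
    #           with repo.transaction(b'my-operation') as tr:
    #               pass        # mutate store and dirstate here
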
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction

        input:

        fctx:       a file context with the content we are trying to commit
        manifest1:  manifest of changeset first parent
        manifest2:  manifest of changeset second parent
        linkrev:    revision number of the changeset being created
        tr:         current transaction
        includecopymeta: boolean, set to False to skip storing the copy data
                    (only used by the Google specific feature of using
                    changeset extra as copy source of truth).

        output: (filenode, touched)

        filenode: the filenode that should be used by this changeset
        touched: one of: None, 'added' or 'modified'
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        touched = None
        if fparent1 == fparent2 == nullid:
            touched = 'added'

        if isinstance(fctx, context.filectx):
            # This block fast-paths most of the comparisons that are usually
            # done. It assumes that a bare filectx is used and no merge
            # happened, hence no need to create a new file revision in this
            # case.
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    touched = 'modified'
                return node, touched

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        fnode = None

        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergestatemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or meta or flog.cmp(fparent1, text):
            if touched is None:  # do not overwrite added
                touched = 'modified'
            fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            touched = 'modified'
            fnode = fparent1
        else:
            fnode = fparent1
        return fnode, touched

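    # A sketch of consuming the (filenode, touched) pair returned above, in
    # the spirit of the commitctx loop; the accumulator names are
    # illustrative.
    #
    #   fnode, touched = repo._filecommit(fctx, m1, m2, linkrev, trp, True)
    #   if touched:                 # None means nothing new was recorded
    #       touched_files.append(fname)
    #   if touched == 'added':      # distinguish additions from edits
    #       files_added.append(fname)
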
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
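
    # A sketch of committing programmatically, e.g. from an extension;
    # assumes an already-open 'repo' and uses this module's matchmod import
    # to limit the commit to one directory.
    #
    #   node = repo.commit(
    #       text=b'automated update',
    #       user=b'bot <bot@example.com>',
    #       match=matchmod.match(repo.root, b'', [b'path:data']),
    #   )
    #   if node is None:
    #       pass  # nothing to commit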

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

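    # Worked example: the loop above samples first-parent ancestors at
    # exponentially growing distances (the check fires at i == 1, 2, 4,
    # 8, ...), so for a top..bottom span of ten revisions 'l' holds the
    # nodes 1, 2, 4 and 8 steps below 'top' -- the skip-list shape expected
    # by the legacy wire-protocol 'between' command.
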
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks receive a pushop with
        repo, remote and outgoing attributes; they are called before pushing
        changesets.
        """
        return util.hooks()

3438 def pushkey(self, namespace, key, old, new):
3244 def pushkey(self, namespace, key, old, new):
3439 try:
3245 try:
3440 tr = self.currenttransaction()
3246 tr = self.currenttransaction()
3441 hookargs = {}
3247 hookargs = {}
3442 if tr is not None:
3248 if tr is not None:
3443 hookargs.update(tr.hookargs)
3249 hookargs.update(tr.hookargs)
3444 hookargs = pycompat.strkwargs(hookargs)
3250 hookargs = pycompat.strkwargs(hookargs)
3445 hookargs['namespace'] = namespace
3251 hookargs['namespace'] = namespace
3446 hookargs['key'] = key
3252 hookargs['key'] = key
3447 hookargs['old'] = old
3253 hookargs['old'] = old
3448 hookargs['new'] = new
3254 hookargs['new'] = new
3449 self.hook(b'prepushkey', throw=True, **hookargs)
3255 self.hook(b'prepushkey', throw=True, **hookargs)
3450 except error.HookAbort as exc:
3256 except error.HookAbort as exc:
3451 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3257 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3452 if exc.hint:
3258 if exc.hint:
3453 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3259 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3454 return False
3260 return False
3455 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3261 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3456 ret = pushkey.push(self, namespace, key, old, new)
3262 ret = pushkey.push(self, namespace, key, old, new)
3457
3263
3458 def runhook(unused_success):
3264 def runhook(unused_success):
3459 self.hook(
3265 self.hook(
3460 b'pushkey',
3266 b'pushkey',
3461 namespace=namespace,
3267 namespace=namespace,
3462 key=key,
3268 key=key,
3463 old=old,
3269 old=old,
3464 new=new,
3270 new=new,
3465 ret=ret,
3271 ret=ret,
3466 )
3272 )
3467
3273
3468 self._afterlock(runhook)
3274 self._afterlock(runhook)
3469 return ret
3275 return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values
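
    # Usage sketch (hypothetical `repo` and `newnode`): the pushkey
    # protocol moves simple key/value data such as bookmarks and phases.
    #
    #     marks = repo.listkeys(b'bookmarks')
    #     # move bookmark b'stable' from its old node to a new one
    #     repo.pushkey(b'bookmarks', b'stable', marks[b'stable'], newnode)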

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a
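
# A sketch of the intended use (assuming `repo.svfs` and the journal
# file exist): the transaction machinery invokes the returned callback
# after closing, turning journal files into undo files.
#
#     after = aftertrans([(repo.svfs, b'journal', b'undo')])
#     after()  # renames journal -> undo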


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
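
# For example, undoname(b'/repo/.hg/store/journal') returns
# b'/repo/.hg/store/undo', and journal.dirstate maps to undo.dirstate.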


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True
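
# Callers normally reach these through the path-scheme dispatch in hg.py;
# a direct call is a sketch only (assuming a configured `ui`):
#
#     repo = instance(ui, b'/path/to/repo', create=False)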


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
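
# With stock configuration this yields the revlog backend (a sketch,
# assuming a default `ui`):
#
#     opts = defaultcreateopts(ui)
#     assert opts[b'backend'] == b'revlogv1'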


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(NODEMAP_REQUIREMENT)

    return requirements
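
# A sketch of inspecting the defaults (assuming a stock `ui`): the
# resulting set drives what gets written to .hg/requires.
#
#     reqs = newreporequirements(ui, defaultcreateopts(ui))
#     # typically includes b'revlogv1', b'store', b'fncache', b'dotencode',
#     # and b'generaldelta'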


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
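
# A sketch of the wrapping pattern the docstring describes (hypothetical
# extension claiming a b'myopt' creation option):
#
#     from mercurial import extensions, localrepo
#
#     def _filtered(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myopt', None)  # we know how to handle this one
#         return unknown
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'filterknowncreateopts', _filtered
#         )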


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
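
# End-to-end sketch (hypothetical path; assuming a configured `ui`):
# create an on-disk repository, then open it.
#
#     createrepository(ui, b'/tmp/newrepo')
#     repo = makelocalrepository(ui, b'/tmp/newrepo')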


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
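
# After poisoning, only close() remains callable (a behavior sketch):
#
#     poisonrepository(repo)
#     repo.close()   # allowed, now a no-op
#     repo.root      # raises error.ProgrammingError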