commitctx: extract _filecommit too...

Author: marmoute
Changeset: r45760:ce9ee81d (branch: default)
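[Note] The hunk below moves _filecommit from a localrepo method into the new commit.py module: callers switch from repo._filecommit(fctx, ...) to _filecommit(repo, fctx, ...). A toy, self-contained sketch of that extraction pattern (invented names, not Mercurial code):

    # Before: the helper is a method bound to the repository class.
    class Repo(object):
        def _helper(self, value):
            return value * 2

    # After: the helper is a free function taking the repo explicitly,
    # which lets it live in its own module, away from the class.
    def _helper(repo, value):
        return value * 2

    repo = Repo()
    assert repo._helper(21) == _helper(repo, 21) == 42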
diff --git a/mercurial/commit.py b/mercurial/commit.py
@@ -1,215 +1,354 @@
 # commit.py - function to perform commit
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import weakref
 
 from .i18n import _
 from .node import (
     hex,
+    nullid,
     nullrev,
 )
 
 from . import (
+    context,
+    mergestate,
     metadata,
     phases,
     scmutil,
     subrepoutil,
 )
 
 
 def commitctx(repo, ctx, error=False, origctx=None):
     """Add a new revision to the target repository.
     Revision information is passed via the context argument.
 
     ctx.files() should list all files involved in this commit, i.e.
     modified/added/removed files. On merge, it may be wider than the
     ctx.files() to be committed, since any file nodes derived directly
     from p1 or p2 are excluded from the committed ctx.files().
 
     origctx is for convert to work around the problem that bug
     fixes to the files list in changesets change hashes. For
     convert to be the identity, it can pass an origctx and this
     function will use the same files list when it makes sense to
     do so.
     """
     repo = repo.unfiltered()
 
     p1, p2 = ctx.p1(), ctx.p2()
     user = ctx.user()
 
     if repo.filecopiesmode == b'changeset-sidedata':
         writechangesetcopy = True
         writefilecopymeta = True
         writecopiesto = None
     else:
         writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
         writefilecopymeta = writecopiesto != b'changeset-only'
         writechangesetcopy = writecopiesto in (
             b'changeset-only',
             b'compatibility',
         )
     p1copies, p2copies = None, None
     if writechangesetcopy:
         p1copies = ctx.p1copies()
         p2copies = ctx.p2copies()
     filesadded, filesremoved = None, None
     with repo.lock(), repo.transaction(b"commit") as tr:
         trp = weakref.proxy(tr)
 
         if ctx.manifestnode():
             # reuse an existing manifest revision
             repo.ui.debug(b'reusing known manifest\n')
             mn = ctx.manifestnode()
             files = ctx.files()
             if writechangesetcopy:
                 filesadded = ctx.filesadded()
                 filesremoved = ctx.filesremoved()
         elif not ctx.files():
             repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
             mn = p1.manifestnode()
             files = []
         else:
             m1ctx = p1.manifestctx()
             m2ctx = p2.manifestctx()
             mctx = m1ctx.copy()
 
             m = mctx.read()
             m1 = m1ctx.read()
             m2 = m2ctx.read()
 
             # check in files
             added = []
             filesadded = []
             removed = list(ctx.removed())
             touched = []
             linkrev = len(repo)
             repo.ui.note(_(b"committing files:\n"))
             uipathfn = scmutil.getuipathfn(repo)
             for f in sorted(ctx.modified() + ctx.added()):
                 repo.ui.note(uipathfn(f) + b"\n")
                 try:
                     fctx = ctx[f]
                     if fctx is None:
                         removed.append(f)
                     else:
                         added.append(f)
-                        m[f], is_touched = repo._filecommit(
-                            fctx, m1, m2, linkrev, trp, writefilecopymeta,
+                        m[f], is_touched = _filecommit(
+                            repo, fctx, m1, m2, linkrev, trp, writefilecopymeta,
                         )
                         if is_touched:
                             touched.append(f)
                             if writechangesetcopy and is_touched == 'added':
                                 filesadded.append(f)
                         m.setflag(f, fctx.flags())
                 except OSError:
                     repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
                     raise
                 except IOError as inst:
                     errcode = getattr(inst, 'errno', errno.ENOENT)
                     if error or errcode and errcode != errno.ENOENT:
                         repo.ui.warn(
                             _(b"trouble committing %s!\n") % uipathfn(f)
                         )
                     raise
 
             # update manifest
             removed = [f for f in removed if f in m1 or f in m2]
             drop = sorted([f for f in removed if f in m])
             for f in drop:
                 del m[f]
             if p2.rev() != nullrev:
                 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
                 removed = [f for f in removed if not rf(f)]
 
             touched.extend(removed)
 
             if writechangesetcopy:
                 filesremoved = removed
 
             files = touched
             md = None
             if not files:
                 # if no "files" actually changed in terms of the changelog,
                 # try hard to detect unmodified manifest entry so that the
                 # exact same commit can be reproduced later on convert.
                 md = m1.diff(m, scmutil.matchfiles(repo, ctx.files()))
             if not files and md:
                 repo.ui.debug(
                     b'not reusing manifest (no file change in '
                     b'changelog, but manifest differs)\n'
                 )
             if files or md:
                 repo.ui.note(_(b"committing manifest\n"))
                 # we're using narrowmatch here since it's already applied at
                 # other stages (such as dirstate.walk), so we're already
                 # ignoring things outside of narrowspec in most cases. The
                 # one case where we might have files outside the narrowspec
                 # at this point is merges, and we already error out in the
                 # case where the merge has files outside of the narrowspec,
                 # so this is safe.
                 mn = mctx.write(
                     trp,
                     linkrev,
                     p1.manifestnode(),
                     p2.manifestnode(),
                     added,
                     drop,
                     match=repo.narrowmatch(),
                 )
             else:
                 repo.ui.debug(
                     b'reusing manifest from p1 (listed files '
                     b'actually unchanged)\n'
                 )
                 mn = p1.manifestnode()
 
         if writecopiesto == b'changeset-only':
             # If writing only to changeset extras, use None to indicate that
             # no entry should be written. If writing to both, write an empty
             # entry to prevent the reader from falling back to reading
             # filelogs.
             p1copies = p1copies or None
             p2copies = p2copies or None
             filesadded = filesadded or None
             filesremoved = filesremoved or None
 
         if origctx and origctx.manifestnode() == mn:
             files = origctx.files()
 
         # update changelog
         repo.ui.note(_(b"committing changelog\n"))
         repo.changelog.delayupdate(tr)
         n = repo.changelog.add(
             mn,
             files,
             ctx.description(),
             trp,
             p1.node(),
             p2.node(),
             user,
             ctx.date(),
             ctx.extra().copy(),
             p1copies,
             p2copies,
             filesadded,
             filesremoved,
         )
         xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
         repo.hook(
             b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
         )
         # set the new commit in its proper phase
         targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
         if targetphase:
             # retracting the boundary does not alter parent changesets.
             # if a parent has a higher phase, the resulting phase will
             # be compliant anyway
             #
             # if the minimal phase was 0 we don't need to retract anything
             phases.registernew(repo, tr, targetphase, [n])
         return n
+
+
+def _filecommit(
+    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
+):
+    """
+    commit an individual file as part of a larger transaction
+
+    input:
+
+        fctx:       a file context with the content we are trying to commit
+        manifest1:  manifest of the changeset's first parent
+        manifest2:  manifest of the changeset's second parent
+        linkrev:    revision number of the changeset being created
+        tr:         current transaction
+        includecopymeta: boolean, set to False to skip storing the copy data
+                    (only used by the Google specific feature of using
+                    changeset extra as copy source of truth).
+
+    output: (filenode, touched)
+
+        filenode: the filenode that should be used by this changeset
+        touched:  one of: None, 'added' or 'modified'
+    """
+
+    fname = fctx.path()
+    fparent1 = manifest1.get(fname, nullid)
+    fparent2 = manifest2.get(fname, nullid)
+    touched = None
+    if fparent1 == fparent2 == nullid:
+        touched = 'added'
+
+    if isinstance(fctx, context.filectx):
+        # This block fast-paths the most common comparisons. It assumes a
+        # bare filectx is used and no merge happened, hence there is no need
+        # to create a new file revision in this case.
+        node = fctx.filenode()
+        if node in [fparent1, fparent2]:
+            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
+            if (
+                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+            ) or (
+                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+            ):
+                touched = 'modified'
+            return node, touched
+
+    flog = repo.file(fname)
+    meta = {}
+    cfname = fctx.copysource()
+    fnode = None
+
+    if cfname and cfname != fname:
+        # Mark the new revision of this file as a copy of another
+        # file.  This copy data will effectively act as a parent
+        # of this new revision.  If this is a merge, the first
+        # parent will be the nullid (meaning "look up the copy data")
+        # and the second one will be the other parent.  For example:
+        #
+        # 0 --- 1 --- 3   rev1 changes file foo
+        #   \       /     rev2 renames foo to bar and changes it
+        #    \- 2 -/      rev3 should have bar with all changes and
+        #                      should record that bar descends from
+        #                      bar in rev2 and foo in rev1
+        #
+        # this allows this merge to succeed:
+        #
+        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
+        #   \       /     merging rev3 and rev4 should use bar@rev2
+        #    \- 2 --- 4        as the merge base
+        #
+
+        cnode = manifest1.get(cfname)
+        newfparent = fparent2
+
+        if manifest2:  # branch merge
+            if fparent2 == nullid or cnode is None:  # copied on remote side
+                if cfname in manifest2:
+                    cnode = manifest2[cfname]
+                    newfparent = fparent1
+
+        # Here, we used to search backwards through history to try to find
+        # where the file copy came from if the source of a copy was not in
+        # the parent directory. However, this doesn't actually make sense to
+        # do (what does a copy from something not in your working copy even
+        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
+        # the user that copy information was dropped, so if they didn't
+        # expect this outcome it can be fixed, but this is the correct
+        # behavior in this circumstance.
+
+        if cnode:
+            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
+            if includecopymeta:
+                meta[b"copy"] = cfname
+                meta[b"copyrev"] = hex(cnode)
+            fparent1, fparent2 = nullid, newfparent
+        else:
+            repo.ui.warn(
+                _(
+                    b"warning: can't find ancestor for '%s' "
+                    b"copied from '%s'!\n"
+                )
+                % (fname, cfname)
+            )
+
+    elif fparent1 == nullid:
+        fparent1, fparent2 = fparent2, nullid
+    elif fparent2 != nullid:
+        # is one parent an ancestor of the other?
+        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
+        if fparent1 in fparentancestors:
+            fparent1, fparent2 = fparent2, nullid
+        elif fparent2 in fparentancestors:
+            fparent2 = nullid
+        elif not fparentancestors:
+            # TODO: this whole if-else might be simplified much more
+            ms = mergestate.mergestate.read(repo)
+            if (
+                fname in ms
+                and ms[fname] == mergestate.MERGE_RECORD_MERGED_OTHER
+            ):
+                fparent1, fparent2 = fparent2, nullid
+
+    # is the file changed?
+    text = fctx.data()
+    if fparent2 != nullid or meta or flog.cmp(fparent1, text):
+        if touched is None:  # do not overwrite added
+            touched = 'modified'
+        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
+    # are just the flags changed during merge?
+    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
+        touched = 'modified'
+        fnode = fparent1
+    else:
+        fnode = fparent1
+    return fnode, touched
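
[Note] A minimal sketch (an assumed simplification, not Mercurial code) of the (filenode, touched) contract documented in the _filecommit docstring above, where touched is one of None, 'added' or 'modified':

    # NULLID stands in for mercurial.node.nullid (the 20-byte null node).
    NULLID = b"\0" * 20

    def classify(fparent1, fparent2, content_changed, flags_changed):
        if fparent1 == fparent2 == NULLID:
            return 'added'  # file present in neither parent manifest
        if content_changed or flags_changed:
            return 'modified'
        return None  # the parent filelog entry is reused untouched

    assert classify(NULLID, NULLID, True, False) == 'added'
    assert classify(b'a' * 20, NULLID, True, False) == 'modified'
    assert classify(b'a' * 20, NULLID, False, False) is None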
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
@@ -1,3612 +1,3473 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import os
 import random
 import sys
 import time
 import weakref
 
 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     color,
     commit,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 
 from .interfaces import (
     repository,
     util as interfaceutil,
 )
 
 from .utils import (
     hashutil,
     procutil,
     stringutil,
 )
 
 from .revlogutils import constants as revlogconst
 
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()
 
 
 
 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo is done for logic that should be unfiltered
     """
 
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)
 
     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)
 
 
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
 
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
 
 class storecache(_basefilecache):
     """filecache for files in the store"""
 
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 
 class mixedrepostorecache(_basefilecache):
     """filecache for a mix of files in .hg/store and outside"""
 
     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)
 
     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == b'plain':
             return obj.vfs.join(fname)
         else:
             if location != b'':
                 raise error.ProgrammingError(
                     b'unexpected location: %s' % location
                 )
             return obj.sjoin(fname)
 
 
 def isfilecached(repo, name):
     """check if a repo has already cached the "name" filecache-ed property
 
     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True
 
 
 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to unfiltered repos only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 
 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 
 def unfilteredmethod(orig):
     """decorate a method that always needs to run on the unfiltered version"""
 
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
 
     return wrapper
 
 
 moderncaps = {
     b'lookup',
     b'branchmap',
     b'pushkey',
     b'known',
     b'getbundle',
     b'unbundle',
 }
 legacycaps = moderncaps.union({b'changegroupsubset'})
 
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False
 
     def __enter__(self):
         return self
 
     def __exit__(self, exctype, excvalue, exctb):
         self.close()
 
     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after sendcommands()'
             )
 
         if self._closed:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after close()'
             )
 
         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))
 
         f = pycompat.futures.Future()
 
         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)
 
         return f
 
     def sendcommands(self):
         self._sent = True
 
     def close(self):
         self._closed = True
 
 
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()
 
         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
 
     # Begin of _basepeer interface.
 
     def url(self):
         return self._repo.url()
 
     def local(self):
         return self._repo
 
     def peer(self):
         return self
 
     def canpush(self):
         return True
 
     def close(self):
         self._repo.close()
 
     # End of _basepeer interface.
 
     # Begin of _basewirecommands interface.
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def capabilities(self):
         return self._caps
 
     def clonebundles(self):
         return self._repo.tryread(b'clonebundles.manifest')
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return b"%s %s %s %s %s" % (
             one,
             two,
             pycompat.bytestr(three),
             pycompat.bytestr(four),
             pycompat.bytestr(five),
         )
 
     def getbundle(
         self, source, heads=None, common=None, bundlecaps=None, **kwargs
     ):
         chunks = exchange.getbundlechunks(
             self._repo,
             source,
             heads=heads,
             common=common,
             bundlecaps=bundlecaps,
             **kwargs
         )[1]
         cb = util.chunkbuffer(chunks)
 
         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler(b'01', cb, None)
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def stream_out(self):
         raise error.Abort(_(b'cannot perform stream clone against local peer'))
 
     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                 if util.safehasattr(ret, b'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(
                 _(b'push failed:'), stringutil.forcebytestr(exc)
             )
 
     # End of _basewirecommands interface.
 
     # Begin of peer interface.
 
     def commandexecutor(self):
         return localcommandexecutor(self)
 
     # End of peer interface.
 
 
 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
 
     # Begin of baselegacywirecommands interface.
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=bases, ancestorsof=heads
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 
     # End of baselegacywirecommands interface.
 
 
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
 
 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
 # pieces, so that each piece isn't too big.
 # Without the sparserevlog capability, reading from the repository could use
 # huge amounts of memory, because the whole span would be read at once,
 # including all the intermediate revisions that aren't pertinent for the chain.
 # This is why once a repository has enabled sparse-read, it becomes required.
 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
 
 # A repository with the sidedataflag requirement will allow storing extra
 # information for revisions without altering their original hashes.
 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
 
 # A repository with the copies-sidedata-changeset requirement will store
 # copies-related information in the changeset's sidedata.
 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
 
 # The repository uses the persistent nodemap for the changelog and the
 # manifest.
 NODEMAP_REQUIREMENT = b'persistent-nodemap'
 
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle those requirements.
 featuresetupfuncs = set()
 
 
462 def makelocalrepository(baseui, path, intents=None):
462 def makelocalrepository(baseui, path, intents=None):
463 """Create a local repository object.
463 """Create a local repository object.
464
464
465 Given arguments needed to construct a local repository, this function
465 Given arguments needed to construct a local repository, this function
466 performs various early repository loading functionality (such as
466 performs various early repository loading functionality (such as
467 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
467 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
468 the repository can be opened, derives a type suitable for representing
468 the repository can be opened, derives a type suitable for representing
469 that repository, and returns an instance of it.
469 that repository, and returns an instance of it.
470
470
471 The returned object conforms to the ``repository.completelocalrepository``
471 The returned object conforms to the ``repository.completelocalrepository``
472 interface.
472 interface.
473
473
474 The repository type is derived by calling a series of factory functions
474 The repository type is derived by calling a series of factory functions
475 for each aspect/interface of the final repository. These are defined by
475 for each aspect/interface of the final repository. These are defined by
476 ``REPO_INTERFACES``.
476 ``REPO_INTERFACES``.
477
477
478 Each factory function is called to produce a type implementing a specific
478 Each factory function is called to produce a type implementing a specific
479 interface. The cumulative list of returned types will be combined into a
479 interface. The cumulative list of returned types will be combined into a
480 new type and that type will be instantiated to represent the local
480 new type and that type will be instantiated to represent the local
481 repository.
481 repository.
482
482
483 The factory functions each receive various state that may be consulted
483 The factory functions each receive various state that may be consulted
484 as part of deriving a type.
484 as part of deriving a type.
485
485
486 Extensions should wrap these factory functions to customize repository type
486 Extensions should wrap these factory functions to customize repository type
487 creation. Note that an extension's wrapped function may be called even if
487 creation. Note that an extension's wrapped function may be called even if
488 that extension is not loaded for the repo being constructed. Extensions
488 that extension is not loaded for the repo being constructed. Extensions
489 should check if their ``__name__`` appears in the
489 should check if their ``__name__`` appears in the
490 ``extensionmodulenames`` set passed to the factory function and no-op if
490 ``extensionmodulenames`` set passed to the factory function and no-op if
491 not.
491 not.
492 """
492 """
493 ui = baseui.copy()
493 ui = baseui.copy()
494 # Prevent copying repo configuration.
494 # Prevent copying repo configuration.
495 ui.copy = baseui.copy
495 ui.copy = baseui.copy
496
496
497 # Working directory VFS rooted at repository root.
497 # Working directory VFS rooted at repository root.
498 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
498 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
499
499
500 # Main VFS for .hg/ directory.
500 # Main VFS for .hg/ directory.
501 hgpath = wdirvfs.join(b'.hg')
501 hgpath = wdirvfs.join(b'.hg')
502 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
502 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
503
503
504 # The .hg/ path should exist and should be a directory. All other
504 # The .hg/ path should exist and should be a directory. All other
505 # cases are errors.
505 # cases are errors.
506 if not hgvfs.isdir():
506 if not hgvfs.isdir():
507 try:
507 try:
508 hgvfs.stat()
508 hgvfs.stat()
509 except OSError as e:
509 except OSError as e:
510 if e.errno != errno.ENOENT:
510 if e.errno != errno.ENOENT:
511 raise
511 raise
512 except ValueError as e:
512 except ValueError as e:
513 # Can be raised on Python 3.8 when path is invalid.
513 # Can be raised on Python 3.8 when path is invalid.
514 raise error.Abort(
514 raise error.Abort(
515 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
515 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
516 )
516 )
517
517
518 raise error.RepoError(_(b'repository %s not found') % path)
518 raise error.RepoError(_(b'repository %s not found') % path)
519
519
520 # .hg/requires file contains a newline-delimited list of
520 # .hg/requires file contains a newline-delimited list of
521 # features/capabilities the opener (us) must have in order to use
521 # features/capabilities the opener (us) must have in order to use
522 # the repository. This file was introduced in Mercurial 0.9.2,
522 # the repository. This file was introduced in Mercurial 0.9.2,
523 # which means very old repositories may not have one. We assume
523 # which means very old repositories may not have one. We assume
524 # a missing file translates to no requirements.
524 # a missing file translates to no requirements.
525 try:
525 try:
526 requirements = set(hgvfs.read(b'requires').splitlines())
526 requirements = set(hgvfs.read(b'requires').splitlines())
527 except IOError as e:
527 except IOError as e:
528 if e.errno != errno.ENOENT:
528 if e.errno != errno.ENOENT:
529 raise
529 raise
530 requirements = set()
530 requirements = set()
531
531
532 # The .hg/hgrc file may load extensions or contain config options
532 # The .hg/hgrc file may load extensions or contain config options
533 # that influence repository construction. Attempt to load it and
533 # that influence repository construction. Attempt to load it and
534 # process any new extensions that it may have pulled in.
534 # process any new extensions that it may have pulled in.
535 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
535 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
536 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
536 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
537 extensions.loadall(ui)
537 extensions.loadall(ui)
538 extensions.populateui(ui)
538 extensions.populateui(ui)
539
539
540 # Set of module names of extensions loaded for this repository.
540 # Set of module names of extensions loaded for this repository.
541 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
541 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
542
542
543 supportedrequirements = gathersupportedrequirements(ui)
543 supportedrequirements = gathersupportedrequirements(ui)
544
544
545 # We first validate the requirements are known.
545 # We first validate the requirements are known.
546 ensurerequirementsrecognized(requirements, supportedrequirements)
546 ensurerequirementsrecognized(requirements, supportedrequirements)
547
547
548 # Then we validate that the known set is reasonable to use together.
548 # Then we validate that the known set is reasonable to use together.
549 ensurerequirementscompatible(ui, requirements)
549 ensurerequirementscompatible(ui, requirements)
550
550
551 # TODO there are unhandled edge cases related to opening repositories with
551 # TODO there are unhandled edge cases related to opening repositories with
552 # shared storage. If storage is shared, we should also test for requirements
552 # shared storage. If storage is shared, we should also test for requirements
553 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
553 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
554 # that repo, as that repo may load extensions needed to open it. This is a
554 # that repo, as that repo may load extensions needed to open it. This is a
555 # bit complicated because we don't want the other hgrc to overwrite settings
555 # bit complicated because we don't want the other hgrc to overwrite settings
556 # in this hgrc.
556 # in this hgrc.
557 #
557 #
558 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
558 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
559 # file when sharing repos. But if a requirement is added after the share is
559 # file when sharing repos. But if a requirement is added after the share is
560 # performed, thereby introducing a new requirement for the opener, we may
560 # performed, thereby introducing a new requirement for the opener, we may
561 # will not see that and could encounter a run-time error interacting with
561 # will not see that and could encounter a run-time error interacting with
562 # that shared store since it has an unknown-to-us requirement.
562 # that shared store since it has an unknown-to-us requirement.
563
563
564 # At this point, we know we should be capable of opening the repository.
564 # At this point, we know we should be capable of opening the repository.
565 # Now get on with doing that.
565 # Now get on with doing that.
566
566
567 features = set()
567 features = set()
568
568
569 # The "store" part of the repository holds versioned data. How it is
569 # The "store" part of the repository holds versioned data. How it is
570 # accessed is determined by various requirements. The ``shared`` or
570 # accessed is determined by various requirements. The ``shared`` or
571 # ``relshared`` requirements indicate the store lives in the path contained
571 # ``relshared`` requirements indicate the store lives in the path contained
572 # in the ``.hg/sharedpath`` file. This is an absolute path for
572 # in the ``.hg/sharedpath`` file. This is an absolute path for
573 # ``shared`` and relative to ``.hg/`` for ``relshared``.
573 # ``shared`` and relative to ``.hg/`` for ``relshared``.
574 if b'shared' in requirements or b'relshared' in requirements:
574 if b'shared' in requirements or b'relshared' in requirements:
575 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
575 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
576 if b'relshared' in requirements:
576 if b'relshared' in requirements:
577 sharedpath = hgvfs.join(sharedpath)
577 sharedpath = hgvfs.join(sharedpath)
578
578
579 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
579 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
580
580
581 if not sharedvfs.exists():
581 if not sharedvfs.exists():
582 raise error.RepoError(
582 raise error.RepoError(
583 _(b'.hg/sharedpath points to nonexistent directory %s')
583 _(b'.hg/sharedpath points to nonexistent directory %s')
584 % sharedvfs.base
584 % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )
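    # For illustration (hypothetical values): a repository at /srv/repo with
    # requirements {b'revlogv1', b'store'} would yield a type named
    # "derivedrepo:/srv/repo<revlogv1,store>", which then shows up in
    # tracebacks and debuggers.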

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False


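# A minimal sketch (hypothetical extension code; names are illustrative) of
# how an extension might monkeypatch loadhgrc() to pull in an extra config
# file, as the docstring above suggests:
#
#   from mercurial import extensions, localrepo
#
#   def _loadhgrcwrapper(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           return True
#       except IOError:
#           return loaded
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrcwrapper)

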
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to lists of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


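# A minimal sketch (hypothetical extension code) of the ``featuresetupfuncs``
# hook consumed above: an extension registers a callback at module load time,
# and the callback is only honored when that extension is enabled for the ui
# instance being used:
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-myextension-format')
#
#   localrepo.featuresetupfuncs.add(featuresetup)

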
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


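# For example (hypothetical requirement name), opening a repository whose
# .hg/requires lists b'exp-future-format' would abort with a message along
# the lines of:
#
#   abort: repository requires features unknown to this Mercurial:
#   exp-future-format
#   (see https://mercurial-scm.org/wiki/MissingRequirement for more
#   information)

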
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


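# The mapping from requirements to store types above, for reference:
#
#   {b'store', b'fncache', b'dotencode'} -> fncachestore (dotencode enabled)
#   {b'store', b'fncache'}               -> fncachestore
#   {b'store'}                           -> encodedstore
#   (no b'store')                        -> basicstore (very old repos)

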
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


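# For illustration (hypothetical repo): a repository with modern default
# requirements (b'revlogv1', b'generaldelta' and friends) ends up with an
# options dict along the lines of {b'revlogv1': True, b'generaldelta': True,
# ...} plus whatever resolverevlogstorevfsoptions() below contributes, while
# an ancient revlogv0 repo gets only {b'revlogv0': True}.

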
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
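            # e.g. b'revlog-compression-zstd'.split(b'-', 2)[2] gives b'zstd',
            # and b'exp-compression-none'.split(b'-', 2)[2] gives b'none'.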

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
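# A minimal sketch (hypothetical extension code) of the wrapping the lambda
# indirection above enables. Because REPO_INTERFACES resolves the factory at
# call time, an extension can wrap the module-level function and substitute
# its own storage type:
#
#   from mercurial import extensions, localrepo
#
#   def wrapfilestorage(orig, requirements, features, **kwargs):
#       cls = orig(requirements, features, **kwargs)
#       # return cls, or a subclass overriding file(), as needed
#       return cls
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)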


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # List of prefixes for files which can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # Holds sets of revisions to be filtered. Should be cleared when
        # something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

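    # For example, `repo.filtered(b'served')` hides secret and hidden
    # changesets, while `repo.unfiltered()` gets back to the raw repository;
    # both are views over the same underlying storage.
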
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) an outside transaction updates the changelog to content B
        # 3) an outside transaction updates the bookmark file, referring to
        #    content B
        # 4) the bookmarks file content is read and filtered against
        #    changelog A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks pointing to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks`
        # in (4), the changelog file has already diverged from the content
        # used for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestats for `bookmarks` and `changelog` are captured (for
        #    bookmarks)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestats for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cachestat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1440
1440
1441 def _refreshchangelog(self):
1441 def _refreshchangelog(self):
1442 """make sure the in memory changelog match the on-disk one"""
1442 """make sure the in memory changelog match the on-disk one"""
1443 if 'changelog' in vars(self) and self.currenttransaction() is None:
1443 if 'changelog' in vars(self) and self.currenttransaction() is None:
1444 del self.changelog
1444 del self.changelog
1445
1445
1446 @property
1446 @property
1447 def _activebookmark(self):
1447 def _activebookmark(self):
1448 return self._bookmarks.active
1448 return self._bookmarks.active
1449
1449
1450 # _phasesets depend on changelog. what we need is to call
1450 # _phasesets depend on changelog. what we need is to call
1451 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1451 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1452 # can't be easily expressed in filecache mechanism.
1452 # can't be easily expressed in filecache mechanism.
1453 @storecache(b'phaseroots', b'00changelog.i')
1453 @storecache(b'phaseroots', b'00changelog.i')
1454 def _phasecache(self):
1454 def _phasecache(self):
1455 return phases.phasecache(self, self._phasedefaults)
1455 return phases.phasecache(self, self._phasedefaults)
1456
1456
1457 @storecache(b'obsstore')
1457 @storecache(b'obsstore')
1458 def obsstore(self):
1458 def obsstore(self):
1459 return obsolete.makestore(self.ui, self)
1459 return obsolete.makestore(self.ui, self)
1460
1460
    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race (see issue6303)
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

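    # Illustrative sketch (not part of the original module): combining a
    # caller-supplied matcher with the narrowspec; ``repo`` and the
    # ``src/**`` pattern are hypothetical::
    #
    #     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #     nm = repo.narrowmatch(m, includeexact=True)
    #     nm(b'src/main.py')  # True when both matchers accept the path
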
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away, without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

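    # A minimal sketch (not part of the original module) of how the fast
    # path above is consulted, assuming ``repo`` is a localrepository::
    #
    #     hit = repo._quick_access_changeid.get(b'.')
    #     if hit is not None:
    #         rev, node = hit  # resolved without loading obsmarkers
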
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

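    # Illustrative sketch (not part of the original module): the changeid
    # forms accepted above, assuming an existing ``repo``::
    #
    #     repo[None]        # working directory context
    #     repo[0]           # revision number
    #     repo[b'.']        # working directory parent
    #     repo[b'tip']      # tip of the (possibly filtered) changelog
    #     repo[binnode]     # 20-byte binary node
    #     repo[b'a1b2...']  # 40-char hex node (full length only; short
    #                       # prefixes raise ProgrammingError here)
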
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

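    # Illustrative sketch (not part of the original module): %-formatting in
    # a revset, assuming an existing ``repo``::
    #
    #     for r in repo.revs(b'heads(branch(%s))', b'default'):
    #         node = repo.changelog.node(r)
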
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

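    # A minimal sketch (not part of the original module): ``set()`` yields
    # contexts rather than integers, assuming an existing ``repo``::
    #
    #     descriptions = [ctx.description() for ctx in repo.set(b'draft()')]
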
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

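    # Illustrative sketch (not part of the original module): overriding a
    # user alias locally; the ``mine`` alias is hypothetical::
    #
    #     revs = repo.anyrevs(
    #         [b'mine()'],
    #         user=True,
    #         localalias={b'mine': b'author(alice)'},
    #     )
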
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

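    # A minimal sketch (not part of the original module): firing a custom
    # hook with an extra argument; the hook name and argument below are
    # hypothetical and would be registered by an extension::
    #
    #     repo.hook(b'myext-updated', throw=False, count=5)
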
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

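    # Illustrative sketch (not part of the original module): walking tags in
    # revision order, assuming an existing ``repo``::
    #
    #     for name, node in repo.tagslist():
    #         print(name, hex(node))
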
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

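    # A minimal sketch (not part of the original module): tolerant branch
    # lookup, assuming an existing ``repo``; the branch name is hypothetical::
    #
    #     tip = repo.branchtip(b'maybe-missing', ignoremissing=True)
    #     if tip is None:
    #         pass  # branch does not exist; no exception was raised
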
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

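    # Illustrative sketch (not part of the original module): ``known()``
    # answers membership per node; ``some_node`` is hypothetical::
    #
    #     flags = repo.known([some_node, nullid])  # e.g. [True, False]
    #     # a node counts as unknown when absent from or filtered out of
    #     # the changelog
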
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

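    # Illustrative sketch (not part of the original module): the [encode]
    # and [decode] hgrc sections that feed _loadfilter(), following the
    # pattern shown in `hg help config`; the gzip pipe below is only an
    # example, not a recommendation::
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip
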
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tag was
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
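        #
        # An illustrative sketch (not part of the original module): parsing
        # one line of the tags.changes format described above, assuming the
        # file has already been read and ``line`` holds one record::
        #
        #     action, hexnode, tagname = line.rstrip(b'\n').split(b' ', 2)
        #     assert action in (b'-R', b'+A', b'-M', b'+M')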
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # to be available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

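    # For reference, `undoname` maps each journal file to its post-transaction
    # backup name, e.g. b'journal.dirstate' -> b'undo.dirstate'; `rollback`
    # later restores repository state from these `undo.*` files.
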
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

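    # Usage sketch (assumes an existing `repo` object, e.g. from
    # `hg.repository(ui, path)`); this is essentially what `hg recover` does:
    #
    #     if repo.recover():
    #         pass  # an interrupted journal was found and rolled back
    #     else:
    #         pass  # nothing to recover
    #
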
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

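    # Sketch of how an extension might augment the cache updater (the
    # `warm_my_cache` helper is hypothetical; `extensions.wrapfunction` is the
    # standard wrapping API):
    #
    #     def wrapped(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #
    #         def extended(tr):
    #             updater(tr)
    #             warm_my_cache(repo, tr)  # hypothetical extra cache
    #
    #         return extended
    #
    #     extensions.wrapfunction(
    #         localrepo.localrepository, '_buildcacheupdater', wrapped
    #     )
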
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

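    # A full warm-up is what `hg debugupdatecaches` performs; the equivalent
    # call (sketch, assuming a `repo` object in hand) is simply:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)
    #
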
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

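    # Callbacks receive a single success boolean, which is often ignored (see
    # the `unused_success` parameters elsewhere in this file). A minimal
    # sketch:
    #
    #     def notify(success):
    #         if success:
    #             pass  # e.g. fire an external notification
    #
    #     repo._afterlock(notify)
    #
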
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

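    # The canonical acquisition order, as the docstrings above require
    # (sketch, assuming a `repo` object):
    #
    #     with repo.wlock(), repo.lock():  # wlock first, then store lock
    #         with repo.transaction(b'my-change'):
    #             pass  # mutate the store here
    #
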
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction

        input:

        fctx: a file context with the content we are trying to commit
        manifest1: manifest of changeset first parent
        manifest2: manifest of changeset second parent
        linkrev: revision number of the changeset being created
        tr: current transaction
        includecopymeta: boolean, set to False to skip storing the copy data
                         (only used by the Google specific feature of using
                         changeset extra as copy source of truth).

        output: (filenode, touched)

        filenode: the filenode that should be used by this changeset
        touched: one of: None, 'added' or 'modified'
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        touched = None
        if fparent1 == fparent2 == nullid:
            touched = 'added'

        if isinstance(fctx, context.filectx):
            # This block fast-paths the most common comparisons. It assumes
            # that a bare filectx is used and no merge happened, hence no
            # need to create a new file revision in this case.
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    touched = 'modified'
                return node, touched

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        fnode = None

        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergestatemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or meta or flog.cmp(fparent1, text):
            if touched is None:  # do not overwrite added
                touched = 'modified'
            fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            touched = 'modified'
            fnode = fparent1
        else:
            fnode = fparent1
        return fnode, touched

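    # For illustration, a rename of `foo` to `bar` stores filelog metadata of
    # roughly this shape (sketch; `<hex>` stands for the real source
    # filenode):
    #
    #     meta = {b"copy": b"foo", b"copyrev": b"<hex>"}
    #
    # and the new revision's first parent becomes nullid, so readers know to
    # consult the copy metadata instead.
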
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (eg: histedit)
                # the temporary commit may have been stripped before the hook
                # is released
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

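    # Programmatic usage sketch (assumes a `repo` object; this is what the
    # `hg commit` command ultimately calls):
    #
    #     node = repo.commit(text=b'fix frobnication', user=b'alice <a@b.c>')
    #     if node is None:
    #         pass  # nothing to commit
    #
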
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

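    # `between` walks the first-parent chain from `top` down towards `bottom`
    # and records the nodes at exponentially growing offsets 1, 2, 4, 8, ...
    # below `top` (i is the distance walked, f the next offset to sample).
    # This keeps the answer to the legacy wire-protocol `between` query
    # logarithmic in the distance between the two nodes.
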
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose callbacks receive a pushop
        (with repo, remote and outgoing attributes); they are called before
        pushing changesets.
        """
        return util.hooks()

3244 def pushkey(self, namespace, key, old, new):
3105 def pushkey(self, namespace, key, old, new):
3245 try:
3106 try:
3246 tr = self.currenttransaction()
3107 tr = self.currenttransaction()
3247 hookargs = {}
3108 hookargs = {}
3248 if tr is not None:
3109 if tr is not None:
3249 hookargs.update(tr.hookargs)
3110 hookargs.update(tr.hookargs)
3250 hookargs = pycompat.strkwargs(hookargs)
3111 hookargs = pycompat.strkwargs(hookargs)
3251 hookargs['namespace'] = namespace
3112 hookargs['namespace'] = namespace
3252 hookargs['key'] = key
3113 hookargs['key'] = key
3253 hookargs['old'] = old
3114 hookargs['old'] = old
3254 hookargs['new'] = new
3115 hookargs['new'] = new
3255 self.hook(b'prepushkey', throw=True, **hookargs)
3116 self.hook(b'prepushkey', throw=True, **hookargs)
3256 except error.HookAbort as exc:
3117 except error.HookAbort as exc:
3257 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3118 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3258 if exc.hint:
3119 if exc.hint:
3259 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3120 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3260 return False
3121 return False
3261 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3122 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3262 ret = pushkey.push(self, namespace, key, old, new)
3123 ret = pushkey.push(self, namespace, key, old, new)
3263
3124
3264 def runhook(unused_success):
3125 def runhook(unused_success):
3265 self.hook(
3126 self.hook(
3266 b'pushkey',
3127 b'pushkey',
3267 namespace=namespace,
3128 namespace=namespace,
3268 key=key,
3129 key=key,
3269 old=old,
3130 old=old,
3270 new=new,
3131 new=new,
3271 ret=ret,
3132 ret=ret,
3272 )
3133 )
3273
3134
3274 self._afterlock(runhook)
3135 self._afterlock(runhook)
3275 return ret
3136 return ret
3276
3137
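The ordering above is the interesting part: 'prepushkey' can veto the
operation before anything happens (HookAbort is caught and turned into a
False return), while the 'pushkey' hook is queued through _afterlock() so it
only fires once the surrounding lock is released. A minimal standalone sketch
of that veto/deferred-notify pattern (all names hypothetical, not Mercurial
APIs):

    class HookedStore(object):
        def __init__(self):
            self.data = {}
            self.prehooks = []   # may raise to veto, like 'prepushkey'
            self.posthooks = []  # deferred, like 'pushkey' via _afterlock()
            self._deferred = []

        def push(self, key, old, new):
            for hook in self.prehooks:
                hook(key, old, new)          # raising aborts the push
            ok = self.data.get(key) == old
            if ok:
                self.data[key] = new
            for hook in self.posthooks:      # run later, after "unlock"
                self._deferred.append(lambda h=hook: h(key, new, ok))
            return ok

        def unlock(self):
            while self._deferred:
                self._deferred.pop(0)()
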
3277 def listkeys(self, namespace):
3138 def listkeys(self, namespace):
3278 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3139 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3279 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3140 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3280 values = pushkey.list(self, namespace)
3141 values = pushkey.list(self, namespace)
3281 self.hook(b'listkeys', namespace=namespace, values=values)
3142 self.hook(b'listkeys', namespace=namespace, values=values)
3282 return values
3143 return values
3283
3144
3284 def debugwireargs(self, one, two, three=None, four=None, five=None):
3145 def debugwireargs(self, one, two, three=None, four=None, five=None):
3285 '''used to test argument passing over the wire'''
3146 '''used to test argument passing over the wire'''
3286 return b"%s %s %s %s %s" % (
3147 return b"%s %s %s %s %s" % (
3287 one,
3148 one,
3288 two,
3149 two,
3289 pycompat.bytestr(three),
3150 pycompat.bytestr(three),
3290 pycompat.bytestr(four),
3151 pycompat.bytestr(four),
3291 pycompat.bytestr(five),
3152 pycompat.bytestr(five),
3292 )
3153 )
3293
3154
3294 def savecommitmessage(self, text):
3155 def savecommitmessage(self, text):
3295 fp = self.vfs(b'last-message.txt', b'wb')
3156 fp = self.vfs(b'last-message.txt', b'wb')
3296 try:
3157 try:
3297 fp.write(text)
3158 fp.write(text)
3298 finally:
3159 finally:
3299 fp.close()
3160 fp.close()
3300 return self.pathto(fp.name[len(self.root) + 1 :])
3161 return self.pathto(fp.name[len(self.root) + 1 :])
3301
3162
3302
3163
3303 # used to avoid circular references so destructors work
3164 # used to avoid circular references so destructors work
3304 def aftertrans(files):
3165 def aftertrans(files):
3305 renamefiles = [tuple(t) for t in files]
3166 renamefiles = [tuple(t) for t in files]
3306
3167
3307 def a():
3168 def a():
3308 for vfs, src, dest in renamefiles:
3169 for vfs, src, dest in renamefiles:
3309 # if src and dest refer to the same file, vfs.rename is a no-op,
3170 # if src and dest refer to the same file, vfs.rename is a no-op,
3310 # leaving both src and dest on disk. delete dest to make sure
3171 # leaving both src and dest on disk. delete dest to make sure
3311 # the rename cannot be such a no-op.
3172 # the rename cannot be such a no-op.
3312 vfs.tryunlink(dest)
3173 vfs.tryunlink(dest)
3313 try:
3174 try:
3314 vfs.rename(src, dest)
3175 vfs.rename(src, dest)
3315 except OSError: # journal file does not yet exist
3176 except OSError: # journal file does not yet exist
3316 pass
3177 pass
3317
3178
3318 return a
3179 return a
3319
3180
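The same unlink-then-rename dance, reduced to plain os calls (a sketch of
the technique, not the vfs-based original):

    import os

    def safe_rename(src, dest):
        try:
            os.unlink(dest)       # ensure the rename cannot be a no-op
        except OSError:
            pass
        try:
            os.rename(src, dest)
        except OSError:           # src (the journal file) may not exist yet
            pass
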
3320
3181
3321 def undoname(fn):
3182 def undoname(fn):
3322 base, name = os.path.split(fn)
3183 base, name = os.path.split(fn)
3323 assert name.startswith(b'journal')
3184 assert name.startswith(b'journal')
3324 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3185 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3325
3186
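For example (paths illustrative):

    assert undoname(b'store/journal') == b'store/undo'
    assert undoname(b'store/journal.backupfiles') == b'store/undo.backupfiles'
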
3326
3187
3327 def instance(ui, path, create, intents=None, createopts=None):
3188 def instance(ui, path, create, intents=None, createopts=None):
3328 localpath = util.urllocalpath(path)
3189 localpath = util.urllocalpath(path)
3329 if create:
3190 if create:
3330 createrepository(ui, localpath, createopts=createopts)
3191 createrepository(ui, localpath, createopts=createopts)
3331
3192
3332 return makelocalrepository(ui, localpath, intents=intents)
3193 return makelocalrepository(ui, localpath, intents=intents)
3333
3194
3334
3195
3335 def islocal(path):
3196 def islocal(path):
3336 return True
3197 return True
3337
3198
3338
3199
3339 def defaultcreateopts(ui, createopts=None):
3200 def defaultcreateopts(ui, createopts=None):
3340 """Populate the default creation options for a repository.
3201 """Populate the default creation options for a repository.
3341
3202
3342 A dictionary of explicitly requested creation options can be passed
3203 A dictionary of explicitly requested creation options can be passed
3343 in. Missing keys will be populated.
3204 in. Missing keys will be populated.
3344 """
3205 """
3345 createopts = dict(createopts or {})
3206 createopts = dict(createopts or {})
3346
3207
3347 if b'backend' not in createopts:
3208 if b'backend' not in createopts:
3348 # experimental config: storage.new-repo-backend
3209 # experimental config: storage.new-repo-backend
3349 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3210 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3350
3211
3351 return createopts
3212 return createopts
3352
3213
3353
3214
3354 def newreporequirements(ui, createopts):
3215 def newreporequirements(ui, createopts):
3355 """Determine the set of requirements for a new local repository.
3216 """Determine the set of requirements for a new local repository.
3356
3217
3357 Extensions can wrap this function to specify custom requirements for
3218 Extensions can wrap this function to specify custom requirements for
3358 new repositories.
3219 new repositories.
3359 """
3220 """
3360 # If the repo is being created from a shared repository, we copy
3221 # If the repo is being created from a shared repository, we copy
3361 # its requirements.
3222 # its requirements.
3362 if b'sharedrepo' in createopts:
3223 if b'sharedrepo' in createopts:
3363 requirements = set(createopts[b'sharedrepo'].requirements)
3224 requirements = set(createopts[b'sharedrepo'].requirements)
3364 if createopts.get(b'sharedrelative'):
3225 if createopts.get(b'sharedrelative'):
3365 requirements.add(b'relshared')
3226 requirements.add(b'relshared')
3366 else:
3227 else:
3367 requirements.add(b'shared')
3228 requirements.add(b'shared')
3368
3229
3369 return requirements
3230 return requirements
3370
3231
3371 if b'backend' not in createopts:
3232 if b'backend' not in createopts:
3372 raise error.ProgrammingError(
3233 raise error.ProgrammingError(
3373 b'backend key not present in createopts; '
3234 b'backend key not present in createopts; '
3374 b'was defaultcreateopts() called?'
3235 b'was defaultcreateopts() called?'
3375 )
3236 )
3376
3237
3377 if createopts[b'backend'] != b'revlogv1':
3238 if createopts[b'backend'] != b'revlogv1':
3378 raise error.Abort(
3239 raise error.Abort(
3379 _(
3240 _(
3380 b'unable to determine repository requirements for '
3241 b'unable to determine repository requirements for '
3381 b'storage backend: %s'
3242 b'storage backend: %s'
3382 )
3243 )
3383 % createopts[b'backend']
3244 % createopts[b'backend']
3384 )
3245 )
3385
3246
3386 requirements = {b'revlogv1'}
3247 requirements = {b'revlogv1'}
3387 if ui.configbool(b'format', b'usestore'):
3248 if ui.configbool(b'format', b'usestore'):
3388 requirements.add(b'store')
3249 requirements.add(b'store')
3389 if ui.configbool(b'format', b'usefncache'):
3250 if ui.configbool(b'format', b'usefncache'):
3390 requirements.add(b'fncache')
3251 requirements.add(b'fncache')
3391 if ui.configbool(b'format', b'dotencode'):
3252 if ui.configbool(b'format', b'dotencode'):
3392 requirements.add(b'dotencode')
3253 requirements.add(b'dotencode')
3393
3254
3394 compengines = ui.configlist(b'format', b'revlog-compression')
3255 compengines = ui.configlist(b'format', b'revlog-compression')
3395 for compengine in compengines:
3256 for compengine in compengines:
3396 if compengine in util.compengines:
3257 if compengine in util.compengines:
3397 break
3258 break
3398 else:
3259 else:
3399 raise error.Abort(
3260 raise error.Abort(
3400 _(
3261 _(
3401 b'compression engines %s defined by '
3262 b'compression engines %s defined by '
3402 b'format.revlog-compression not available'
3263 b'format.revlog-compression not available'
3403 )
3264 )
3404 % b', '.join(b'"%s"' % e for e in compengines),
3265 % b', '.join(b'"%s"' % e for e in compengines),
3405 hint=_(
3266 hint=_(
3406 b'run "hg debuginstall" to list available '
3267 b'run "hg debuginstall" to list available '
3407 b'compression engines'
3268 b'compression engines'
3408 ),
3269 ),
3409 )
3270 )
3410
3271
3411 # zlib is the historical default and doesn't need an explicit requirement.
3272 # zlib is the historical default and doesn't need an explicit requirement.
3412 if compengine == b'zstd':
3273 if compengine == b'zstd':
3413 requirements.add(b'revlog-compression-zstd')
3274 requirements.add(b'revlog-compression-zstd')
3414 elif compengine != b'zlib':
3275 elif compengine != b'zlib':
3415 requirements.add(b'exp-compression-%s' % compengine)
3276 requirements.add(b'exp-compression-%s' % compengine)
3416
3277
3417 if scmutil.gdinitconfig(ui):
3278 if scmutil.gdinitconfig(ui):
3418 requirements.add(b'generaldelta')
3279 requirements.add(b'generaldelta')
3419 if ui.configbool(b'format', b'sparse-revlog'):
3280 if ui.configbool(b'format', b'sparse-revlog'):
3420 requirements.add(SPARSEREVLOG_REQUIREMENT)
3281 requirements.add(SPARSEREVLOG_REQUIREMENT)
3421
3282
3422 # experimental config: format.exp-use-side-data
3283 # experimental config: format.exp-use-side-data
3423 if ui.configbool(b'format', b'exp-use-side-data'):
3284 if ui.configbool(b'format', b'exp-use-side-data'):
3424 requirements.add(SIDEDATA_REQUIREMENT)
3285 requirements.add(SIDEDATA_REQUIREMENT)
3425 # experimental config: format.exp-use-copies-side-data-changeset
3286 # experimental config: format.exp-use-copies-side-data-changeset
3426 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3287 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3427 requirements.add(SIDEDATA_REQUIREMENT)
3288 requirements.add(SIDEDATA_REQUIREMENT)
3428 requirements.add(COPIESSDC_REQUIREMENT)
3289 requirements.add(COPIESSDC_REQUIREMENT)
3429 if ui.configbool(b'experimental', b'treemanifest'):
3290 if ui.configbool(b'experimental', b'treemanifest'):
3430 requirements.add(b'treemanifest')
3291 requirements.add(b'treemanifest')
3431
3292
3432 revlogv2 = ui.config(b'experimental', b'revlogv2')
3293 revlogv2 = ui.config(b'experimental', b'revlogv2')
3433 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3294 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3434 requirements.remove(b'revlogv1')
3295 requirements.remove(b'revlogv1')
3435 # generaldelta is implied by revlogv2.
3296 # generaldelta is implied by revlogv2.
3436 requirements.discard(b'generaldelta')
3297 requirements.discard(b'generaldelta')
3437 requirements.add(REVLOGV2_REQUIREMENT)
3298 requirements.add(REVLOGV2_REQUIREMENT)
3438 # experimental config: format.internal-phase
3299 # experimental config: format.internal-phase
3439 if ui.configbool(b'format', b'internal-phase'):
3300 if ui.configbool(b'format', b'internal-phase'):
3440 requirements.add(b'internal-phase')
3301 requirements.add(b'internal-phase')
3441
3302
3442 if createopts.get(b'narrowfiles'):
3303 if createopts.get(b'narrowfiles'):
3443 requirements.add(repository.NARROW_REQUIREMENT)
3304 requirements.add(repository.NARROW_REQUIREMENT)
3444
3305
3445 if createopts.get(b'lfs'):
3306 if createopts.get(b'lfs'):
3446 requirements.add(b'lfs')
3307 requirements.add(b'lfs')
3447
3308
3448 if ui.configbool(b'format', b'bookmarks-in-store'):
3309 if ui.configbool(b'format', b'bookmarks-in-store'):
3449 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3310 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3450
3311
3451 if ui.configbool(b'format', b'use-persistent-nodemap'):
3312 if ui.configbool(b'format', b'use-persistent-nodemap'):
3452 requirements.add(NODEMAP_REQUIREMENT)
3313 requirements.add(NODEMAP_REQUIREMENT)
3453
3314
3454 return requirements
3315 return requirements
3455
3316
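Each requirement ends up as one line in the repository's .hg/requires file,
and a client that does not recognize an entry refuses to operate on the
repo. Reading the set back is simple (a standalone sketch, not the scmutil
helper used further below):

    def read_requires(hgdir):
        # hgdir is a repository's .hg directory
        with open(hgdir + '/requires', 'rb') as fp:
            return {line.strip() for line in fp if line.strip()}
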
3456
3317
3457 def filterknowncreateopts(ui, createopts):
3318 def filterknowncreateopts(ui, createopts):
3458 """Filters a dict of repo creation options against options that are known.
3319 """Filters a dict of repo creation options against options that are known.
3459
3320
3460 Receives a dict of repo creation options and returns a dict of those
3321 Receives a dict of repo creation options and returns a dict of those
3461 options that we don't know how to handle.
3322 options that we don't know how to handle.
3462
3323
3463 This function is called as part of repository creation. If the
3324 This function is called as part of repository creation. If the
3464 returned dict contains any items, repository creation will not
3325 returned dict contains any items, repository creation will not
3465 be allowed, as it means there was a request to create a repository
3326 be allowed, as it means there was a request to create a repository
3466 with options not recognized by loaded code.
3327 with options not recognized by loaded code.
3467
3328
3468 Extensions can wrap this function to filter out creation options
3329 Extensions can wrap this function to filter out creation options
3469 they know how to handle.
3330 they know how to handle.
3470 """
3331 """
3471 known = {
3332 known = {
3472 b'backend',
3333 b'backend',
3473 b'lfs',
3334 b'lfs',
3474 b'narrowfiles',
3335 b'narrowfiles',
3475 b'sharedrepo',
3336 b'sharedrepo',
3476 b'sharedrelative',
3337 b'sharedrelative',
3477 b'shareditems',
3338 b'shareditems',
3478 b'shallowfilestore',
3339 b'shallowfilestore',
3479 }
3340 }
3480
3341
3481 return {k: v for k, v in createopts.items() if k not in known}
3342 return {k: v for k, v in createopts.items() if k not in known}
3482
3343
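For instance, with only the built-in options above recognized (the
b'frobnicate' key is hypothetical):

    leftover = filterknowncreateopts(ui, {b'backend': b'revlogv1',
                                          b'frobnicate': True})
    # leftover == {b'frobnicate': True}, so repository creation below
    # aborts and names the unrecognized option
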
3483
3344
3484 def createrepository(ui, path, createopts=None):
3345 def createrepository(ui, path, createopts=None):
3485 """Create a new repository in a vfs.
3346 """Create a new repository in a vfs.
3486
3347
3487 ``path`` path to the new repo's working directory.
3348 ``path`` path to the new repo's working directory.
3488 ``createopts`` options for the new repository.
3349 ``createopts`` options for the new repository.
3489
3350
3490 The following keys for ``createopts`` are recognized:
3351 The following keys for ``createopts`` are recognized:
3491
3352
3492 backend
3353 backend
3493 The storage backend to use.
3354 The storage backend to use.
3494 lfs
3355 lfs
3495 Repository will be created with ``lfs`` requirement. The lfs extension
3356 Repository will be created with ``lfs`` requirement. The lfs extension
3496 will automatically be loaded when the repository is accessed.
3357 will automatically be loaded when the repository is accessed.
3497 narrowfiles
3358 narrowfiles
3498 Set up repository to support narrow file storage.
3359 Set up repository to support narrow file storage.
3499 sharedrepo
3360 sharedrepo
3500 Repository object from which storage should be shared.
3361 Repository object from which storage should be shared.
3501 sharedrelative
3362 sharedrelative
3502 Boolean indicating if the path to the shared repo should be
3363 Boolean indicating if the path to the shared repo should be
3503 stored as relative. By default, the pointer to the "parent" repo
3364 stored as relative. By default, the pointer to the "parent" repo
3504 is stored as an absolute path.
3365 is stored as an absolute path.
3505 shareditems
3366 shareditems
3506 Set of items to share to the new repository (in addition to storage).
3367 Set of items to share to the new repository (in addition to storage).
3507 shallowfilestore
3368 shallowfilestore
3508 Indicates that storage for files should be shallow (not all ancestor
3369 Indicates that storage for files should be shallow (not all ancestor
3509 revisions are known).
3370 revisions are known).
3510 """
3371 """
3511 createopts = defaultcreateopts(ui, createopts=createopts)
3372 createopts = defaultcreateopts(ui, createopts=createopts)
3512
3373
3513 unknownopts = filterknowncreateopts(ui, createopts)
3374 unknownopts = filterknowncreateopts(ui, createopts)
3514
3375
3515 if not isinstance(unknownopts, dict):
3376 if not isinstance(unknownopts, dict):
3516 raise error.ProgrammingError(
3377 raise error.ProgrammingError(
3517 b'filterknowncreateopts() did not return a dict'
3378 b'filterknowncreateopts() did not return a dict'
3518 )
3379 )
3519
3380
3520 if unknownopts:
3381 if unknownopts:
3521 raise error.Abort(
3382 raise error.Abort(
3522 _(
3383 _(
3523 b'unable to create repository because of unknown '
3384 b'unable to create repository because of unknown '
3524 b'creation option: %s'
3385 b'creation option: %s'
3525 )
3386 )
3526 % b', '.join(sorted(unknownopts)),
3387 % b', '.join(sorted(unknownopts)),
3527 hint=_(b'is a required extension not loaded?'),
3388 hint=_(b'is a required extension not loaded?'),
3528 )
3389 )
3529
3390
3530 requirements = newreporequirements(ui, createopts=createopts)
3391 requirements = newreporequirements(ui, createopts=createopts)
3531
3392
3532 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3393 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3533
3394
3534 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3395 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3535 if hgvfs.exists():
3396 if hgvfs.exists():
3536 raise error.RepoError(_(b'repository %s already exists') % path)
3397 raise error.RepoError(_(b'repository %s already exists') % path)
3537
3398
3538 if b'sharedrepo' in createopts:
3399 if b'sharedrepo' in createopts:
3539 sharedpath = createopts[b'sharedrepo'].sharedpath
3400 sharedpath = createopts[b'sharedrepo'].sharedpath
3540
3401
3541 if createopts.get(b'sharedrelative'):
3402 if createopts.get(b'sharedrelative'):
3542 try:
3403 try:
3543 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3404 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3544 except (IOError, ValueError) as e:
3405 except (IOError, ValueError) as e:
3545 # ValueError is raised on Windows if the drive letters differ
3406 # ValueError is raised on Windows if the drive letters differ
3546 # on each path.
3407 # on each path.
3547 raise error.Abort(
3408 raise error.Abort(
3548 _(b'cannot calculate relative path'),
3409 _(b'cannot calculate relative path'),
3549 hint=stringutil.forcebytestr(e),
3410 hint=stringutil.forcebytestr(e),
3550 )
3411 )
3551
3412
3552 if not wdirvfs.exists():
3413 if not wdirvfs.exists():
3553 wdirvfs.makedirs()
3414 wdirvfs.makedirs()
3554
3415
3555 hgvfs.makedir(notindexed=True)
3416 hgvfs.makedir(notindexed=True)
3556 if b'sharedrepo' not in createopts:
3417 if b'sharedrepo' not in createopts:
3557 hgvfs.mkdir(b'cache')
3418 hgvfs.mkdir(b'cache')
3558 hgvfs.mkdir(b'wcache')
3419 hgvfs.mkdir(b'wcache')
3559
3420
3560 if b'store' in requirements and b'sharedrepo' not in createopts:
3421 if b'store' in requirements and b'sharedrepo' not in createopts:
3561 hgvfs.mkdir(b'store')
3422 hgvfs.mkdir(b'store')
3562
3423
3563 # We create an invalid changelog outside the store so very old
3424 # We create an invalid changelog outside the store so very old
3564 # Mercurial versions (which didn't know about the requirements
3425 # Mercurial versions (which didn't know about the requirements
3565 # file) encounter an error on reading the changelog. This
3426 # file) encounter an error on reading the changelog. This
3566 # effectively locks out old clients and prevents them from
3427 # effectively locks out old clients and prevents them from
3567 # mucking with a repo in an unknown format.
3428 # mucking with a repo in an unknown format.
3568 #
3429 #
3569 # The revlog header has version 2, which won't be recognized by
3430 # The revlog header has version 2, which won't be recognized by
3570 # such old clients.
3431 # such old clients.
3571 hgvfs.append(
3432 hgvfs.append(
3572 b'00changelog.i',
3433 b'00changelog.i',
3573 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3434 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3574 b'layout',
3435 b'layout',
3575 )
3436 )
3576
3437
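    # The four bytes b'\0\0\0\2' pack flags into the high 16 bits and the
    # revlog version into the low 16 bits, e.g.:
    #   struct.unpack('>I', b'\x00\x00\x00\x02')[0] & 0xFFFF == 2
    # version 2 with no flags -- unreadable by pre-requirements clients.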
3577 scmutil.writerequires(hgvfs, requirements)
3438 scmutil.writerequires(hgvfs, requirements)
3578
3439
3579 # Write out file telling readers where to find the shared store.
3440 # Write out file telling readers where to find the shared store.
3580 if b'sharedrepo' in createopts:
3441 if b'sharedrepo' in createopts:
3581 hgvfs.write(b'sharedpath', sharedpath)
3442 hgvfs.write(b'sharedpath', sharedpath)
3582
3443
3583 if createopts.get(b'shareditems'):
3444 if createopts.get(b'shareditems'):
3584 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3445 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3585 hgvfs.write(b'shared', shared)
3446 hgvfs.write(b'shared', shared)
3586
3447
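A typical programmatic call, leaning on the defaults established earlier
(a sketch; ui is an existing ui object and the path is illustrative):

    createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
    # creates /tmp/newrepo/.hg with 'lfs' in .hg/requires, so the lfs
    # extension is loaded automatically whenever the repo is accessed
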
3587
3448
3588 def poisonrepository(repo):
3449 def poisonrepository(repo):
3589 """Poison a repository instance so it can no longer be used."""
3450 """Poison a repository instance so it can no longer be used."""
3590 # Perform any cleanup on the instance.
3451 # Perform any cleanup on the instance.
3591 repo.close()
3452 repo.close()
3592
3453
3593 # Our strategy is to replace the type of the object with one that
3454 # Our strategy is to replace the type of the object with one that
3594 # makes all attribute lookups raise an error.
3455 # makes all attribute lookups raise an error.
3595 #
3456 #
3596 # But we have to allow the close() method because some constructors
3457 # But we have to allow the close() method because some constructors
3597 # of repos call close() on repo references.
3458 # of repos call close() on repo references.
3598 class poisonedrepository(object):
3459 class poisonedrepository(object):
3599 def __getattribute__(self, item):
3460 def __getattribute__(self, item):
3600 if item == 'close':
3461 if item == 'close':
3601 return object.__getattribute__(self, item)
3462 return object.__getattribute__(self, item)
3602
3463
3603 raise error.ProgrammingError(
3464 raise error.ProgrammingError(
3604 b'repo instances should not be used after unshare'
3465 b'repo instances should not be used after unshare'
3605 )
3466 )
3606
3467
3607 def close(self):
3468 def close(self):
3608 pass
3469 pass
3609
3470
3610 # We may have a repoview, which intercepts __setattr__. So be sure
3471 # We may have a repoview, which intercepts __setattr__. So be sure
3611 # we operate at the lowest level possible.
3472 # we operate at the lowest level possible.
3612 object.__setattr__(repo, '__class__', poisonedrepository)
3473 object.__setattr__(repo, '__class__', poisonedrepository)
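The __class__ swap above is plain Python and works outside Mercurial too; a
minimal sketch of the same technique:

    class _Poisoned(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('object used after teardown')

        def close(self):
            pass

    class _Thing(object):
        pass

    t = _Thing()
    object.__setattr__(t, '__class__', _Poisoned)
    t.close()   # still permitted
    # any other attribute access now raises RuntimeError
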
@@ -1,1203 +1,1202 b''
1 $ cat >> "$HGRCPATH" << EOF
1 $ cat >> "$HGRCPATH" << EOF
2 > [ui]
2 > [ui]
3 > merge = :merge3
3 > merge = :merge3
4 > EOF
4 > EOF
5
5
6 init
6 init
7
7
8 $ hg init repo
8 $ hg init repo
9 $ cd repo
9 $ cd repo
10
10
11 commit
11 commit
12
12
13 $ echo 'a' > a
13 $ echo 'a' > a
14 $ hg ci -A -m test -u nobody -d '1 0'
14 $ hg ci -A -m test -u nobody -d '1 0'
15 adding a
15 adding a
16
16
17 annotate -c
17 annotate -c
18
18
19 $ hg annotate -c a
19 $ hg annotate -c a
20 8435f90966e4: a
20 8435f90966e4: a
21
21
22 annotate -cl
22 annotate -cl
23
23
24 $ hg annotate -cl a
24 $ hg annotate -cl a
25 8435f90966e4:1: a
25 8435f90966e4:1: a
26
26
27 annotate -d
27 annotate -d
28
28
29 $ hg annotate -d a
29 $ hg annotate -d a
30 Thu Jan 01 00:00:01 1970 +0000: a
30 Thu Jan 01 00:00:01 1970 +0000: a
31
31
32 annotate -n
32 annotate -n
33
33
34 $ hg annotate -n a
34 $ hg annotate -n a
35 0: a
35 0: a
36
36
37 annotate -nl
37 annotate -nl
38
38
39 $ hg annotate -nl a
39 $ hg annotate -nl a
40 0:1: a
40 0:1: a
41
41
42 annotate -u
42 annotate -u
43
43
44 $ hg annotate -u a
44 $ hg annotate -u a
45 nobody: a
45 nobody: a
46
46
47 annotate -cdnu
47 annotate -cdnu
48
48
49 $ hg annotate -cdnu a
49 $ hg annotate -cdnu a
50 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
50 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
51
51
52 annotate -cdnul
52 annotate -cdnul
53
53
54 $ hg annotate -cdnul a
54 $ hg annotate -cdnul a
55 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
55 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
56
56
57 annotate (JSON)
57 annotate (JSON)
58
58
59 $ hg annotate -Tjson a
59 $ hg annotate -Tjson a
60 [
60 [
61 {
61 {
62 "lines": [{"line": "a\n", "rev": 0}],
62 "lines": [{"line": "a\n", "rev": 0}],
63 "path": "a"
63 "path": "a"
64 }
64 }
65 ]
65 ]
66
66
67 $ hg annotate -Tjson -cdfnul a
67 $ hg annotate -Tjson -cdfnul a
68 [
68 [
69 {
69 {
70 "lines": [{"date": [1.0, 0], "line": "a\n", "lineno": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
70 "lines": [{"date": [1.0, 0], "line": "a\n", "lineno": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
71 "path": "a"
71 "path": "a"
72 }
72 }
73 ]
73 ]
74
74
75 log-like templating
75 log-like templating
76
76
77 $ hg annotate -T'{lines % "{rev} {node|shortest}: {line}"}' a
77 $ hg annotate -T'{lines % "{rev} {node|shortest}: {line}"}' a
78 0 8435: a
78 0 8435: a
79
79
80 '{lineno}' field should be populated as necessary
80 '{lineno}' field should be populated as necessary
81
81
82 $ hg annotate -T'{lines % "{rev}:{lineno}: {line}"}' a
82 $ hg annotate -T'{lines % "{rev}:{lineno}: {line}"}' a
83 0:1: a
83 0:1: a
84 $ hg annotate -Ta a \
84 $ hg annotate -Ta a \
85 > --config templates.a='"{lines % "{rev}:{lineno}: {line}"}"'
85 > --config templates.a='"{lines % "{rev}:{lineno}: {line}"}"'
86 0:1: a
86 0:1: a
87
87
88 $ cat <<EOF >>a
88 $ cat <<EOF >>a
89 > a
89 > a
90 > a
90 > a
91 > EOF
91 > EOF
92 $ hg ci -ma1 -d '1 0'
92 $ hg ci -ma1 -d '1 0'
93 $ hg cp a b
93 $ hg cp a b
94 $ hg ci -mb -d '1 0'
94 $ hg ci -mb -d '1 0'
95 $ cat <<EOF >> b
95 $ cat <<EOF >> b
96 > b4
96 > b4
97 > b5
97 > b5
98 > b6
98 > b6
99 > EOF
99 > EOF
100 $ hg ci -mb2 -d '2 0'
100 $ hg ci -mb2 -d '2 0'
101
101
102 default output of '{lines}' should be readable
102 default output of '{lines}' should be readable
103
103
104 $ hg annotate -T'{lines}' a
104 $ hg annotate -T'{lines}' a
105 0: a
105 0: a
106 1: a
106 1: a
107 1: a
107 1: a
108 $ hg annotate -T'{join(lines, "\n")}' a
108 $ hg annotate -T'{join(lines, "\n")}' a
109 0: a
109 0: a
110
110
111 1: a
111 1: a
112
112
113 1: a
113 1: a
114
114
115 several filters can be applied to '{lines}'
115 several filters can be applied to '{lines}'
116
116
117 $ hg annotate -T'{lines|json}\n' a
117 $ hg annotate -T'{lines|json}\n' a
118 [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}]
118 [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}]
119 $ hg annotate -T'{lines|stringify}' a
119 $ hg annotate -T'{lines|stringify}' a
120 0: a
120 0: a
121 1: a
121 1: a
122 1: a
122 1: a
123 $ hg annotate -T'{lines|count}\n' a
123 $ hg annotate -T'{lines|count}\n' a
124 3
124 3
125
125
126 annotate multiple files (JSON)
126 annotate multiple files (JSON)
127
127
128 $ hg annotate -Tjson a b
128 $ hg annotate -Tjson a b
129 [
129 [
130 {
130 {
131 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
131 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
132 "path": "a"
132 "path": "a"
133 },
133 },
134 {
134 {
135 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
135 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
136 "path": "b"
136 "path": "b"
137 }
137 }
138 ]
138 ]
139
139
140 annotate multiple files (template)
140 annotate multiple files (template)
141
141
142 $ hg annotate -T'== {path} ==\n{lines % "{rev}: {line}"}' a b
142 $ hg annotate -T'== {path} ==\n{lines % "{rev}: {line}"}' a b
143 == a ==
143 == a ==
144 0: a
144 0: a
145 1: a
145 1: a
146 1: a
146 1: a
147 == b ==
147 == b ==
148 0: a
148 0: a
149 1: a
149 1: a
150 1: a
150 1: a
151 3: b4
151 3: b4
152 3: b5
152 3: b5
153 3: b6
153 3: b6
154
154
155 annotate -n b
155 annotate -n b
156
156
157 $ hg annotate -n b
157 $ hg annotate -n b
158 0: a
158 0: a
159 1: a
159 1: a
160 1: a
160 1: a
161 3: b4
161 3: b4
162 3: b5
162 3: b5
163 3: b6
163 3: b6
164
164
165 annotate --no-follow b
165 annotate --no-follow b
166
166
167 $ hg annotate --no-follow b
167 $ hg annotate --no-follow b
168 2: a
168 2: a
169 2: a
169 2: a
170 2: a
170 2: a
171 3: b4
171 3: b4
172 3: b5
172 3: b5
173 3: b6
173 3: b6
174
174
175 annotate -nl b
175 annotate -nl b
176
176
177 $ hg annotate -nl b
177 $ hg annotate -nl b
178 0:1: a
178 0:1: a
179 1:2: a
179 1:2: a
180 1:3: a
180 1:3: a
181 3:4: b4
181 3:4: b4
182 3:5: b5
182 3:5: b5
183 3:6: b6
183 3:6: b6
184
184
185 annotate -nf b
185 annotate -nf b
186
186
187 $ hg annotate -nf b
187 $ hg annotate -nf b
188 0 a: a
188 0 a: a
189 1 a: a
189 1 a: a
190 1 a: a
190 1 a: a
191 3 b: b4
191 3 b: b4
192 3 b: b5
192 3 b: b5
193 3 b: b6
193 3 b: b6
194
194
195 annotate -nlf b
195 annotate -nlf b
196
196
197 $ hg annotate -nlf b
197 $ hg annotate -nlf b
198 0 a:1: a
198 0 a:1: a
199 1 a:2: a
199 1 a:2: a
200 1 a:3: a
200 1 a:3: a
201 3 b:4: b4
201 3 b:4: b4
202 3 b:5: b5
202 3 b:5: b5
203 3 b:6: b6
203 3 b:6: b6
204
204
205 $ hg up -C 2
205 $ hg up -C 2
206 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
206 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
207 $ cat <<EOF >> b
207 $ cat <<EOF >> b
208 > b4
208 > b4
209 > c
209 > c
210 > b5
210 > b5
211 > EOF
211 > EOF
212 $ hg ci -mb2.1 -d '2 0'
212 $ hg ci -mb2.1 -d '2 0'
213 created new head
213 created new head
214 $ hg merge
214 $ hg merge
215 merging b
215 merging b
216 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
216 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
217 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
217 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
218 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
218 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
219 [1]
219 [1]
220 $ cat b
220 $ cat b
221 a
221 a
222 a
222 a
223 a
223 a
224 <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
224 <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
225 b4
225 b4
226 c
226 c
227 b5
227 b5
228 ||||||| base
228 ||||||| base
229 =======
229 =======
230 b4
230 b4
231 b5
231 b5
232 b6
232 b6
233 >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
233 >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
234 $ cat <<EOF > b
234 $ cat <<EOF > b
235 > a
235 > a
236 > a
236 > a
237 > a
237 > a
238 > b4
238 > b4
239 > c
239 > c
240 > b5
240 > b5
241 > EOF
241 > EOF
242 $ hg resolve --mark -q
242 $ hg resolve --mark -q
243 $ rm b.orig
243 $ rm b.orig
244 $ hg ci -mmergeb -d '3 0'
244 $ hg ci -mmergeb -d '3 0'
245
245
246 annotate after merge
246 annotate after merge
247
247
248 $ hg annotate -nf b
248 $ hg annotate -nf b
249 0 a: a
249 0 a: a
250 1 a: a
250 1 a: a
251 1 a: a
251 1 a: a
252 3 b: b4
252 3 b: b4
253 4 b: c
253 4 b: c
254 3 b: b5
254 3 b: b5
255
255
256 annotate after merge with -l
256 annotate after merge with -l
257
257
258 $ hg annotate -nlf b
258 $ hg annotate -nlf b
259 0 a:1: a
259 0 a:1: a
260 1 a:2: a
260 1 a:2: a
261 1 a:3: a
261 1 a:3: a
262 3 b:4: b4
262 3 b:4: b4
263 4 b:5: c
263 4 b:5: c
264 3 b:5: b5
264 3 b:5: b5
265
265
266 $ hg up -C 1
266 $ hg up -C 1
267 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
267 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
268 $ hg cp a b
268 $ hg cp a b
269 $ cat <<EOF > b
269 $ cat <<EOF > b
270 > a
270 > a
271 > z
271 > z
272 > a
272 > a
273 > EOF
273 > EOF
274 $ hg ci -mc -d '3 0'
274 $ hg ci -mc -d '3 0'
275 created new head
275 created new head
276 Work around the pure version not resolving the conflict like native code
276 Work around the pure version not resolving the conflict like native code
277 #if pure
277 #if pure
278 $ hg merge
278 $ hg merge
279 merging b
279 merging b
280 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
280 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
281 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
281 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
282 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
282 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
283 [1]
283 [1]
284 $ cat <<EOF > b
284 $ cat <<EOF > b
285 > a
285 > a
286 > z
286 > z
287 > a
287 > a
288 > b4
288 > b4
289 > c
289 > c
290 > b5
290 > b5
291 > EOF
291 > EOF
292 $ hg resolve -m b
292 $ hg resolve -m b
293 (no more unresolved files)
293 (no more unresolved files)
294 $ rm b.orig
294 $ rm b.orig
295 #else
295 #else
296 $ hg merge
296 $ hg merge
297 merging b
297 merging b
298 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
298 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
299 (branch merge, don't forget to commit)
299 (branch merge, don't forget to commit)
300 #endif
300 #endif
301 $ echo d >> b
301 $ echo d >> b
302 $ hg ci -mmerge2 -d '4 0'
302 $ hg ci -mmerge2 -d '4 0'
303
303
304 annotate after rename merge
304 annotate after rename merge
305
305
306 $ hg annotate -nf b
306 $ hg annotate -nf b
307 0 a: a
307 0 a: a
308 6 b: z
308 6 b: z
309 1 a: a
309 1 a: a
310 3 b: b4
310 3 b: b4
311 4 b: c
311 4 b: c
312 3 b: b5
312 3 b: b5
313 7 b: d
313 7 b: d
314
314
315 annotate after rename merge with -l
315 annotate after rename merge with -l
316
316
317 $ hg annotate -nlf b
317 $ hg annotate -nlf b
318 0 a:1: a
318 0 a:1: a
319 6 b:2: z
319 6 b:2: z
320 1 a:3: a
320 1 a:3: a
321 3 b:4: b4
321 3 b:4: b4
322 4 b:5: c
322 4 b:5: c
323 3 b:5: b5
323 3 b:5: b5
324 7 b:7: d
324 7 b:7: d
325
325
326 --skip nothing (should be the same as no --skip at all)
326 --skip nothing (should be the same as no --skip at all)
327
327
328 $ hg annotate -nlf b --skip '1::0'
328 $ hg annotate -nlf b --skip '1::0'
329 0 a:1: a
329 0 a:1: a
330 6 b:2: z
330 6 b:2: z
331 1 a:3: a
331 1 a:3: a
332 3 b:4: b4
332 3 b:4: b4
333 4 b:5: c
333 4 b:5: c
334 3 b:5: b5
334 3 b:5: b5
335 7 b:7: d
335 7 b:7: d
336
336
337 --skip a modified line. Note a slight behavior difference in pure - this is
337 --skip a modified line. Note a slight behavior difference in pure - this is
338 because the pure code comes up with slightly different deltas internally.
338 because the pure code comes up with slightly different deltas internally.
339
339
340 $ hg annotate -nlf b --skip 6
340 $ hg annotate -nlf b --skip 6
341 0 a:1: a
341 0 a:1: a
342 1 a:2* z (no-pure !)
342 1 a:2* z (no-pure !)
343 0 a:1* z (pure !)
343 0 a:1* z (pure !)
344 1 a:3: a
344 1 a:3: a
345 3 b:4: b4
345 3 b:4: b4
346 4 b:5: c
346 4 b:5: c
347 3 b:5: b5
347 3 b:5: b5
348 7 b:7: d
348 7 b:7: d
349
349
350 --skip added lines (and test multiple skip)
350 --skip added lines (and test multiple skip)
351
351
352 $ hg annotate -nlf b --skip 3
352 $ hg annotate -nlf b --skip 3
353 0 a:1: a
353 0 a:1: a
354 6 b:2: z
354 6 b:2: z
355 1 a:3: a
355 1 a:3: a
356 1 a:3* b4
356 1 a:3* b4
357 4 b:5: c
357 4 b:5: c
358 1 a:3* b5
358 1 a:3* b5
359 7 b:7: d
359 7 b:7: d
360
360
361 $ hg annotate -nlf b --skip 4
361 $ hg annotate -nlf b --skip 4
362 0 a:1: a
362 0 a:1: a
363 6 b:2: z
363 6 b:2: z
364 1 a:3: a
364 1 a:3: a
365 3 b:4: b4
365 3 b:4: b4
366 1 a:3* c
366 1 a:3* c
367 3 b:5: b5
367 3 b:5: b5
368 7 b:7: d
368 7 b:7: d
369
369
370 $ hg annotate -nlf b --skip 3 --skip 4
370 $ hg annotate -nlf b --skip 3 --skip 4
371 0 a:1: a
371 0 a:1: a
372 6 b:2: z
372 6 b:2: z
373 1 a:3: a
373 1 a:3: a
374 1 a:3* b4
374 1 a:3* b4
375 1 a:3* c
375 1 a:3* c
376 1 a:3* b5
376 1 a:3* b5
377 7 b:7: d
377 7 b:7: d
378
378
379 $ hg annotate -nlf b --skip 'merge()'
379 $ hg annotate -nlf b --skip 'merge()'
380 0 a:1: a
380 0 a:1: a
381 6 b:2: z
381 6 b:2: z
382 1 a:3: a
382 1 a:3: a
383 3 b:4: b4
383 3 b:4: b4
384 4 b:5: c
384 4 b:5: c
385 3 b:5: b5
385 3 b:5: b5
386 3 b:5* d
386 3 b:5* d
387
387
388 --skip everything -- use the revision the file was introduced in
388 --skip everything -- use the revision the file was introduced in
389
389
390 $ hg annotate -nlf b --skip 'all()'
390 $ hg annotate -nlf b --skip 'all()'
391 0 a:1: a
391 0 a:1: a
392 0 a:1* z
392 0 a:1* z
393 0 a:1* a
393 0 a:1* a
394 0 a:1* b4
394 0 a:1* b4
395 0 a:1* c
395 0 a:1* c
396 0 a:1* b5
396 0 a:1* b5
397 0 a:1* d
397 0 a:1* d
398
398
399 Issue2807: alignment of line numbers with -l
399 Issue2807: alignment of line numbers with -l
400
400
401 $ echo more >> b
401 $ echo more >> b
402 $ hg ci -mmore -d '5 0'
402 $ hg ci -mmore -d '5 0'
403 $ echo more >> b
403 $ echo more >> b
404 $ hg ci -mmore -d '6 0'
404 $ hg ci -mmore -d '6 0'
405 $ echo more >> b
405 $ echo more >> b
406 $ hg ci -mmore -d '7 0'
406 $ hg ci -mmore -d '7 0'
407 $ hg annotate -nlf b
407 $ hg annotate -nlf b
408 0 a: 1: a
408 0 a: 1: a
409 6 b: 2: z
409 6 b: 2: z
410 1 a: 3: a
410 1 a: 3: a
411 3 b: 4: b4
411 3 b: 4: b4
412 4 b: 5: c
412 4 b: 5: c
413 3 b: 5: b5
413 3 b: 5: b5
414 7 b: 7: d
414 7 b: 7: d
415 8 b: 8: more
415 8 b: 8: more
416 9 b: 9: more
416 9 b: 9: more
417 10 b:10: more
417 10 b:10: more
418
418
419 linkrev vs rev
419 linkrev vs rev
420
420
421 $ hg annotate -r tip -n a
421 $ hg annotate -r tip -n a
422 0: a
422 0: a
423 1: a
423 1: a
424 1: a
424 1: a
425
425
426 linkrev vs rev with -l
426 linkrev vs rev with -l
427
427
428 $ hg annotate -r tip -nl a
428 $ hg annotate -r tip -nl a
429 0:1: a
429 0:1: a
430 1:2: a
430 1:2: a
431 1:3: a
431 1:3: a
432
432
433 Issue589: "undelete" sequence leads to crash
433 Issue589: "undelete" sequence leads to crash
434
434
435 annotate was crashing when trying to --follow something
435 annotate was crashing when trying to --follow something
436
436
437 like A -> B -> A
437 like A -> B -> A
438
438
439 generate ABA rename configuration
439 generate ABA rename configuration
440
440
441 $ echo foo > foo
441 $ echo foo > foo
442 $ hg add foo
442 $ hg add foo
443 $ hg ci -m addfoo
443 $ hg ci -m addfoo
444 $ hg rename foo bar
444 $ hg rename foo bar
445 $ hg ci -m renamefoo
445 $ hg ci -m renamefoo
446 $ hg rename bar foo
446 $ hg rename bar foo
447 $ hg ci -m renamebar
447 $ hg ci -m renamebar
448
448
449 annotate after ABA with follow
449 annotate after ABA with follow
450
450
451 $ hg annotate --follow foo
451 $ hg annotate --follow foo
452 foo: foo
452 foo: foo
453
453
454 missing file
454 missing file
455
455
456 $ hg ann nosuchfile
456 $ hg ann nosuchfile
457 abort: nosuchfile: no such file in rev e9e6b4fa872f
457 abort: nosuchfile: no such file in rev e9e6b4fa872f
458 [255]
458 [255]
459
459
460 annotate file without '\n' on last line
460 annotate file without '\n' on last line
461
461
462 $ printf "" > c
462 $ printf "" > c
463 $ hg ci -A -m test -u nobody -d '1 0'
463 $ hg ci -A -m test -u nobody -d '1 0'
464 adding c
464 adding c
465 $ hg annotate c
465 $ hg annotate c
466 $ printf "a\nb" > c
466 $ printf "a\nb" > c
467 $ hg ci -m test
467 $ hg ci -m test
468 $ hg annotate c
468 $ hg annotate c
469 [0-9]+: a (re)
469 [0-9]+: a (re)
470 [0-9]+: b (re)
470 [0-9]+: b (re)
471
471
472 Issue3841: check annotation of a file whose filelog includes a merge
472 Issue3841: check annotation of a file whose filelog includes a merge
473 between a revision and its ancestor
473 between a revision and its ancestor
474
474
475 to reproduce the situation with recent Mercurial, this script uses (1)
475 to reproduce the situation with recent Mercurial, this script uses (1)
476 "hg debugsetparents" to merge without the ancestor check done by "hg merge",
476 "hg debugsetparents" to merge without the ancestor check done by "hg merge",
477 and (2) an extension that allows filelog merging between the revision
477 and (2) an extension that allows filelog merging between the revision
478 and its ancestor by overriding "_filecommit".
478 and its ancestor by overriding "_filecommit".
479
479
480 $ cat > ../legacyrepo.py <<EOF
480 $ cat > ../legacyrepo.py <<EOF
481 > from __future__ import absolute_import
481 > from __future__ import absolute_import
482 > from mercurial import error, node
482 > from mercurial import commit, error, extensions, node
483 > def reposetup(ui, repo):
483 > def _filecommit(orig, repo, fctx, manifest1, manifest2,
484 > class legacyrepo(repo.__class__):
484 > linkrev, tr, includecopymeta):
485 > def _filecommit(self, fctx, manifest1, manifest2,
485 > fname = fctx.path()
486 > linkrev, tr, includecopymeta):
486 > text = fctx.data()
487 > fname = fctx.path()
487 > flog = repo.file(fname)
488 > text = fctx.data()
488 > fparent1 = manifest1.get(fname, node.nullid)
489 > flog = self.file(fname)
489 > fparent2 = manifest2.get(fname, node.nullid)
490 > fparent1 = manifest1.get(fname, node.nullid)
490 > meta = {}
491 > fparent2 = manifest2.get(fname, node.nullid)
491 > copy = fctx.copysource()
492 > meta = {}
492 > if copy and copy != fname:
493 > copy = fctx.copysource()
493 > raise error.Abort('copying is not supported')
494 > if copy and copy != fname:
494 > if fparent2 != node.nullid:
495 > raise error.Abort('copying is not supported')
495 > return flog.add(text, meta, tr, linkrev,
496 > if fparent2 != node.nullid:
496 > fparent1, fparent2), 'modified'
497 > return flog.add(text, meta, tr, linkrev,
497 > raise error.Abort('only merging is supported')
498 > fparent1, fparent2), 'modified'
498 > def uisetup(ui):
499 > raise error.Abort('only merging is supported')
499 > extensions.wrapfunction(commit, '_filecommit', _filecommit)
500 > repo.__class__ = legacyrepo
501 > EOF
500 > EOF
502
501
503 $ cat > baz <<EOF
502 $ cat > baz <<EOF
504 > 1
503 > 1
505 > 2
504 > 2
506 > 3
505 > 3
507 > 4
506 > 4
508 > 5
507 > 5
509 > EOF
508 > EOF
510 $ hg add baz
509 $ hg add baz
511 $ hg commit -m "baz:0"
510 $ hg commit -m "baz:0"
512
511
513 $ cat > baz <<EOF
512 $ cat > baz <<EOF
514 > 1 baz:1
513 > 1 baz:1
515 > 2
514 > 2
516 > 3
515 > 3
517 > 4
516 > 4
518 > 5
517 > 5
519 > EOF
518 > EOF
520 $ hg commit -m "baz:1"
519 $ hg commit -m "baz:1"
521
520
522 $ cat > baz <<EOF
521 $ cat > baz <<EOF
523 > 1 baz:1
522 > 1 baz:1
524 > 2 baz:2
523 > 2 baz:2
525 > 3
524 > 3
526 > 4
525 > 4
527 > 5
526 > 5
528 > EOF
527 > EOF
529 $ hg debugsetparents 17 17
528 $ hg debugsetparents 17 17
530 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
529 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
531 $ hg debugindexdot baz
530 $ hg debugindexdot baz
532 digraph G {
531 digraph G {
533 -1 -> 0
532 -1 -> 0
534 0 -> 1
533 0 -> 1
535 1 -> 2
534 1 -> 2
536 1 -> 2
535 1 -> 2
537 }
536 }
538 $ hg annotate baz
537 $ hg annotate baz
539 17: 1 baz:1
538 17: 1 baz:1
540 18: 2 baz:2
539 18: 2 baz:2
541 16: 3
540 16: 3
542 16: 4
541 16: 4
543 16: 5
542 16: 5
544
543
545 $ cat > baz <<EOF
544 $ cat > baz <<EOF
546 > 1 baz:1
545 > 1 baz:1
547 > 2 baz:2
546 > 2 baz:2
548 > 3 baz:3
547 > 3 baz:3
549 > 4
548 > 4
550 > 5
549 > 5
551 > EOF
550 > EOF
552 $ hg commit -m "baz:3"
551 $ hg commit -m "baz:3"
553
552
554 $ cat > baz <<EOF
553 $ cat > baz <<EOF
555 > 1 baz:1
554 > 1 baz:1
556 > 2 baz:2
555 > 2 baz:2
557 > 3 baz:3
556 > 3 baz:3
558 > 4 baz:4
557 > 4 baz:4
559 > 5
558 > 5
560 > EOF
559 > EOF
561 $ hg debugsetparents 19 18
560 $ hg debugsetparents 19 18
562 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
561 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
563 $ hg debugindexdot baz
562 $ hg debugindexdot baz
564 digraph G {
563 digraph G {
565 -1 -> 0
564 -1 -> 0
566 0 -> 1
565 0 -> 1
567 1 -> 2
566 1 -> 2
568 1 -> 2
567 1 -> 2
569 2 -> 3
568 2 -> 3
570 3 -> 4
569 3 -> 4
571 2 -> 4
570 2 -> 4
572 }
571 }
573 $ hg annotate baz
572 $ hg annotate baz
574 17: 1 baz:1
573 17: 1 baz:1
575 18: 2 baz:2
574 18: 2 baz:2
576 19: 3 baz:3
575 19: 3 baz:3
577 20: 4 baz:4
576 20: 4 baz:4
578 16: 5
577 16: 5
579
578
580 annotate clean file
579 annotate clean file
581
580
582 $ hg annotate -ncr "wdir()" foo
581 $ hg annotate -ncr "wdir()" foo
583 11 472b18db256d : foo
582 11 472b18db256d : foo
584
583
585 annotate modified file
584 annotate modified file
586
585
587 $ echo foofoo >> foo
586 $ echo foofoo >> foo
588 $ hg annotate -r "wdir()" foo
587 $ hg annotate -r "wdir()" foo
589 11 : foo
588 11 : foo
590 20+: foofoo
589 20+: foofoo
591
590
592 $ hg annotate -cr "wdir()" foo
591 $ hg annotate -cr "wdir()" foo
593 472b18db256d : foo
592 472b18db256d : foo
594 b6bedd5477e7+: foofoo
593 b6bedd5477e7+: foofoo
595
594
596 $ hg annotate -ncr "wdir()" foo
595 $ hg annotate -ncr "wdir()" foo
597 11 472b18db256d : foo
596 11 472b18db256d : foo
598 20 b6bedd5477e7+: foofoo
597 20 b6bedd5477e7+: foofoo
599
598
600 $ hg annotate --debug -ncr "wdir()" foo
599 $ hg annotate --debug -ncr "wdir()" foo
601 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
600 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
602 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
601 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
603
602
604 $ hg annotate -udr "wdir()" foo
603 $ hg annotate -udr "wdir()" foo
605 test Thu Jan 01 00:00:00 1970 +0000: foo
604 test Thu Jan 01 00:00:00 1970 +0000: foo
606 test [A-Za-z0-9:+ ]+: foofoo (re)
605 test [A-Za-z0-9:+ ]+: foofoo (re)
607
606
608 $ hg annotate -ncr "wdir()" -Tjson foo
607 $ hg annotate -ncr "wdir()" -Tjson foo
609 [
608 [
610 {
609 {
611 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": "ffffffffffffffffffffffffffffffffffffffff", "rev": 2147483647}],
610 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": "ffffffffffffffffffffffffffffffffffffffff", "rev": 2147483647}],
612 "path": "foo"
611 "path": "foo"
613 }
612 }
614 ]
613 ]
615
614
616 annotate added file
615 annotate added file
617
616
618 $ echo bar > bar
617 $ echo bar > bar
619 $ hg add bar
618 $ hg add bar
620 $ hg annotate -ncr "wdir()" bar
619 $ hg annotate -ncr "wdir()" bar
621 20 b6bedd5477e7+: bar
620 20 b6bedd5477e7+: bar
622
621
623 annotate renamed file
622 annotate renamed file
624
623
625 $ hg rename foo renamefoo2
624 $ hg rename foo renamefoo2
626 $ hg annotate -ncr "wdir()" renamefoo2
625 $ hg annotate -ncr "wdir()" renamefoo2
627 11 472b18db256d : foo
626 11 472b18db256d : foo
628 20 b6bedd5477e7+: foofoo
627 20 b6bedd5477e7+: foofoo
629
628
630 annotate missing file
629 annotate missing file
631
630
632 $ rm baz
631 $ rm baz
633
632
634 $ hg annotate -ncr "wdir()" baz
633 $ hg annotate -ncr "wdir()" baz
635 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
634 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
636 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
635 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
637 [255]
636 [255]
638
637
639 annotate removed file
638 annotate removed file
640
639
641 $ hg rm baz
640 $ hg rm baz
642
641
643 $ hg annotate -ncr "wdir()" baz
642 $ hg annotate -ncr "wdir()" baz
644 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
643 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
645 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
644 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
646 [255]
645 [255]
647
646
648 $ hg revert --all --no-backup --quiet
647 $ hg revert --all --no-backup --quiet
649 $ hg id -n
648 $ hg id -n
650 20
649 20
651
650
652 Test followlines() revset; we usually check both followlines(pat, range) and
651 Test followlines() revset; we usually check both followlines(pat, range) and
653 followlines(pat, range, descend=True) to make sure both give the same result
652 followlines(pat, range, descend=True) to make sure both give the same result
654 when they should.
653 when they should.
655
654
656 $ echo a >> foo
655 $ echo a >> foo
657 $ hg ci -m 'foo: add a'
656 $ hg ci -m 'foo: add a'
658 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
657 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
659 16: baz:0
658 16: baz:0
660 19: baz:3
659 19: baz:3
661 20: baz:4
660 20: baz:4
662 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
661 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
663 16: baz:0
662 16: baz:0
664 19: baz:3
663 19: baz:3
665 20: baz:4
664 20: baz:4
666 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
665 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
667 16: baz:0
666 16: baz:0
668 19: baz:3
667 19: baz:3
669 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
668 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
670 19: baz:3
669 19: baz:3
671 20: baz:4
670 20: baz:4
672 $ printf "0\n0\n" | cat - baz > baz1
671 $ printf "0\n0\n" | cat - baz > baz1
673 $ mv baz1 baz
672 $ mv baz1 baz
674 $ hg ci -m 'added two lines with 0'
673 $ hg ci -m 'added two lines with 0'
675 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
674 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
676 16: baz:0
675 16: baz:0
677 19: baz:3
676 19: baz:3
678 20: baz:4
677 20: baz:4
679 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
678 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
680 19: baz:3
679 19: baz:3
681 20: baz:4
680 20: baz:4
682 $ echo 6 >> baz
681 $ echo 6 >> baz
683 $ hg ci -m 'added line 8'
682 $ hg ci -m 'added line 8'
684 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
683 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
685 16: baz:0
684 16: baz:0
686 19: baz:3
685 19: baz:3
687 20: baz:4
686 20: baz:4
688 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
687 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
689 19: baz:3
688 19: baz:3
690 20: baz:4
689 20: baz:4
691 $ sed 's/3/3+/' baz > baz.new
690 $ sed 's/3/3+/' baz > baz.new
692 $ mv baz.new baz
691 $ mv baz.new baz
693 $ hg ci -m 'baz:3->3+'
692 $ hg ci -m 'baz:3->3+'
694 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
693 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
695 16: baz:0
694 16: baz:0
696 19: baz:3
695 19: baz:3
697 20: baz:4
696 20: baz:4
698 24: baz:3->3+
697 24: baz:3->3+
699 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
698 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
700 19: baz:3
699 19: baz:3
701 20: baz:4
700 20: baz:4
702 24: baz:3->3+
701 24: baz:3->3+
703 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
702 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
704 22: added two lines with 0
703 22: added two lines with 0
705
704
706 file patterns are okay
705 file patterns are okay
707 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
706 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
708 22: added two lines with 0
707 22: added two lines with 0
709
708
710 renames are followed
709 renames are followed
711 $ hg mv baz qux
710 $ hg mv baz qux
712 $ sed 's/4/4+/' qux > qux.new
711 $ sed 's/4/4+/' qux > qux.new
713 $ mv qux.new qux
712 $ mv qux.new qux
714 $ hg ci -m 'qux:4->4+'
713 $ hg ci -m 'qux:4->4+'
715 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
714 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
716 16: baz:0
715 16: baz:0
717 19: baz:3
716 19: baz:3
718 20: baz:4
717 20: baz:4
719 24: baz:3->3+
718 24: baz:3->3+
720 25: qux:4->4+
25: qux:4->4+

but are missed when following children
$ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
24: baz:3->3+

merge
$ hg up 24 --quiet
$ echo 7 >> baz
$ hg ci -m 'one more line, out of line range'
created new head
$ sed 's/3+/3-/' baz > baz.new
$ mv baz.new baz
$ hg ci -m 'baz:3+->3-'
$ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
16: baz:0
19: baz:3
20: baz:4
24: baz:3->3+
27: baz:3+->3-
$ hg merge 25
merging baz and qux to qux
warning: conflicts while merging qux! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
$ cat qux
0
0
1 baz:1
2 baz:2
<<<<<<< working copy: 863de62655ef - test: baz:3+->3-
3- baz:3
4 baz:4
||||||| base
3+ baz:3
4 baz:4
=======
3+ baz:3
4+ baz:4
>>>>>>> merge rev: cb8df70ae185 - test: qux:4->4+
5
6
7
$ cat > qux <<EOF
> 0
> 0
> 1 baz:1
> 2 baz:2
> 3- baz:3
> 4 baz:4
> 5
> 6
> 7
> EOF
$ hg resolve --mark -q
$ rm qux.orig
$ hg ci -m merge
$ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
16: baz:0
19: baz:3
20: baz:4
24: baz:3->3+
25: qux:4->4+
27: baz:3+->3-
28: merge
$ hg up 25 --quiet
$ hg merge 27
merging qux and baz to qux
warning: conflicts while merging qux! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
$ cat qux
0
0
1 baz:1
2 baz:2
<<<<<<< working copy: cb8df70ae185 - test: qux:4->4+
3+ baz:3
4+ baz:4
||||||| base
3+ baz:3
4 baz:4
=======
3- baz:3
4 baz:4
>>>>>>> merge rev: 863de62655ef - test: baz:3+->3-
5
6
7
$ cat > qux <<EOF
> 0
> 0
> 1 baz:1
> 2 baz:2
> 3+ baz:3
> 4+ baz:4
> 5
> 6
> EOF
$ hg resolve --mark -q
$ rm qux.orig
$ hg ci -m 'merge from other side'
created new head
$ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
16: baz:0
19: baz:3
20: baz:4
24: baz:3->3+
25: qux:4->4+
27: baz:3+->3-
29: merge from other side
$ hg up 24 --quiet

we are missing the branch with rename when following children
$ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=26, descend=True)'
27: baz:3+->3-

we follow all branches in descending direction
$ hg up 23 --quiet
$ sed 's/3/+3/' baz > baz.new
$ mv baz.new baz
$ hg ci -m 'baz:3->+3'
created new head
$ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
@ 30: baz:3->+3
:
: o 27: baz:3+->3-
: :
: o 24: baz:3->3+
:/
o 20: baz:4
|\
| o 19: baz:3
|/
o 18: baz:2
:
o 16: baz:0
|
~

Issue5595: on a merge changeset with different line ranges depending on
parent, be conservative and use the surrounding interval to avoid losing
track of possible further descendants in the specified range.

$ hg up 23 --quiet
$ hg cat baz -r 24
0
0
1 baz:1
2 baz:2
3+ baz:3
4 baz:4
5
6
$ cat > baz << EOF
> 0
> 0
> a
> b
> 3+ baz:3
> 4 baz:4
> y
> z
> EOF
$ hg ci -m 'baz: mostly rewrite with some content from 24'
created new head
$ hg merge --tool :merge-other 24
merging baz
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci -m 'merge forgetting about baz rewrite'
$ cat > baz << EOF
> 0
> 0
> 1 baz:1
> 2+ baz:2
> 3+ baz:3
> 4 baz:4
> 5
> 6
> EOF
$ hg ci -m 'baz: narrow change (2->2+)'
$ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:4, startrev=20, descend=True)' --graph
@ 33: baz: narrow change (2->2+)
|
o 32: merge forgetting about baz rewrite
|\
| o 31: baz: mostly rewrite with some content from 24
| :
| : o 30: baz:3->+3
| :/
+---o 27: baz:3+->3-
| :
o : 24: baz:3->3+
:/
o 20: baz:4
|\
~ ~

An integer as a line range, which is parsed as '1:1'

$ hg log -r 'followlines(baz, 1)'
changeset: 22:2174d0bf352a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: added two lines with 0


check error cases
$ hg up 24 --quiet
$ hg log -r 'followlines()'
hg: parse error: followlines takes at least 1 positional arguments
[255]
$ hg log -r 'followlines(baz)'
hg: parse error: followlines requires a line range
[255]
$ hg log -r 'followlines(baz, x)'
hg: parse error: followlines expects a line number or a range
[255]
$ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
hg: parse error: followlines expects exactly one revision
[255]
$ hg log -r 'followlines("glob:*", 1:2)'
hg: parse error: followlines expects exactly one file
[255]
$ hg log -r 'followlines(baz, 1:)'
hg: parse error: line range bounds must be integers
[255]
$ hg log -r 'followlines(baz, :1)'
hg: parse error: line range bounds must be integers
[255]
$ hg log -r 'followlines(baz, x:4)'
hg: parse error: line range bounds must be integers
[255]
$ hg log -r 'followlines(baz, 5:4)'
hg: parse error: line range must be positive
[255]
$ hg log -r 'followlines(baz, 0:4)'
hg: parse error: fromline must be strictly positive
[255]
$ hg log -r 'followlines(baz, 2:40)'
abort: line range exceeds file size
[255]
$ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
hg: parse error at 43: not a prefix: [
(followlines(baz, 2:4, startrev=20, descend=[1])
^ here)
[255]
$ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
hg: parse error: descend argument must be a boolean
[255]

Test empty annotate output

$ printf '\0' > binary
$ touch empty
$ hg ci -qAm 'add binary and empty files'

$ hg annotate binary empty
binary: binary file

$ hg annotate -Tjson binary empty
[
{
"path": "binary"
},
{
"lines": [],
"path": "empty"
}
]

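The JSON template output above is stable enough to consume from scripts. A
minimal sketch of reading it with Python (the file names come from the test
above; the parsing code itself is not part of the test):

  import json
  import subprocess

  # Run annotate with the JSON template; hg emits a JSON array with one
  # object per annotated file.
  out = subprocess.check_output(['hg', 'annotate', '-Tjson', 'binary', 'empty'])
  for entry in json.loads(out):
      # A binary file carries no "lines" key; an empty file carries an
      # empty list, so get() with a default covers both cases.
      for line in entry.get('lines', []):
          print(entry['path'], line['rev'], line['line'], end='')
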
Test annotate with whitespace options

$ cd ..
$ hg init repo-ws
$ cd repo-ws
$ cat > a <<EOF
> aa
>
> b b
> EOF
$ hg ci -Am "adda"
adding a
$ sed 's/EOL$//g' > a <<EOF
> a a
>
> EOL
> b b
> EOF
$ hg ci -m "changea"

Annotate with no option

$ hg annotate a
1: a a
0:
1:
1: b b

Annotate with --ignore-space-change

$ hg annotate --ignore-space-change a
1: a a
1:
0:
0: b b

Annotate with --ignore-all-space

$ hg annotate --ignore-all-space a
0: a a
0:
1:
0: b b

Annotate with --ignore-blank-lines (similar to no options case)

$ hg annotate --ignore-blank-lines a
1: a a
0:
1:
1: b b

$ cd ..

Annotate with orphaned CR (issue5798)
-------------------------------------

$ hg init repo-cr
$ cd repo-cr

$ cat <<'EOF' >> "$TESTTMP/substcr.py"
> import sys
> from mercurial.utils import procutil
> procutil.setbinary(sys.stdin)
> procutil.setbinary(sys.stdout)
> stdin = getattr(sys.stdin, 'buffer', sys.stdin)
> stdout = getattr(sys.stdout, 'buffer', sys.stdout)
> stdout.write(stdin.read().replace(b'\r', b'[CR]'))
> EOF

>>> with open('a', 'wb') as f:
... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g') and None
$ hg ci -qAm0
>>> with open('a', 'wb') as f:
... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g') and None
$ hg ci -m1

$ hg annotate -r0 a | "$PYTHON" "$TESTTMP/substcr.py"
0: 0a[CR]0b[CR]
0: 0c[CR]0d[CR]
0: 0e
0: 0f
0: 0g
$ hg annotate -r1 a | "$PYTHON" "$TESTTMP/substcr.py"
0: 0a[CR]0b[CR]
1: 1c[CR]1d[CR]
0: 0e
1: 1f
0: 0g

$ cd ..

Annotate with linkrev pointing to another branch
------------------------------------------------

create history with a filerev whose linkrev points to another branch

$ hg init branchedlinkrev
$ cd branchedlinkrev
$ echo A > a
$ hg commit -Am 'contentA'
adding a
$ echo B >> a
$ hg commit -m 'contentB'
$ hg up --rev 'desc(contentA)'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo unrelated > unrelated
$ hg commit -Am 'unrelated'
adding unrelated
created new head
$ hg graft -r 'desc(contentB)'
grafting 1:fd27c222e3e6 "contentB"
$ echo C >> a
$ hg commit -m 'contentC'
$ echo W >> a
$ hg log -G
@ changeset: 4:072f1e8df249
| tag: tip
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: contentC
|
o changeset: 3:ff38df03cc4b
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: contentB
|
o changeset: 2:62aaf3f6fc06
| parent: 0:f0932f74827e
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: unrelated
|
| o changeset: 1:fd27c222e3e6
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: contentB
|
o changeset: 0:f0932f74827e
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: contentA


Annotate should list ancestor of starting revision only

$ hg annotate a
0: A
3: B
4: C

$ hg annotate a -r 'wdir()'
0 : A
3 : B
4 : C
4+: W

Even when the starting revision is the linkrev-shadowed one:

$ hg annotate a -r 3
0: A
3: B

$ cd ..

Issue5360: Deleted chunk in p1 of a merge changeset

$ hg init repo-5360
$ cd repo-5360
$ echo 1 > a
$ hg commit -A a -m 1
$ echo 2 >> a
$ hg commit -m 2
$ echo a > a
$ hg commit -m a
$ hg update '.^' -q
$ echo 3 >> a
$ hg commit -m 3 -q
$ hg merge 2 -q
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
[1]
$ cat a
<<<<<<< working copy: 0a068f0261cf - test: 3
1
2
3
||||||| base
1
2
=======
a
>>>>>>> merge rev: 9409851bc20a - test: a
$ cat > a << EOF
> b
> 1
> 2
> 3
> a
> EOF
$ hg resolve --mark -q
$ rm a.orig
$ hg commit -m m
$ hg annotate a
4: b
0: 1
1: 2
3: 3
2: a

$ cd ..
@@ -1,819 +1,819 @@
(this file is backported from core hg tests/test-annotate.t)

$ cat >> $HGRCPATH << EOF
> [ui]
> merge = :merge3
> [diff]
> git=1
> [extensions]
> fastannotate=
> [fastannotate]
> modes=fctx
> forcefollow=False
> mainbranch=.
> EOF

init

$ hg init repo
$ cd repo

commit

$ echo 'a' > a
$ hg ci -A -m test -u nobody -d '1 0'
adding a

annotate -c

$ hg annotate -c a
8435f90966e4: a

annotate -cl

$ hg annotate -cl a
8435f90966e4:1: a

annotate -d

$ hg annotate -d a
Thu Jan 01 00:00:01 1970 +0000: a

annotate -n

$ hg annotate -n a
0: a

annotate -nl

$ hg annotate -nl a
0:1: a

annotate -u

$ hg annotate -u a
nobody: a

annotate -cdnu

$ hg annotate -cdnu a
nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a

annotate -cdnul

$ hg annotate -cdnul a
nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a

annotate (JSON)

$ hg annotate -Tjson a
[
{
"lines": [{"line": "a\n", "rev": 0}],
"path": "a"
}
]

$ hg annotate -Tjson -cdfnul a
[
{
"lines": [{"date": [1.0, 0], "line": "a\n", "lineno": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
"path": "a"
}
]

$ cat <<EOF >>a
> a
> a
> EOF
$ hg ci -ma1 -d '1 0'
$ hg cp a b
$ hg ci -mb -d '1 0'
$ cat <<EOF >> b
> b4
> b5
> b6
> EOF
$ hg ci -mb2 -d '2 0'

annotate -n b

$ hg annotate -n b
0: a
1: a
1: a
3: b4
3: b5
3: b6

annotate --no-follow b

$ hg annotate --no-follow b
2: a
2: a
2: a
3: b4
3: b5
3: b6

annotate -nl b

$ hg annotate -nl b
0:1: a
1:2: a
1:3: a
3:4: b4
3:5: b5
3:6: b6

annotate -nf b

$ hg annotate -nf b
0 a: a
1 a: a
1 a: a
3 b: b4
3 b: b5
3 b: b6

annotate -nlf b

$ hg annotate -nlf b
0 a:1: a
1 a:2: a
1 a:3: a
3 b:4: b4
3 b:5: b5
3 b:6: b6

$ hg up -C 2
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cat <<EOF >> b
> b4
> c
> b5
> EOF
$ hg ci -mb2.1 -d '2 0'
created new head
$ hg merge
merging b
warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
$ cat b
a
a
a
<<<<<<< working copy: 5fbdc1152d97 - test: b2.1
b4
c
b5
||||||| base
=======
b4
b5
b6
>>>>>>> merge rev: 37ec9f5c3d1f - test: b2
$ cat <<EOF > b
> a
> a
> a
> b4
> c
> b5
> EOF
$ hg resolve --mark -q
$ rm b.orig
$ hg ci -mmergeb -d '3 0'

annotate after merge
(note: the first one falls back to the vanilla annotate which does not use linelog)

$ hg annotate -nf b --debug
fastannotate: b: rebuilding broken cache
fastannotate: b: 5 new changesets in the main branch
0 a: a
1 a: a
1 a: a
3 b: b4
4 b: c
3 b: b5

(difference explained below)

$ hg annotate -nf b --debug
fastannotate: b: using fast path (resolved fctx: False)
0 a: a
1 a: a
1 a: a
4 b: b4
4 b: c
4 b: b5

annotate after merge with -l
(fastannotate differs from annotate)

$ hg log -Gp -T '{rev}:{node}' -r '2..5'
@ 5:64afcdf8e29e063c635be123d8d2fb160af00f7e
|\
| o 4:5fbdc1152d97597717021ad9e063061b200f146bdiff --git a/b b/b
| | --- a/b
| | +++ b/b
| | @@ -1,3 +1,6 @@
| | a
| | a
| | a
| | +b4
| | +c
| | +b5
| |
o | 3:37ec9f5c3d1f99572d7075971cb4876e2139b52fdiff --git a/b b/b
|/ --- a/b
| +++ b/b
| @@ -1,3 +1,6 @@
| a
| a
| a
| +b4
| +b5
| +b6
|
o 2:3086dbafde1ce745abfc8d2d367847280aabae9ddiff --git a/a b/b
| copy from a
~ copy to b


(in this case, "b4", "b5" could be considered introduced by either rev 3, or rev 4.
and that causes the rev number difference)

$ hg annotate -nlf b --config fastannotate.modes=
0 a:1: a
1 a:2: a
1 a:3: a
3 b:4: b4
4 b:5: c
3 b:5: b5

$ hg annotate -nlf b
0 a:1: a
1 a:2: a
1 a:3: a
4 b:4: b4
4 b:5: c
4 b:6: b5

$ hg up -C 1
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg cp a b
$ cat <<EOF > b
> a
> z
> a
> EOF
$ hg ci -mc -d '3 0'
created new head
Work around the pure version not resolving the conflict like native code
#if pure
$ hg merge
merging b
warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
$ cat <<EOF > b
> a
> z
> a
> b4
> c
> b5
> EOF
$ hg resolve -m b
(no more unresolved files)
$ rm b.orig
#else
$ hg merge
merging b
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
#endif
$ echo d >> b
$ hg ci -mmerge2 -d '4 0'

annotate after rename merge

$ hg annotate -nf b
0 a: a
6 b: z
1 a: a
3 b: b4
4 b: c
3 b: b5
7 b: d

annotate after rename merge with -l
(fastannotate differs from annotate)

$ hg log -Gp -T '{rev}:{node}' -r '0+1+6+7'
@ 7:6284bb6c38fef984a929862a53bbc71ce9eafa81diff --git a/b b/b
|\ --- a/b
| : +++ b/b
| : @@ -1,3 +1,7 @@
| : a
| : z
| : a
| : +b4
| : +c
| : +b5
| : +d
| :
o : 6:b80e3e32f75a6a67cd4ac85496a11511e9112816diff --git a/a b/b
:/ copy from a
: copy to b
: --- a/a
: +++ b/b
: @@ -1,3 +1,3 @@
: -a (?)
: a
: +z
: a
: -a (?)
:
o 1:762f04898e6684ff713415f7b8a8d53d33f96c92diff --git a/a b/a
| --- a/a
| +++ b/a
| @@ -1,1 +1,3 @@
| a
| +a
| +a
|
o 0:8435f90966e442695d2ded29fdade2bac5ad8065diff --git a/a b/a
new file mode 100644
--- /dev/null
+++ b/a
@@ -0,0 +1,1 @@
+a


(note on question marks:
the upstream bdiff change (96f2f50d923f+3633403888ae+8c0c75aa3ff4+5c4e2636c1a9
+38ed54888617) alters the output so deletion is not always at the end of the
output. for example:
| a | b | old | new | # old: e1d6aa0e4c3a, new: 8836f13e3c5b
|-------------------|
| a | a | a | -a |
| a | z | +z | a |
| a | a | a | +z |
| | | -a | a |
|-------------------|
| a | a | a |
| a | a | a |
| a | | -a |
this leads to more question marks below)

(rev 1 adds two "a"s and rev 6 deletes one "a".
the "a" that rev 6 deletes could be either the first or the second "a" of those two "a"s added by rev 1.
and that causes the line number difference)

$ hg annotate -nlf b --config fastannotate.modes=
0 a:1: a
6 b:2: z
1 a:3: a
3 b:4: b4
4 b:5: c
3 b:5: b5
7 b:7: d

$ hg annotate -nlf b
0 a:1: a (?)
1 a:2: a (?)
6 b:2: z
1 a:2: a (?)
1 a:3: a (?)
3 b:4: b4
4 b:5: c
3 b:5: b5
7 b:7: d

Issue2807: alignment of line numbers with -l
(fastannotate differs from annotate, same reason as above)

$ echo more >> b
$ hg ci -mmore -d '5 0'
$ echo more >> b
$ hg ci -mmore -d '6 0'
$ echo more >> b
$ hg ci -mmore -d '7 0'
$ hg annotate -nlf b
0 a: 1: a (?)
1 a: 2: a (?)
6 b: 2: z
1 a: 2: a (?)
1 a: 3: a (?)
3 b: 4: b4
4 b: 5: c
3 b: 5: b5
7 b: 7: d
8 b: 8: more
9 b: 9: more
10 b:10: more

linkrev vs rev

$ hg annotate -r tip -n a
0: a
1: a
1: a

linkrev vs rev with -l

$ hg annotate -r tip -nl a
0:1: a
1:2: a
1:3: a

Issue589: "undelete" sequence leads to crash

annotate was crashing when trying to --follow something

like A -> B -> A

generate ABA rename configuration

$ echo foo > foo
$ hg add foo
$ hg ci -m addfoo
$ hg rename foo bar
$ hg ci -m renamefoo
$ hg rename bar foo
$ hg ci -m renamebar

annotate after ABA with follow

$ hg annotate --follow foo
foo: foo

missing file

$ hg ann nosuchfile
abort: nosuchfile: no such file in rev e9e6b4fa872f
[255]

annotate file without '\n' on last line

$ printf "" > c
$ hg ci -A -m test -u nobody -d '1 0'
adding c
$ hg annotate c
$ printf "a\nb" > c
$ hg ci -m test
$ hg annotate c
[0-9]+: a (re)
[0-9]+: b (re)

Issue3841: check annotation of a file whose filelog includes
merging between the revision and its ancestor

to reproduce the situation with recent Mercurial, this script uses (1)
"hg debugsetparents" to merge without the ancestor check done by "hg merge",
and (2) an extension that allows filelog merging between the revision
and its ancestor by wrapping "commit._filecommit".

$ cat > ../legacyrepo.py <<EOF
- > from mercurial import error, node
- > def reposetup(ui, repo):
- >     class legacyrepo(repo.__class__):
- >         def _filecommit(self, fctx, manifest1, manifest2,
- >                         linkrev, tr, includecopymeta):
- >             fname = fctx.path()
- >             text = fctx.data()
- >             flog = self.file(fname)
- >             fparent1 = manifest1.get(fname, node.nullid)
- >             fparent2 = manifest2.get(fname, node.nullid)
- >             meta = {}
- >             copy = fctx.renamed()
- >             if copy and copy[0] != fname:
- >                 raise error.Abort('copying is not supported')
- >             if fparent2 != node.nullid:
- >                 return flog.add(text, meta, tr, linkrev,
- >                                 fparent1, fparent2), 'modified'
- >             raise error.Abort('only merging is supported')
- >     repo.__class__ = legacyrepo
+ > from __future__ import absolute_import
+ > from mercurial import commit, error, extensions, node
+ > def _filecommit(orig, repo, fctx, manifest1, manifest2,
+ >                 linkrev, tr, includecopymeta):
+ >     fname = fctx.path()
+ >     text = fctx.data()
+ >     flog = repo.file(fname)
+ >     fparent1 = manifest1.get(fname, node.nullid)
+ >     fparent2 = manifest2.get(fname, node.nullid)
+ >     meta = {}
+ >     copy = fctx.copysource()
+ >     if copy and copy != fname:
+ >         raise error.Abort('copying is not supported')
+ >     if fparent2 != node.nullid:
+ >         return flog.add(text, meta, tr, linkrev,
+ >                         fparent1, fparent2), 'modified'
+ >     raise error.Abort('only merging is supported')
+ > def uisetup(ui):
+ >     extensions.wrapfunction(commit, '_filecommit', _filecommit)
> EOF
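
The rewritten helper switches from installing a repository subclass in
reposetup() to wrapping a module-level function, since _filecommit() now
lives in the mercurial.commit module rather than on localrepository. A
minimal sketch of that wrapping pattern (the pass-through wrapper below is
illustrative, not the test's implementation):

  from mercurial import commit, extensions

  def _filecommit(orig, repo, *args, **kwargs):
      # 'orig' is the unwrapped mercurial.commit._filecommit; a wrapper can
      # inspect or adjust the arguments before delegating to it.
      return orig(repo, *args, **kwargs)

  def uisetup(ui):
      # Install the wrapper once, when the extension is loaded.
      extensions.wrapfunction(commit, '_filecommit', _filecommit)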

$ cat > baz <<EOF
> 1
> 2
> 3
> 4
> 5
> EOF
$ hg add baz
$ hg commit -m "baz:0"

$ cat > baz <<EOF
> 1 baz:1
> 2
> 3
> 4
> 5
> EOF
$ hg commit -m "baz:1"

$ cat > baz <<EOF
> 1 baz:1
> 2 baz:2
> 3
> 4
> 5
> EOF
$ hg debugsetparents 17 17
$ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
$ hg debugindexdot baz
digraph G {
-1 -> 0
0 -> 1
1 -> 2
1 -> 2
}
$ hg annotate baz
17: 1 baz:1
18: 2 baz:2
16: 3
16: 4
16: 5

$ cat > baz <<EOF
> 1 baz:1
> 2 baz:2
> 3 baz:3
> 4
> 5
> EOF
$ hg commit -m "baz:3"

$ cat > baz <<EOF
> 1 baz:1
> 2 baz:2
> 3 baz:3
> 4 baz:4
> 5
> EOF
$ hg debugsetparents 19 18
$ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
$ hg debugindexdot baz
digraph G {
-1 -> 0
0 -> 1
1 -> 2
1 -> 2
2 -> 3
3 -> 4
2 -> 4
}
$ hg annotate baz
17: 1 baz:1
18: 2 baz:2
19: 3 baz:3
20: 4 baz:4
16: 5

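The debugindexdot output above is plain Graphviz DOT, so the filelog graph
can be rendered directly when the graphviz tools are available (a side note,
not part of the test):

  $ hg debugindexdot baz > baz.dot
  $ dot -Tpng baz.dot -o baz.png
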
annotate clean file

$ hg annotate -ncr "wdir()" foo
11 472b18db256d : foo

annotate modified file

$ echo foofoo >> foo
$ hg annotate -r "wdir()" foo
11 : foo
20+: foofoo

$ hg annotate -cr "wdir()" foo
472b18db256d : foo
b6bedd5477e7+: foofoo

$ hg annotate -ncr "wdir()" foo
11 472b18db256d : foo
20 b6bedd5477e7+: foofoo

$ hg annotate --debug -ncr "wdir()" foo
11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo

$ hg annotate -udr "wdir()" foo
test Thu Jan 01 00:00:00 1970 +0000: foo
test [A-Za-z0-9:+ ]+: foofoo (re)

$ hg annotate -ncr "wdir()" -Tjson foo
[
{
"lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": "ffffffffffffffffffffffffffffffffffffffff", "rev": 2147483647}],
"path": "foo"
}
]

annotate added file

$ echo bar > bar
$ hg add bar
$ hg annotate -ncr "wdir()" bar
20 b6bedd5477e7+: bar

annotate renamed file

$ hg rename foo renamefoo2
$ hg annotate -ncr "wdir()" renamefoo2
11 472b18db256d : foo
20 b6bedd5477e7+: foofoo

annotate missing file

$ rm baz
$ hg annotate -ncr "wdir()" baz
abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
[255]

annotate removed file

$ hg rm baz
$ hg annotate -ncr "wdir()" baz
abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
[255]

Test annotate with whitespace options

$ cd ..
$ hg init repo-ws
$ cd repo-ws
$ cat > a <<EOF
> aa
>
> b b
> EOF
$ hg ci -Am "adda"
adding a
$ sed 's/EOL$//g' > a <<EOF
> a a
>
> EOL
> b b
> EOF
$ hg ci -m "changea"

Annotate with no option

$ hg annotate a
1: a a
0:
1:
1: b b

Annotate with --ignore-space-change

$ hg annotate --ignore-space-change a
1: a a
1:
0:
0: b b

Annotate with --ignore-all-space

$ hg annotate --ignore-all-space a
0: a a
0:
1:
0: b b

Annotate with --ignore-blank-lines (similar to no options case)

$ hg annotate --ignore-blank-lines a
1: a a
0:
1:
1: b b

$ cd ..

Annotate with linkrev pointing to another branch
------------------------------------------------

create history with a filerev whose linkrev points to another branch

$ hg init branchedlinkrev
$ cd branchedlinkrev
$ echo A > a
$ hg commit -Am 'contentA'
adding a
$ echo B >> a
$ hg commit -m 'contentB'
$ hg up --rev 'desc(contentA)'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo unrelated > unrelated
$ hg commit -Am 'unrelated'
adding unrelated
created new head
$ hg graft -r 'desc(contentB)'
grafting 1:fd27c222e3e6 "contentB"
$ echo C >> a
$ hg commit -m 'contentC'
$ echo W >> a
$ hg log -G
@ changeset: 4:072f1e8df249
| tag: tip
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: contentC
|
o changeset: 3:ff38df03cc4b
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: contentB
|
o changeset: 2:62aaf3f6fc06
| parent: 0:f0932f74827e
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: unrelated
|
| o changeset: 1:fd27c222e3e6
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: contentB
|
o changeset: 0:f0932f74827e
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: contentA


Annotate should list ancestor of starting revision only

$ hg annotate a
0: A
3: B
4: C

$ hg annotate a -r 'wdir()'
0 : A
3 : B
4 : C
4+: W

Even when the starting revision is the linkrev-shadowed one:

$ hg annotate a -r 3
0: A
3: B

$ cd ..

Issue5360: Deleted chunk in p1 of a merge changeset

$ hg init repo-5360
$ cd repo-5360
$ echo 1 > a
$ hg commit -A a -m 1
$ echo 2 >> a
$ hg commit -m 2
$ echo a > a
$ hg commit -m a
$ hg update '.^' -q
$ echo 3 >> a
$ hg commit -m 3 -q
$ hg merge 2 -q
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
[1]
$ cat a
<<<<<<< working copy: 0a068f0261cf - test: 3
1
2
3
||||||| base
1
2
=======
a
>>>>>>> merge rev: 9409851bc20a - test: a
$ cat > a << EOF
> b
> 1
> 2
> 3
> a
> EOF
$ hg resolve --mark -q
$ rm a.orig
$ hg commit -m m
$ hg annotate a
4: b
0: 1
1: 2
3: 3
2: a

$ cd ..