##// END OF EJS Templates
commitctx: extract _filecommit too...
marmoute -
r45760:ce9ee81d default
parent child Browse files
Show More
@@ -1,215 +1,354
1 1 # commit.py - function to perform commit
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 import errno
9 9 import weakref
10 10
11 11 from .i18n import _
12 12 from .node import (
13 13 hex,
14 nullid,
14 15 nullrev,
15 16 )
16 17
17 18 from . import (
19 context,
20 mergestate,
18 21 metadata,
19 22 phases,
20 23 scmutil,
21 24 subrepoutil,
22 25 )
23 26
24 27
25 28 def commitctx(repo, ctx, error=False, origctx=None):
26 29 """Add a new revision to the target repository.
27 30 Revision information is passed via the context argument.
28 31
29 32 ctx.files() should list all files involved in this commit, i.e.
30 33 modified/added/removed files. On merge, it may be wider than the
31 34 ctx.files() to be committed, since any file nodes derived directly
32 35 from p1 or p2 are excluded from the committed ctx.files().
33 36
34 37 origctx is for convert to work around the problem that bug
35 38 fixes to the files list in changesets change hashes. For
36 39 convert to be the identity, it can pass an origctx and this
37 40 function will use the same files list when it makes sense to
38 41 do so.
39 42 """
40 43 repo = repo.unfiltered()
41 44
42 45 p1, p2 = ctx.p1(), ctx.p2()
43 46 user = ctx.user()
44 47
45 48 if repo.filecopiesmode == b'changeset-sidedata':
46 49 writechangesetcopy = True
47 50 writefilecopymeta = True
48 51 writecopiesto = None
49 52 else:
50 53 writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
51 54 writefilecopymeta = writecopiesto != b'changeset-only'
52 55 writechangesetcopy = writecopiesto in (
53 56 b'changeset-only',
54 57 b'compatibility',
55 58 )
56 59 p1copies, p2copies = None, None
57 60 if writechangesetcopy:
58 61 p1copies = ctx.p1copies()
59 62 p2copies = ctx.p2copies()
60 63 filesadded, filesremoved = None, None
61 64 with repo.lock(), repo.transaction(b"commit") as tr:
62 65 trp = weakref.proxy(tr)
63 66
64 67 if ctx.manifestnode():
65 68 # reuse an existing manifest revision
66 69 repo.ui.debug(b'reusing known manifest\n')
67 70 mn = ctx.manifestnode()
68 71 files = ctx.files()
69 72 if writechangesetcopy:
70 73 filesadded = ctx.filesadded()
71 74 filesremoved = ctx.filesremoved()
72 75 elif not ctx.files():
73 76 repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
74 77 mn = p1.manifestnode()
75 78 files = []
76 79 else:
77 80 m1ctx = p1.manifestctx()
78 81 m2ctx = p2.manifestctx()
79 82 mctx = m1ctx.copy()
80 83
81 84 m = mctx.read()
82 85 m1 = m1ctx.read()
83 86 m2 = m2ctx.read()
84 87
85 88 # check in files
86 89 added = []
87 90 filesadded = []
88 91 removed = list(ctx.removed())
89 92 touched = []
90 93 linkrev = len(repo)
91 94 repo.ui.note(_(b"committing files:\n"))
92 95 uipathfn = scmutil.getuipathfn(repo)
93 96 for f in sorted(ctx.modified() + ctx.added()):
94 97 repo.ui.note(uipathfn(f) + b"\n")
95 98 try:
96 99 fctx = ctx[f]
97 100 if fctx is None:
98 101 removed.append(f)
99 102 else:
100 103 added.append(f)
101 m[f], is_touched = repo._filecommit(
102 fctx, m1, m2, linkrev, trp, writefilecopymeta,
104 m[f], is_touched = _filecommit(
105 repo, fctx, m1, m2, linkrev, trp, writefilecopymeta,
103 106 )
104 107 if is_touched:
105 108 touched.append(f)
106 109 if writechangesetcopy and is_touched == 'added':
107 110 filesadded.append(f)
108 111 m.setflag(f, fctx.flags())
109 112 except OSError:
110 113 repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
111 114 raise
112 115 except IOError as inst:
113 116 errcode = getattr(inst, 'errno', errno.ENOENT)
114 117 if error or errcode and errcode != errno.ENOENT:
115 118 repo.ui.warn(
116 119 _(b"trouble committing %s!\n") % uipathfn(f)
117 120 )
118 121 raise
119 122
120 123 # update manifest
121 124 removed = [f for f in removed if f in m1 or f in m2]
122 125 drop = sorted([f for f in removed if f in m])
123 126 for f in drop:
124 127 del m[f]
125 128 if p2.rev() != nullrev:
126 129 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
127 130 removed = [f for f in removed if not rf(f)]
128 131
129 132 touched.extend(removed)
130 133
131 134 if writechangesetcopy:
132 135 filesremoved = removed
133 136
134 137 files = touched
135 138 md = None
136 139 if not files:
137 140 # if no "files" actually changed in terms of the changelog,
138 141 # try hard to detect unmodified manifest entry so that the
139 142 # exact same commit can be reproduced later on convert.
140 143 md = m1.diff(m, scmutil.matchfiles(repo, ctx.files()))
141 144 if not files and md:
142 145 repo.ui.debug(
143 146 b'not reusing manifest (no file change in '
144 147 b'changelog, but manifest differs)\n'
145 148 )
146 149 if files or md:
147 150 repo.ui.note(_(b"committing manifest\n"))
148 151 # we're using narrowmatch here since it's already applied at
149 152 # other stages (such as dirstate.walk), so we're already
150 153 # ignoring things outside of narrowspec in most cases. The
151 154 # one case where we might have files outside the narrowspec
152 155 # at this point is merges, and we already error out in the
153 156 # case where the merge has files outside of the narrowspec,
154 157 # so this is safe.
155 158 mn = mctx.write(
156 159 trp,
157 160 linkrev,
158 161 p1.manifestnode(),
159 162 p2.manifestnode(),
160 163 added,
161 164 drop,
162 165 match=repo.narrowmatch(),
163 166 )
164 167 else:
165 168 repo.ui.debug(
166 169 b'reusing manifest from p1 (listed files '
167 170 b'actually unchanged)\n'
168 171 )
169 172 mn = p1.manifestnode()
170 173
171 174 if writecopiesto == b'changeset-only':
172 175 # If writing only to changeset extras, use None to indicate that
173 176 # no entry should be written. If writing to both, write an empty
174 177 # entry to prevent the reader from falling back to reading
175 178 # filelogs.
176 179 p1copies = p1copies or None
177 180 p2copies = p2copies or None
178 181 filesadded = filesadded or None
179 182 filesremoved = filesremoved or None
180 183
181 184 if origctx and origctx.manifestnode() == mn:
182 185 files = origctx.files()
183 186
184 187 # update changelog
185 188 repo.ui.note(_(b"committing changelog\n"))
186 189 repo.changelog.delayupdate(tr)
187 190 n = repo.changelog.add(
188 191 mn,
189 192 files,
190 193 ctx.description(),
191 194 trp,
192 195 p1.node(),
193 196 p2.node(),
194 197 user,
195 198 ctx.date(),
196 199 ctx.extra().copy(),
197 200 p1copies,
198 201 p2copies,
199 202 filesadded,
200 203 filesremoved,
201 204 )
202 205 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
203 206 repo.hook(
204 207 b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
205 208 )
206 209 # set the new commit in its proper phase
207 210 targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
208 211 if targetphase:
209 212 # retracting the boundary does not alter the parent changeset.
210 213 # if a parent has a higher phase, the resulting phase will
211 214 # be compliant anyway
212 215 #
213 216 # if minimal phase was 0 we don't need to retract anything
214 217 phases.registernew(repo, tr, targetphase, [n])
215 218 return n
219
220
221 def _filecommit(
222 repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
223 ):
224 """
225 commit an individual file as part of a larger transaction
226
227 input:
228
229 fctx: a file context with the content we are trying to commit
230 manifest1: manifest of changeset first parent
231 manifest2: manifest of changeset second parent
232 linkrev: revision number of the changeset being created
233 tr: current transaction
234 includecopymeta: boolean, set to False to skip storing the copy data
235 (only used by the Google specific feature of using
236 changeset extra as copy source of truth).
237
238 output: (filenode, touched)
239
240 filenode: the filenode that should be used by this changeset
241 touched: one of: None, 'added' or 'modified'
242 """
243
244 fname = fctx.path()
245 fparent1 = manifest1.get(fname, nullid)
246 fparent2 = manifest2.get(fname, nullid)
247 touched = None
248 if fparent1 == fparent2 == nullid:
249 touched = 'added'
250
251 if isinstance(fctx, context.filectx):
252 # This block fast path most comparisons which are usually done. It
253 # assumes that bare filectx is used and no merge happened, hence no
254 # need to create a new file revision in this case.
255 node = fctx.filenode()
256 if node in [fparent1, fparent2]:
257 repo.ui.debug(b'reusing %s filelog entry\n' % fname)
258 if (
259 fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
260 ) or (
261 fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
262 ):
263 touched = 'modified'
264 return node, touched
265
266 flog = repo.file(fname)
267 meta = {}
268 cfname = fctx.copysource()
269 fnode = None
270
271 if cfname and cfname != fname:
272 # Mark the new revision of this file as a copy of another
273 # file. This copy data will effectively act as a parent
274 # of this new revision. If this is a merge, the first
275 # parent will be the nullid (meaning "look up the copy data")
276 # and the second one will be the other parent. For example:
277 #
278 # 0 --- 1 --- 3 rev1 changes file foo
279 # \ / rev2 renames foo to bar and changes it
280 # \- 2 -/ rev3 should have bar with all changes and
281 # should record that bar descends from
282 # bar in rev2 and foo in rev1
283 #
284 # this allows this merge to succeed:
285 #
286 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
287 # \ / merging rev3 and rev4 should use bar@rev2
288 # \- 2 --- 4 as the merge base
289 #
290
291 cnode = manifest1.get(cfname)
292 newfparent = fparent2
293
294 if manifest2: # branch merge
295 if fparent2 == nullid or cnode is None: # copied on remote side
296 if cfname in manifest2:
297 cnode = manifest2[cfname]
298 newfparent = fparent1
299
300 # Here, we used to search backwards through history to try to find
301 # where the file copy came from if the source of a copy was not in
302 # the parent directory. However, this doesn't actually make sense to
303 # do (what does a copy from something not in your working copy even
304 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
305 # the user that copy information was dropped, so if they didn't
306 # expect this outcome it can be fixed, but this is the correct
307 # behavior in this circumstance.
308
309 if cnode:
310 repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
311 if includecopymeta:
312 meta[b"copy"] = cfname
313 meta[b"copyrev"] = hex(cnode)
314 fparent1, fparent2 = nullid, newfparent
315 else:
316 repo.ui.warn(
317 _(
318 b"warning: can't find ancestor for '%s' "
319 b"copied from '%s'!\n"
320 )
321 % (fname, cfname)
322 )
323
324 elif fparent1 == nullid:
325 fparent1, fparent2 = fparent2, nullid
326 elif fparent2 != nullid:
327 # is one parent an ancestor of the other?
328 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
329 if fparent1 in fparentancestors:
330 fparent1, fparent2 = fparent2, nullid
331 elif fparent2 in fparentancestors:
332 fparent2 = nullid
333 elif not fparentancestors:
334 # TODO: this whole if-else might be simplified much more
335 ms = mergestate.mergestate.read(repo)
336 if (
337 fname in ms
338 and ms[fname] == mergestate.MERGE_RECORD_MERGED_OTHER
339 ):
340 fparent1, fparent2 = fparent2, nullid
341
342 # is the file changed?
343 text = fctx.data()
344 if fparent2 != nullid or meta or flog.cmp(fparent1, text):
345 if touched is None: # do not overwrite added
346 touched = 'modified'
347 fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
348 # are just the flags changed during merge?
349 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
350 touched = 'modified'
351 fnode = fparent1
352 else:
353 fnode = fparent1
354 return fnode, touched
@@ -1,3612 +1,3473
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 commit,
36 36 context,
37 37 dirstate,
38 38 dirstateguard,
39 39 discovery,
40 40 encoding,
41 41 error,
42 42 exchange,
43 43 extensions,
44 44 filelog,
45 45 hook,
46 46 lock as lockmod,
47 47 match as matchmod,
48 48 mergestate as mergestatemod,
49 49 mergeutil,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 rcutil,
58 58 repoview,
59 59 revset,
60 60 revsetlang,
61 61 scmutil,
62 62 sparse,
63 63 store as storemod,
64 64 subrepoutil,
65 65 tags as tagsmod,
66 66 transaction,
67 67 txnutil,
68 68 util,
69 69 vfs as vfsmod,
70 70 )
71 71
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76
77 77 from .utils import (
78 78 hashutil,
79 79 procutil,
80 80 stringutil,
81 81 )
82 82
83 83 from .revlogutils import constants as revlogconst
84 84
85 85 release = lockmod.release
86 86 urlerr = util.urlerr
87 87 urlreq = util.urlreq
88 88
89 89 # set of (path, vfs-location) tuples. vfs-location is:
90 90 # - 'plain' for vfs relative paths
91 91 # - '' for svfs relative paths
92 92 _cachedfiles = set()
93 93
94 94
95 95 class _basefilecache(scmutil.filecache):
96 96 """All filecache usage on repo is done for logic that should be unfiltered
97 97 """
98 98
99 99 def __get__(self, repo, type=None):
100 100 if repo is None:
101 101 return self
102 102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 103 unfi = repo.unfiltered()
104 104 try:
105 105 return unfi.__dict__[self.sname]
106 106 except KeyError:
107 107 pass
108 108 return super(_basefilecache, self).__get__(unfi, type)
109 109
110 110 def set(self, repo, value):
111 111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112 112
113 113
114 114 class repofilecache(_basefilecache):
115 115 """filecache for files in .hg but outside of .hg/store"""
116 116
117 117 def __init__(self, *paths):
118 118 super(repofilecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, b'plain'))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.vfs.join(fname)
124 124
125 125
126 126 class storecache(_basefilecache):
127 127 """filecache for files in the store"""
128 128
129 129 def __init__(self, *paths):
130 130 super(storecache, self).__init__(*paths)
131 131 for path in paths:
132 132 _cachedfiles.add((path, b''))
133 133
134 134 def join(self, obj, fname):
135 135 return obj.sjoin(fname)
136 136
137 137
138 138 class mixedrepostorecache(_basefilecache):
139 139 """filecache for a mix of files in .hg/store and outside"""
140 140
141 141 def __init__(self, *pathsandlocations):
142 142 # scmutil.filecache only uses the path for passing back into our
143 143 # join(), so we can safely pass a list of paths and locations
144 144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 145 _cachedfiles.update(pathsandlocations)
146 146
147 147 def join(self, obj, fnameandlocation):
148 148 fname, location = fnameandlocation
149 149 if location == b'plain':
150 150 return obj.vfs.join(fname)
151 151 else:
152 152 if location != b'':
153 153 raise error.ProgrammingError(
154 154 b'unexpected location: %s' % location
155 155 )
156 156 return obj.sjoin(fname)
157 157
158 158
159 159 def isfilecached(repo, name):
160 160 """check if a repo has already cached "name" filecache-ed property
161 161
162 162 This returns (cachedobj-or-None, iscached) tuple.
163 163 """
164 164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 165 if not cacheentry:
166 166 return None, False
167 167 return cacheentry.obj, True
168 168
169 169
170 170 class unfilteredpropertycache(util.propertycache):
171 171 """propertycache that apply to unfiltered repo only"""
172 172
173 173 def __get__(self, repo, type=None):
174 174 unfi = repo.unfiltered()
175 175 if unfi is repo:
176 176 return super(unfilteredpropertycache, self).__get__(unfi)
177 177 return getattr(unfi, self.name)
178 178
179 179
180 180 class filteredpropertycache(util.propertycache):
181 181 """propertycache that must take filtering in account"""
182 182
183 183 def cachevalue(self, obj, value):
184 184 object.__setattr__(obj, self.name, value)
185 185
186 186
187 187 def hasunfilteredcache(repo, name):
188 188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 189 return name in vars(repo.unfiltered())
190 190
191 191
192 192 def unfilteredmethod(orig):
193 193 """decorate method that always need to be run on unfiltered version"""
194 194
195 195 def wrapper(repo, *args, **kwargs):
196 196 return orig(repo.unfiltered(), *args, **kwargs)
197 197
198 198 return wrapper
199 199
200 200
201 201 moderncaps = {
202 202 b'lookup',
203 203 b'branchmap',
204 204 b'pushkey',
205 205 b'known',
206 206 b'getbundle',
207 207 b'unbundle',
208 208 }
209 209 legacycaps = moderncaps.union({b'changegroupsubset'})
210 210
211 211
212 212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 213 class localcommandexecutor(object):
214 214 def __init__(self, peer):
215 215 self._peer = peer
216 216 self._sent = False
217 217 self._closed = False
218 218
219 219 def __enter__(self):
220 220 return self
221 221
222 222 def __exit__(self, exctype, excvalue, exctb):
223 223 self.close()
224 224
225 225 def callcommand(self, command, args):
226 226 if self._sent:
227 227 raise error.ProgrammingError(
228 228 b'callcommand() cannot be used after sendcommands()'
229 229 )
230 230
231 231 if self._closed:
232 232 raise error.ProgrammingError(
233 233 b'callcommand() cannot be used after close()'
234 234 )
235 235
236 236 # We don't need to support anything fancy. Just call the named
237 237 # method on the peer and return a resolved future.
238 238 fn = getattr(self._peer, pycompat.sysstr(command))
239 239
240 240 f = pycompat.futures.Future()
241 241
242 242 try:
243 243 result = fn(**pycompat.strkwargs(args))
244 244 except Exception:
245 245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 246 else:
247 247 f.set_result(result)
248 248
249 249 return f
250 250
251 251 def sendcommands(self):
252 252 self._sent = True
253 253
254 254 def close(self):
255 255 self._closed = True
256 256
257 257
258 258 @interfaceutil.implementer(repository.ipeercommands)
259 259 class localpeer(repository.peer):
260 260 '''peer for a local repo; reflects only the most recent API'''
261 261
262 262 def __init__(self, repo, caps=None):
263 263 super(localpeer, self).__init__()
264 264
265 265 if caps is None:
266 266 caps = moderncaps.copy()
267 267 self._repo = repo.filtered(b'served')
268 268 self.ui = repo.ui
269 269 self._caps = repo._restrictcapabilities(caps)
270 270
271 271 # Begin of _basepeer interface.
272 272
273 273 def url(self):
274 274 return self._repo.url()
275 275
276 276 def local(self):
277 277 return self._repo
278 278
279 279 def peer(self):
280 280 return self
281 281
282 282 def canpush(self):
283 283 return True
284 284
285 285 def close(self):
286 286 self._repo.close()
287 287
288 288 # End of _basepeer interface.
289 289
290 290 # Begin of _basewirecommands interface.
291 291
292 292 def branchmap(self):
293 293 return self._repo.branchmap()
294 294
295 295 def capabilities(self):
296 296 return self._caps
297 297
298 298 def clonebundles(self):
299 299 return self._repo.tryread(b'clonebundles.manifest')
300 300
301 301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 302 """Used to test argument passing over the wire"""
303 303 return b"%s %s %s %s %s" % (
304 304 one,
305 305 two,
306 306 pycompat.bytestr(three),
307 307 pycompat.bytestr(four),
308 308 pycompat.bytestr(five),
309 309 )
310 310
311 311 def getbundle(
312 312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 313 ):
314 314 chunks = exchange.getbundlechunks(
315 315 self._repo,
316 316 source,
317 317 heads=heads,
318 318 common=common,
319 319 bundlecaps=bundlecaps,
320 320 **kwargs
321 321 )[1]
322 322 cb = util.chunkbuffer(chunks)
323 323
324 324 if exchange.bundle2requested(bundlecaps):
325 325 # When requesting a bundle2, getbundle returns a stream to make the
326 326 # wire level function happier. We need to build a proper object
327 327 # from it in local peer.
328 328 return bundle2.getunbundler(self.ui, cb)
329 329 else:
330 330 return changegroup.getunbundler(b'01', cb, None)
331 331
332 332 def heads(self):
333 333 return self._repo.heads()
334 334
335 335 def known(self, nodes):
336 336 return self._repo.known(nodes)
337 337
338 338 def listkeys(self, namespace):
339 339 return self._repo.listkeys(namespace)
340 340
341 341 def lookup(self, key):
342 342 return self._repo.lookup(key)
343 343
344 344 def pushkey(self, namespace, key, old, new):
345 345 return self._repo.pushkey(namespace, key, old, new)
346 346
347 347 def stream_out(self):
348 348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349 349
350 350 def unbundle(self, bundle, heads, url):
351 351 """apply a bundle on a repo
352 352
353 353 This function handles the repo locking itself."""
354 354 try:
355 355 try:
356 356 bundle = exchange.readbundle(self.ui, bundle, None)
357 357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 358 if util.safehasattr(ret, b'getchunks'):
359 359 # This is a bundle20 object, turn it into an unbundler.
360 360 # This little dance should be dropped eventually when the
361 361 # API is finally improved.
362 362 stream = util.chunkbuffer(ret.getchunks())
363 363 ret = bundle2.getunbundler(self.ui, stream)
364 364 return ret
365 365 except Exception as exc:
366 366 # If the exception contains output salvaged from a bundle2
367 367 # reply, we need to make sure it is printed before continuing
368 368 # to fail. So we build a bundle2 with such output and consume
369 369 # it directly.
370 370 #
371 371 # This is not very elegant but allows a "simple" solution for
372 372 # issue4594
373 373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 374 if output:
375 375 bundler = bundle2.bundle20(self._repo.ui)
376 376 for out in output:
377 377 bundler.addpart(out)
378 378 stream = util.chunkbuffer(bundler.getchunks())
379 379 b = bundle2.getunbundler(self.ui, stream)
380 380 bundle2.processbundle(self._repo, b)
381 381 raise
382 382 except error.PushRaced as exc:
383 383 raise error.ResponseError(
384 384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 385 )
386 386
387 387 # End of _basewirecommands interface.
388 388
389 389 # Begin of peer interface.
390 390
391 391 def commandexecutor(self):
392 392 return localcommandexecutor(self)
393 393
394 394 # End of peer interface.
395 395
396 396
397 397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 398 class locallegacypeer(localpeer):
399 399 '''peer extension which implements legacy methods too; used for tests with
400 400 restricted capabilities'''
401 401
402 402 def __init__(self, repo):
403 403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404 404
405 405 # Begin of baselegacywirecommands interface.
406 406
407 407 def between(self, pairs):
408 408 return self._repo.between(pairs)
409 409
410 410 def branches(self, nodes):
411 411 return self._repo.branches(nodes)
412 412
413 413 def changegroup(self, nodes, source):
414 414 outgoing = discovery.outgoing(
415 415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
416 416 )
417 417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418 418
419 419 def changegroupsubset(self, bases, heads, source):
420 420 outgoing = discovery.outgoing(
421 421 self._repo, missingroots=bases, ancestorsof=heads
422 422 )
423 423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424 424
425 425 # End of baselegacywirecommands interface.
426 426
427 427
428 428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 429 # clients.
430 430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431 431
432 432 # A repository with the sparserevlog feature will have delta chains that
433 433 # can spread over a larger span. Sparse reading cuts these large spans into
434 434 # pieces, so that each piece isn't too big.
435 435 # Without the sparserevlog capability, reading from the repository could use
436 436 # huge amounts of memory, because the whole span would be read at once,
437 437 # including all the intermediate revisions that aren't pertinent for the chain.
438 438 # This is why once a repository has enabled sparse-read, it becomes required.
439 439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440 440
441 441 # A repository with the sidedataflag requirement will allow to store extra
442 442 # information for revision without altering their original hashes.
443 443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444 444
445 445 # A repository with the copies-sidedata-changeset requirement will store
446 446 # copies related information in changeset's sidedata.
447 447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448 448
449 449 # The repository use persistent nodemap for the changelog and the manifest.
450 450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451 451
452 452 # Functions receiving (ui, features) that extensions can register to impact
453 453 # the ability to load repositories with custom requirements. Only
454 454 # functions defined in loaded extensions are called.
455 455 #
456 456 # The function receives a set of requirement strings that the repository
457 457 # is capable of opening. Functions will typically add elements to the
458 458 # set to reflect that the extension knows how to handle that requirements.
459 459 featuresetupfuncs = set()
460 460
461 461
462 462 def makelocalrepository(baseui, path, intents=None):
463 463 """Create a local repository object.
464 464
465 465 Given arguments needed to construct a local repository, this function
466 466 performs various early repository loading functionality (such as
467 467 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
468 468 the repository can be opened, derives a type suitable for representing
469 469 that repository, and returns an instance of it.
470 470
471 471 The returned object conforms to the ``repository.completelocalrepository``
472 472 interface.
473 473
474 474 The repository type is derived by calling a series of factory functions
475 475 for each aspect/interface of the final repository. These are defined by
476 476 ``REPO_INTERFACES``.
477 477
478 478 Each factory function is called to produce a type implementing a specific
479 479 interface. The cumulative list of returned types will be combined into a
480 480 new type and that type will be instantiated to represent the local
481 481 repository.
482 482
483 483 The factory functions each receive various state that may be consulted
484 484 as part of deriving a type.
485 485
486 486 Extensions should wrap these factory functions to customize repository type
487 487 creation. Note that an extension's wrapped function may be called even if
488 488 that extension is not loaded for the repo being constructed. Extensions
489 489 should check if their ``__name__`` appears in the
490 490 ``extensionmodulenames`` set passed to the factory function and no-op if
491 491 not.
492 492 """
493 493 ui = baseui.copy()
494 494 # Prevent copying repo configuration.
495 495 ui.copy = baseui.copy
496 496
497 497 # Working directory VFS rooted at repository root.
498 498 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
499 499
500 500 # Main VFS for .hg/ directory.
501 501 hgpath = wdirvfs.join(b'.hg')
502 502 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
503 503
504 504 # The .hg/ path should exist and should be a directory. All other
505 505 # cases are errors.
506 506 if not hgvfs.isdir():
507 507 try:
508 508 hgvfs.stat()
509 509 except OSError as e:
510 510 if e.errno != errno.ENOENT:
511 511 raise
512 512 except ValueError as e:
513 513 # Can be raised on Python 3.8 when path is invalid.
514 514 raise error.Abort(
515 515 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
516 516 )
517 517
518 518 raise error.RepoError(_(b'repository %s not found') % path)
519 519
520 520 # .hg/requires file contains a newline-delimited list of
521 521 # features/capabilities the opener (us) must have in order to use
522 522 # the repository. This file was introduced in Mercurial 0.9.2,
523 523 # which means very old repositories may not have one. We assume
524 524 # a missing file translates to no requirements.
525 525 try:
526 526 requirements = set(hgvfs.read(b'requires').splitlines())
527 527 except IOError as e:
528 528 if e.errno != errno.ENOENT:
529 529 raise
530 530 requirements = set()
531 531
532 532 # The .hg/hgrc file may load extensions or contain config options
533 533 # that influence repository construction. Attempt to load it and
534 534 # process any new extensions that it may have pulled in.
535 535 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
536 536 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
537 537 extensions.loadall(ui)
538 538 extensions.populateui(ui)
539 539
540 540 # Set of module names of extensions loaded for this repository.
541 541 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
542 542
543 543 supportedrequirements = gathersupportedrequirements(ui)
544 544
545 545 # We first validate the requirements are known.
546 546 ensurerequirementsrecognized(requirements, supportedrequirements)
547 547
548 548 # Then we validate that the known set is reasonable to use together.
549 549 ensurerequirementscompatible(ui, requirements)
550 550
551 551 # TODO there are unhandled edge cases related to opening repositories with
552 552 # shared storage. If storage is shared, we should also test for requirements
553 553 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
554 554 # that repo, as that repo may load extensions needed to open it. This is a
555 555 # bit complicated because we don't want the other hgrc to overwrite settings
556 556 # in this hgrc.
557 557 #
558 558 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
559 559 # file when sharing repos. But if a requirement is added after the share is
560 560 # performed, thereby introducing a new requirement for the opener, we may
561 561 # will not see that and could encounter a run-time error interacting with
562 562 # that shared store since it has an unknown-to-us requirement.
563 563
564 564 # At this point, we know we should be capable of opening the repository.
565 565 # Now get on with doing that.
566 566
567 567 features = set()
568 568
569 569 # The "store" part of the repository holds versioned data. How it is
570 570 # accessed is determined by various requirements. The ``shared`` or
571 571 # ``relshared`` requirements indicate the store lives in the path contained
572 572 # in the ``.hg/sharedpath`` file. This is an absolute path for
573 573 # ``shared`` and relative to ``.hg/`` for ``relshared``.
574 574 if b'shared' in requirements or b'relshared' in requirements:
575 575 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
576 576 if b'relshared' in requirements:
577 577 sharedpath = hgvfs.join(sharedpath)
578 578
579 579 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
580 580
581 581 if not sharedvfs.exists():
582 582 raise error.RepoError(
583 583 _(b'.hg/sharedpath points to nonexistent directory %s')
584 584 % sharedvfs.base
585 585 )
586 586
587 587 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
588 588
589 589 storebasepath = sharedvfs.base
590 590 cachepath = sharedvfs.join(b'cache')
591 591 else:
592 592 storebasepath = hgvfs.base
593 593 cachepath = hgvfs.join(b'cache')
594 594 wcachepath = hgvfs.join(b'wcache')
595 595
596 596 # The store has changed over time and the exact layout is dictated by
597 597 # requirements. The store interface abstracts differences across all
598 598 # of them.
599 599 store = makestore(
600 600 requirements,
601 601 storebasepath,
602 602 lambda base: vfsmod.vfs(base, cacheaudited=True),
603 603 )
604 604 hgvfs.createmode = store.createmode
605 605
606 606 storevfs = store.vfs
607 607 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
608 608
609 609 # The cache vfs is used to manage cache files.
610 610 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
611 611 cachevfs.createmode = store.createmode
612 612 # The cache vfs is used to manage cache files related to the working copy
613 613 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
614 614 wcachevfs.createmode = store.createmode
615 615
616 616 # Now resolve the type for the repository object. We do this by repeatedly
617 617 # calling a factory function to produces types for specific aspects of the
618 618 # repo's operation. The aggregate returned types are used as base classes
619 619 # for a dynamically-derived type, which will represent our new repository.
620 620
621 621 bases = []
622 622 extrastate = {}
623 623
624 624 for iface, fn in REPO_INTERFACES:
625 625 # We pass all potentially useful state to give extensions tons of
626 626 # flexibility.
627 627 typ = fn()(
628 628 ui=ui,
629 629 intents=intents,
630 630 requirements=requirements,
631 631 features=features,
632 632 wdirvfs=wdirvfs,
633 633 hgvfs=hgvfs,
634 634 store=store,
635 635 storevfs=storevfs,
636 636 storeoptions=storevfs.options,
637 637 cachevfs=cachevfs,
638 638 wcachevfs=wcachevfs,
639 639 extensionmodulenames=extensionmodulenames,
640 640 extrastate=extrastate,
641 641 baseclasses=bases,
642 642 )
643 643
644 644 if not isinstance(typ, type):
645 645 raise error.ProgrammingError(
646 646 b'unable to construct type for %s' % iface
647 647 )
648 648
649 649 bases.append(typ)
650 650
651 651 # type() allows you to use characters in type names that wouldn't be
652 652 # recognized as Python symbols in source code. We abuse that to add
653 653 # rich information about our constructed repo.
654 654 name = pycompat.sysstr(
655 655 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
656 656 )
657 657
658 658 cls = type(name, tuple(bases), {})
659 659
660 660 return cls(
661 661 baseui=baseui,
662 662 ui=ui,
663 663 origroot=path,
664 664 wdirvfs=wdirvfs,
665 665 hgvfs=hgvfs,
666 666 requirements=requirements,
667 667 supportedrequirements=supportedrequirements,
668 668 sharedpath=storebasepath,
669 669 store=store,
670 670 cachevfs=cachevfs,
671 671 wcachevfs=wcachevfs,
672 672 features=features,
673 673 intents=intents,
674 674 )
675 675
676 676
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Read the per-repository config (.hg/hgrc) into a ui instance.

    Called while a repository is being opened so that any additional
    config files or settings relevant to it take effect. Returns True
    when a repo-level config was actually loaded, False otherwise.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    # Honor the global switch that disables repo-local hgrc files.
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
    except IOError:
        # Missing or unreadable hgrc: nothing was loaded.
        return False
    return True
696 696
697 697
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Run post-processing after .hg/hgrc has been loaded.

    Invoked during repository loading right after the .hg/hgrc file is
    parsed and before per-repo extensions are loaded. Typical uses are
    validating configs and automatically enabling options (including
    extensions) implied by repository requirements.
    """
    # Requirements that imply extensions which must be enabled to open
    # the repository.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement in requirements:
            for name in names:
                # Only autoload when the user has no explicit setting.
                if not ui.hasconfig(b'extensions', name):
                    ui.setconfig(b'extensions', name, b'', source=b'autoload')
723 723
724 724
def gathersupportedrequirements(ui):
    """Compute the complete set of requirements this code recognizes."""
    # Seed with the requirements natively understood by this module.
    recognized = set(localrepository._basesupported)

    # Let ``featuresetupfuncs`` registered by currently enabled
    # extensions extend the set.
    enabledmods = {mod.__name__ for _name, mod in extensions.extensions(ui)}
    for setupfn in featuresetupfuncs:
        if setupfn.__module__ in enabledmods:
            setupfn(ui, recognized)

    # Derive requirements from compression engines usable by revlogs.
    for enginename in util.compengines:
        engine = util.compengines[enginename]
        if not (engine.available() and engine.revlogheader()):
            continue
        recognized.add(b'exp-compression-%s' % enginename)
        if engine.name() == b'zstd':
            recognized.add(b'revlog-compression-zstd')

    return recognized
747 747
748 748
def ensurerequirementsrecognized(requirements, supported):
    """Check that every local requirement is known to the loaded code.

    ``requirements`` is the set read from ``.hg/requires``; ``supported``
    is the set of requirements the running code understands. Raises
    ``error.RequirementError`` when an entry is malformed (a sign of a
    corrupt requires file) or simply unrecognized.
    """
    unknown = {req for req in requirements if req not in supported}

    for req in unknown:
        # Entries must start with an alphanumeric byte; anything else
        # means the requires file itself is damaged.
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
778 778
779 779
def ensurerequirementscompatible(ui, requirements):
    """Validate that a set of recognized requirements can be used together.

    Some requirements conflict with each other or need config options
    that are not enabled. Called during repository opening to ensure the
    requirement set is sane given the current configuration.

    Extensions can monkeypatch this function to perform additional
    checking. ``error.RepoError`` should be raised on failure.
    """
    wantssparse = b'exp-sparse' in requirements
    if wantssparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
801 801
802 802
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository.

    The requirements select between the legacy flat layout, the encoded
    store, and the fncache-backed store.
    """
    if b'store' not in requirements:
        return storemod.basicstore(path, vfstype)

    if b'fncache' not in requirements:
        return storemod.encodedstore(path, vfstype)

    return storemod.fncachestore(path, vfstype, b'dotencode' in requirements)
814 814
815 815
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    opts = {}

    if b'treemanifest' in requirements:
        opts[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        opts[b'manifestcachesize'] = manifestcachesize

    # Without a newer revlog requirement we must assume the repo uses the
    # super old revlog version 0, whose opener takes no meaningful
    # options, so none are parsed for it.
    usesrevlogs = (
        b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements
    )
    if usesrevlogs:
        opts.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        opts[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        opts[b'copies-storage'] = b'changeset-sidedata'
    elif ui.config(b'experimental', b'copies.write-to') in (
        b'changeset-only',
        b'compatibility',
    ):
        opts[b'copies-storage'] = b'extra'

    return opts
850 850
851 851
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Returns a dict built from repository requirements and ui config;
    ``resolvestorevfsoptions()`` merges it into the store vfs options.
    """

    options = {}
    options[b'flagprocessors'] = {}

    # Revlog format version flags derived from requirements.
    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    # reuse-external-delta-parent is only consulted when external deltas
    # are reused at all; a None result falls back to the general-delta
    # configuration.
    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    # Only honor non-negative span limits.
    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    # sparse-revlog implies generaldelta regardless of the requirement set.
    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    # sparse-revlog repos get a default chain-length cap, which the
    # (experimental) format.maxchainlen config may override.
    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    # Validate compression levels before handing them to the engines.
    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
955 955
956 956
def makemain(**kwargs):
    """Return the type implementing ``ilocalrepositorymain``.

    Keyword arguments are accepted (and ignored) so the signature matches
    the other repo-type factory functions.
    """
    return localrepository
960 960
961 961
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Obtain the filelog tracking ``path``.

        Stored paths have no leading slash; strip one if present for
        caller convenience.
        """
        # Was ``path[0] == b'/'``: on Python 3 indexing bytes yields an
        # int, so that comparison was always False (and it raised
        # IndexError on an empty path). startswith works on both.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)
971 971
972 972
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Obtain the narrow filelog tracking ``path``.

        Stored paths have no leading slash; strip one if present for
        caller convenience.
        """
        # Was ``path[0] == b'/'``: on Python 3 indexing bytes yields an
        # int, so that comparison was always False (and it raised
        # IndexError on an empty path). startswith works on both.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
982 982
983 983
def makefilestorage(requirements, features, **kwargs):
    """Select the file-storage type for a repository.

    Registers the revlog-backed storage features on ``features`` and
    returns a class conforming to ``ilocalrepositoryfilestorage``.
    """
    features.update(
        {
            repository.REPO_FEATURE_REVLOG_FILE_STORAGE,
            repository.REPO_FEATURE_STREAM_CLONE,
        }
    )

    narrowed = repository.NARROW_REQUIREMENT in requirements
    return revlognarrowfilestorage if narrowed else revlogfilestorage
993 993
994 994
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can still be wrapped (e.g. by extensions) after this list is
# built.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
1004 1004
1005 1005
1006 1006 @interfaceutil.implementer(repository.ilocalrepositorymain)
1007 1007 class localrepository(object):
1008 1008 """Main class for representing local repositories.
1009 1009
1010 1010 All local repositories are instances of this class.
1011 1011
1012 1012 Constructed on its own, instances of this class are not usable as
1013 1013 repository objects. To obtain a usable repository object, call
1014 1014 ``hg.repository()``, ``localrepo.instance()``, or
1015 1015 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1016 1016 ``instance()`` adds support for creating new repositories.
1017 1017 ``hg.repository()`` adds more extension integration, including calling
1018 1018 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1019 1019 used.
1020 1020 """
1021 1021
    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    # Requirements affecting the on-disk storage format (they end up in
    # `.hg/requires`).
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    # Full set of requirements this class knows how to open: the storage
    # format requirements above plus working-copy/sharing related ones.
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
1067 1067
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        # No repoview filter by default; repoview instances override this.
        self.filtername = None

        # Install a write-auditing wrapper on the .hg/ vfs when devel
        # warnings about lock usage are enabled.
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        # Same auditing wrapper, but for the store vfs ('lock' coverage).
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        # Weak references to the active transaction, lock and wlock.
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        # How file copies are stored; set from the sidedata requirement.
        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'
1216 1216
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Returns a wrapper around ``origfunc`` (the vfs audit callback)
        that emits devel warnings when .hg/ files are written without
        the appropriate lock ('lock' or 'wlock') being held.
        """
        # Weak reference so the ward does not keep the repo alive.
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Bail out if the repo is gone or only partially constructed.
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            # Read-only accesses never require a lock.
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
1262 1262
1263 1263 def _getsvfsward(self, origfunc):
1264 1264 """build a ward for self.svfs"""
1265 1265 rref = weakref.ref(self)
1266 1266
1267 1267 def checksvfs(path, mode=None):
1268 1268 ret = origfunc(path, mode=mode)
1269 1269 repo = rref()
1270 1270 if repo is None or not util.safehasattr(repo, b'_lockref'):
1271 1271 return
1272 1272 if mode in (None, b'r', b'rb'):
1273 1273 return
1274 1274 if path.startswith(repo.sharedpath):
1275 1275 # truncate name relative to the repository (.hg)
1276 1276 path = path[len(repo.sharedpath) + 1 :]
1277 1277 if repo._currentlock(repo._lockref) is None:
1278 1278 repo.ui.develwarn(
1279 1279 b'write with no lock: "%s"' % path, stacklevel=4
1280 1280 )
1281 1281 return ret
1282 1282
1283 1283 return checksvfs
1284 1284
1285 1285 def close(self):
1286 1286 self._writecaches()
1287 1287
1288 1288 def _writecaches(self):
1289 1289 if self._revbranchcache:
1290 1290 self._revbranchcache.write()
1291 1291
1292 1292 def _restrictcapabilities(self, caps):
1293 1293 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1294 1294 caps = set(caps)
1295 1295 capsblob = bundle2.encodecaps(
1296 1296 bundle2.getrepocaps(self, role=b'client')
1297 1297 )
1298 1298 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1299 1299 return caps
1300 1300
1301 1301 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1302 1302 # self -> auditor -> self._checknested -> self
1303 1303
1304 1304 @property
1305 1305 def auditor(self):
1306 1306 # This is only used by context.workingctx.match in order to
1307 1307 # detect files in subrepos.
1308 1308 return pathutil.pathauditor(self.root, callback=self._checknested)
1309 1309
1310 1310 @property
1311 1311 def nofsauditor(self):
1312 1312 # This is only used by context.basectx.match in order to detect
1313 1313 # files in subrepos.
1314 1314 return pathutil.pathauditor(
1315 1315 self.root, callback=self._checknested, realfs=False, cached=True
1316 1316 )
1317 1317
1318 1318 def _checknested(self, path):
1319 1319 """Determine if path is a legal nested repository."""
1320 1320 if not path.startswith(self.root):
1321 1321 return False
1322 1322 subpath = path[len(self.root) + 1 :]
1323 1323 normsubpath = util.pconvert(subpath)
1324 1324
1325 1325 # XXX: Checking against the current working copy is wrong in
1326 1326 # the sense that it can reject things like
1327 1327 #
1328 1328 # $ hg cat -r 10 sub/x.txt
1329 1329 #
1330 1330 # if sub/ is no longer a subrepository in the working copy
1331 1331 # parent revision.
1332 1332 #
1333 1333 # However, it can of course also allow things that would have
1334 1334 # been rejected before, such as the above cat command if sub/
1335 1335 # is a subrepository now, but was a normal directory before.
1336 1336 # The old path auditor would have rejected by mistake since it
1337 1337 # panics when it sees sub/.hg/.
1338 1338 #
1339 1339 # All in all, checking against the working copy seems sensible
1340 1340 # since we want to prevent access to nested repositories on
1341 1341 # the filesystem *now*.
1342 1342 ctx = self[None]
1343 1343 parts = util.splitpath(subpath)
1344 1344 while parts:
1345 1345 prefix = b'/'.join(parts)
1346 1346 if prefix in ctx.substate:
1347 1347 if prefix == normsubpath:
1348 1348 return True
1349 1349 else:
1350 1350 sub = ctx.sub(prefix)
1351 1351 return sub.checknested(subpath[len(prefix) + 1 :])
1352 1352 else:
1353 1353 parts.pop()
1354 1354 return False
1355 1355
1356 1356 def peer(self):
1357 1357 return localpeer(self) # not cached to avoid reference cycle
1358 1358
1359 1359 def unfiltered(self):
1360 1360 """Return unfiltered version of the repository
1361 1361
1362 1362 Intended to be overwritten by filtered repo."""
1363 1363 return self
1364 1364
1365 1365 def filtered(self, name, visibilityexceptions=None):
1366 1366 """Return a filtered version of a repository
1367 1367
1368 1368 The `name` parameter is the identifier of the requested view. This
1369 1369 will return a repoview object set "exactly" to the specified view.
1370 1370
1371 1371 This function does not apply recursive filtering to a repository. For
1372 1372 example calling `repo.filtered("served")` will return a repoview using
1373 1373 the "served" view, regardless of the initial view used by `repo`.
1374 1374
1375 1375 In other word, there is always only one level of `repoview` "filtering".
1376 1376 """
1377 1377 if self._extrafilterid is not None and b'%' not in name:
1378 1378 name = name + b'%' + self._extrafilterid
1379 1379
1380 1380 cls = repoview.newtype(self.unfiltered().__class__)
1381 1381 return cls(self, name, visibilityexceptions)
1382 1382
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        """Return the bookmark store (``bookmarks.bmstore``) for this repo."""
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happening during read is not great, but it becomes worse
        # when it happens during write because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose data read from disk before locking are
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse them if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1440 1440
1441 1441 def _refreshchangelog(self):
1442 1442 """make sure the in memory changelog match the on-disk one"""
1443 1443 if 'changelog' in vars(self) and self.currenttransaction() is None:
1444 1444 del self.changelog
1445 1445
1446 1446 @property
1447 1447 def _activebookmark(self):
1448 1448 return self._bookmarks.active
1449 1449
    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        """cache of phase information, tied to phaseroots and the changelog"""
        return phases.phasecache(self, self._phasedefaults)
1456 1456
    @storecache(b'obsstore')
    def obsstore(self):
        """the obsolescence-marker store for this repository"""
        return obsolete.makestore(self.ui, self)
1460 1460
    @storecache(b'00changelog.i')
    def changelog(self):
        """the changelog for this repository"""
        # load dirstate before changelog to avoid race see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))
1466 1466
    @storecache(b'00manifest.i')
    def manifestlog(self):
        """the manifest log, built with the store-level narrow matcher"""
        return self.store.manifestlog(self, self._storenarrowmatch)
1470 1470
    @repofilecache(b'dirstate')
    def dirstate(self):
        """the working-directory state, cached against the dirstate file"""
        return self._makedirstate()
1474 1474
1475 1475 def _makedirstate(self):
1476 1476 """Extension point for wrapping the dirstate per-repo."""
1477 1477 sparsematchfn = lambda: sparse.matcher(self)
1478 1478
1479 1479 return dirstate.dirstate(
1480 1480 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1481 1481 )
1482 1482
1483 1483 def _dirstatevalidate(self, node):
1484 1484 try:
1485 1485 self.changelog.rev(node)
1486 1486 return node
1487 1487 except error.LookupError:
1488 1488 if not self._dirstatevalidatewarned:
1489 1489 self._dirstatevalidatewarned = True
1490 1490 self.ui.warn(
1491 1491 _(b"warning: ignoring unknown working parent %s!\n")
1492 1492 % short(node)
1493 1493 )
1494 1494 return nullid
1495 1495
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # loaded lazily from the narrowspec file tracked by the store cache
        return narrowspec.load(self)
1503 1503
1504 1504 @storecache(narrowspec.FILENAME)
1505 1505 def _storenarrowmatch(self):
1506 1506 if repository.NARROW_REQUIREMENT not in self.requirements:
1507 1507 return matchmod.always()
1508 1508 include, exclude = self.narrowpats
1509 1509 return narrowspec.match(self.root, include=include, exclude=exclude)
1510 1510
1511 1511 @storecache(narrowspec.FILENAME)
1512 1512 def _narrowmatch(self):
1513 1513 if repository.NARROW_REQUIREMENT not in self.requirements:
1514 1514 return matchmod.always()
1515 1515 narrowspec.checkworkingcopynarrowspec(self)
1516 1516 include, exclude = self.narrowpats
1517 1517 return narrowspec.match(self.root, include=include, exclude=exclude)
1518 1518
1519 1519 def narrowmatch(self, match=None, includeexact=False):
1520 1520 """matcher corresponding the the repo's narrowspec
1521 1521
1522 1522 If `match` is given, then that will be intersected with the narrow
1523 1523 matcher.
1524 1524
1525 1525 If `includeexact` is True, then any exact matches from `match` will
1526 1526 be included even if they're outside the narrowspec.
1527 1527 """
1528 1528 if match:
1529 1529 if includeexact and not self._narrowmatch.always():
1530 1530 # do not exclude explicitly-specified paths so that they can
1531 1531 # be warned later on
1532 1532 em = matchmod.exact(match.files())
1533 1533 nm = matchmod.unionmatcher([self._narrowmatch, em])
1534 1534 return matchmod.intersectmatchers(match, nm)
1535 1535 return matchmod.intersectmatchers(match, self._narrowmatch)
1536 1536 return self._narrowmatch
1537 1537
    def setnarrowpats(self, newincludes, newexcludes):
        """persist a new narrowspec and drop every cache derived from it"""
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)
1541 1541
1542 1542 @unfilteredpropertycache
1543 1543 def _quick_access_changeid_null(self):
1544 1544 return {
1545 1545 b'null': (nullrev, nullid),
1546 1546 nullrev: (nullrev, nullid),
1547 1547 nullid: (nullrev, nullid),
1548 1548 }
1549 1549
1550 1550 @unfilteredpropertycache
1551 1551 def _quick_access_changeid_wc(self):
1552 1552 # also fast path access to the working copy parents
1553 1553 # however, only do it for filter that ensure wc is visible.
1554 1554 quick = {}
1555 1555 cl = self.unfiltered().changelog
1556 1556 for node in self.dirstate.parents():
1557 1557 if node == nullid:
1558 1558 continue
1559 1559 rev = cl.index.get_rev(node)
1560 1560 if rev is None:
1561 1561 # unknown working copy parent case:
1562 1562 #
1563 1563 # skip the fast path and let higher code deal with it
1564 1564 continue
1565 1565 pair = (rev, node)
1566 1566 quick[rev] = pair
1567 1567 quick[node] = pair
1568 1568 # also add the parents of the parents
1569 1569 for r in cl.parentrevs(rev):
1570 1570 if r == nullrev:
1571 1571 continue
1572 1572 n = cl.node(r)
1573 1573 pair = (r, n)
1574 1574 quick[r] = pair
1575 1575 quick[n] = pair
1576 1576 p1node = self.dirstate.p1()
1577 1577 if p1node != nullid:
1578 1578 quick[b'.'] = quick[p1node]
1579 1579 return quick
1580 1580
1581 1581 @unfilteredmethod
1582 1582 def _quick_access_changeid_invalidate(self):
1583 1583 if '_quick_access_changeid_wc' in vars(self):
1584 1584 del self.__dict__['_quick_access_changeid_wc']
1585 1585
1586 1586 @property
1587 1587 def _quick_access_changeid(self):
1588 1588 """an helper dictionnary for __getitem__ calls
1589 1589
1590 1590 This contains a list of symbol we can recognise right away without
1591 1591 further processing.
1592 1592 """
1593 1593 mapping = self._quick_access_changeid_null
1594 1594 if self.filtername in repoview.filter_has_wc:
1595 1595 mapping = mapping.copy()
1596 1596 mapping.update(self._quick_access_changeid_wc)
1597 1597 return mapping
1598 1598
    def __getitem__(self, changeid):
        """return the context for ``changeid``

        ``changeid`` may be ``None`` (working context), an existing context
        (returned unchanged), a slice of revisions, a revision number, a
        binary or hex node, or a symbol handled by the quick-access table.

        Raises error.FilteredRepoLookupError or error.RepoLookupError when
        the changeset cannot be resolved.
        """
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                # a 20-byte value is treated as a binary node
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                # a 40-byte value is treated as a hex node
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
1677 1677
1678 1678 def __contains__(self, changeid):
1679 1679 """True if the given changeid exists
1680 1680
1681 1681 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1682 1682 specified.
1683 1683 """
1684 1684 try:
1685 1685 self[changeid]
1686 1686 return True
1687 1687 except error.RepoLookupError:
1688 1688 return False
1689 1689
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True

    # py3 spelling of the same protocol method
    __bool__ = __nonzero__
1694 1694
1695 1695 def __len__(self):
1696 1696 # no need to pay the cost of repoview.changelog
1697 1697 unfi = self.unfiltered()
1698 1698 return len(unfi.changelog)
1699 1699
    def __iter__(self):
        """iterate over the (possibly filtered) revision numbers"""
        return iter(self.changelog)
1702 1702
1703 1703 def revs(self, expr, *args):
1704 1704 '''Find revisions matching a revset.
1705 1705
1706 1706 The revset is specified as a string ``expr`` that may contain
1707 1707 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1708 1708
1709 1709 Revset aliases from the configuration are not expanded. To expand
1710 1710 user aliases, consider calling ``scmutil.revrange()`` or
1711 1711 ``repo.anyrevs([expr], user=True)``.
1712 1712
1713 1713 Returns a smartset.abstractsmartset, which is a list-like interface
1714 1714 that contains integer revisions.
1715 1715 '''
1716 1716 tree = revsetlang.spectree(expr, *args)
1717 1717 return revset.makematcher(tree)(self)
1718 1718
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        # note: this is a generator function, so the revs() call below does
        # not run until the returned iterator is first advanced
        for r in self.revs(expr, *args):
            yield self[r]
1730 1730
1731 1731 def anyrevs(self, specs, user=False, localalias=None):
1732 1732 '''Find revisions matching one of the given revsets.
1733 1733
1734 1734 Revset aliases from the configuration are not expanded by default. To
1735 1735 expand user aliases, specify ``user=True``. To provide some local
1736 1736 definitions overriding user aliases, set ``localalias`` to
1737 1737 ``{name: definitionstring}``.
1738 1738 '''
1739 1739 if specs == [b'null']:
1740 1740 return revset.baseset([nullrev])
1741 1741 if specs == [b'.']:
1742 1742 quick_data = self._quick_access_changeid.get(b'.')
1743 1743 if quick_data is not None:
1744 1744 return revset.baseset([quick_data[0]])
1745 1745 if user:
1746 1746 m = revset.matchany(
1747 1747 self.ui,
1748 1748 specs,
1749 1749 lookup=revset.lookupfn(self),
1750 1750 localalias=localalias,
1751 1751 )
1752 1752 else:
1753 1753 m = revset.matchany(None, specs, localalias=localalias)
1754 1754 return m(self)
1755 1755
1756 1756 def url(self):
1757 1757 return b'file:' + self.root
1758 1758
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        ``throw`` and the extra keyword ``args`` are forwarded verbatim to
        ``hook.hook``.
        """
        return hook.hook(self.ui, self, name, throw, **args)
1767 1767
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # filled lazily by tagslist() and nodetags() respectively
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
1790 1790
1791 1791 def tags(self):
1792 1792 '''return a mapping of tag to node'''
1793 1793 t = {}
1794 1794 if self.changelog.filteredrevs:
1795 1795 tags, tt = self._findtags()
1796 1796 else:
1797 1797 tags = self._tagscache.tags
1798 1798 rev = self.changelog.rev
1799 1799 for k, v in pycompat.iteritems(tags):
1800 1800 try:
1801 1801 # ignore tags to unknown nodes
1802 1802 rev(v)
1803 1803 t[k] = v
1804 1804 except (error.LookupError, ValueError):
1805 1805 pass
1806 1806 return t
1807 1807
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        # local tags may override global ones (updates alltags/tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # the b'tip' pseudo-tag is always present
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)
1843 1843
1844 1844 def tagtype(self, tagname):
1845 1845 '''
1846 1846 return the type of the given tag. result can be:
1847 1847
1848 1848 'local' : a local tag
1849 1849 'global' : a global tag
1850 1850 None : tag does not exist
1851 1851 '''
1852 1852
1853 1853 return self._tagscache.tagtypes.get(tagname)
1854 1854
1855 1855 def tagslist(self):
1856 1856 '''return a list of tags ordered by revision'''
1857 1857 if not self._tagscache.tagslist:
1858 1858 l = []
1859 1859 for t, n in pycompat.iteritems(self.tags()):
1860 1860 l.append((self.changelog.rev(n), t, n))
1861 1861 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1862 1862
1863 1863 return self._tagscache.tagslist
1864 1864
1865 1865 def nodetags(self, node):
1866 1866 '''return the tags associated with a node'''
1867 1867 if not self._tagscache.nodetagscache:
1868 1868 nodetagscache = {}
1869 1869 for t, n in pycompat.iteritems(self._tagscache.tags):
1870 1870 nodetagscache.setdefault(n, []).append(t)
1871 1871 for tags in pycompat.itervalues(nodetagscache):
1872 1872 tags.sort()
1873 1873 self._tagscache.nodetagscache = nodetagscache
1874 1874 return self._tagscache.nodetagscache.get(node, [])
1875 1875
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        # delegates the reverse (node -> names) lookup to the bookmark store
        return self._bookmarks.names(node)
1879 1879
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # the cache collection is indexed by the (possibly filtered) repo view
        return self._branchcaches[self]
1884 1884
    @unfilteredmethod
    def revbranchcache(self):
        """return the rev-to-branch cache, creating it lazily on first use"""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1890 1890
1891 1891 def branchtip(self, branch, ignoremissing=False):
1892 1892 '''return the tip node for a given branch
1893 1893
1894 1894 If ignoremissing is True, then this method will not raise an error.
1895 1895 This is helpful for callers that only expect None for a missing branch
1896 1896 (e.g. namespace).
1897 1897
1898 1898 '''
1899 1899 try:
1900 1900 return self.branchmap().branchtip(branch)
1901 1901 except KeyError:
1902 1902 if not ignoremissing:
1903 1903 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1904 1904 else:
1905 1905 pass
1906 1906
1907 1907 def lookup(self, key):
1908 1908 node = scmutil.revsymbol(self, key).node()
1909 1909 if node is None:
1910 1910 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1911 1911 return node
1912 1912
1913 1913 def lookupbranch(self, key):
1914 1914 if self.branchmap().hasbranch(key):
1915 1915 return key
1916 1916
1917 1917 return scmutil.revsymbol(self, key).branch()
1918 1918
1919 1919 def known(self, nodes):
1920 1920 cl = self.changelog
1921 1921 get_rev = cl.index.get_rev
1922 1922 filtered = cl.filteredrevs
1923 1923 result = []
1924 1924 for n in nodes:
1925 1925 r = get_rev(n)
1926 1926 resp = not (r is None or r in filtered)
1927 1927 result.append(resp)
1928 1928 return result
1929 1929
    def local(self):
        """return self to signal this is a local repository

        (subclasses such as statichttprepo override this — see ``cancopy``)
        """
        return self
1932 1932
    def publishing(self):
        """True if this repository publishes changesets (phases.publish)"""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)
1937 1937
1938 1938 def cancopy(self):
1939 1939 # so statichttprepo's override of local() works
1940 1940 if not self.local():
1941 1941 return False
1942 1942 if not self.publishing():
1943 1943 return True
1944 1944 # if publishing we can't copy if there is filtered content
1945 1945 return not self.filtered(b'visible').changelog.filteredrevs
1946 1946
1947 1947 def shared(self):
1948 1948 '''the type of shared repository (None if not shared)'''
1949 1949 if self.sharedpath != self.path:
1950 1950 return b'store'
1951 1951 return None
1952 1952
    def wjoin(self, f, *insidef):
        """join path component(s) below the working-directory root"""
        return self.vfs.reljoin(self.root, f, *insidef)
1955 1955
    def setparents(self, p1, p2=nullid):
        """set the working directory parents

        Also drops the quick-access fast-path caches keyed on the old parents.
        """
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()
1959 1959
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """return a file context for ``path``

        changeid must be a changeset revision, if specified.
        fileid can be a file revision or node.
        """
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )
1966 1966
    def getcwd(self):
        """return the current working directory (delegates to the dirstate)"""
        return self.dirstate.getcwd()
1969 1969
    def pathto(self, f, cwd=None):
        """return ``f`` as a path relative to ``cwd`` (delegates to dirstate)"""
        return self.dirstate.pathto(f, cwd)
1972 1972
    def _loadfilter(self, filter):
        """parse and cache the filter patterns from the configuration section
        named by ``filter`` (e.g. b'encode' or b'decode')

        Returns a list of (matcher, filterfn, params) triples; entries whose
        command is b'!' are disabled and skipped.
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                # a command prefixed by a registered data-filter name uses
                # that in-process filter instead of spawning a shell command
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    # fall back to running the command through a pipe
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
1998 1998
1999 1999 def _filter(self, filterpats, filename, data):
2000 2000 for mf, fn, cmd in filterpats:
2001 2001 if mf(filename):
2002 2002 self.ui.debug(
2003 2003 b"filtering %s through %s\n"
2004 2004 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2005 2005 )
2006 2006 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2007 2007 break
2008 2008
2009 2009 return data
2010 2010
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading data from the working directory
        return self._loadfilter(b'encode')
2014 2014
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing data to the working directory
        return self._loadfilter(b'decode')
2018 2018
    def adddatafilter(self, name, filter):
        """register an in-process data filter usable from filter configuration
        (matched by command prefix in ``_loadfilter``)"""
        self._datafilters[name] = filter
2021 2021
2022 2022 def wread(self, filename):
2023 2023 if self.wvfs.islink(filename):
2024 2024 data = self.wvfs.readlink(filename)
2025 2025 else:
2026 2026 data = self.wvfs.read(filename)
2027 2027 return self._filter(self._encodefilterpats, filename, data)
2028 2028
2029 2029 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2030 2030 """write ``data`` into ``filename`` in the working directory
2031 2031
2032 2032 This returns length of written (maybe decoded) data.
2033 2033 """
2034 2034 data = self._filter(self._decodefilterpats, filename, data)
2035 2035 if b'l' in flags:
2036 2036 self.wvfs.symlink(data, filename)
2037 2037 else:
2038 2038 self.wvfs.write(
2039 2039 filename, data, backgroundclose=backgroundclose, **kwargs
2040 2040 )
2041 2041 if b'x' in flags:
2042 2042 self.wvfs.setflags(filename, False, True)
2043 2043 else:
2044 2044 self.wvfs.setflags(filename, False, False)
2045 2045 return len(data)
2046 2046
    def wwritedata(self, filename, data):
        """return ``data`` run through the decode filters for ``filename``,
        without writing anything"""
        return self._filter(self._decodefilterpats, filename, data)
2049 2049
2050 2050 def currenttransaction(self):
2051 2051 """return the current transaction or None if non exists"""
2052 2052 if self._transref:
2053 2053 tr = self._transref()
2054 2054 else:
2055 2055 tr = None
2056 2056
2057 2057 if tr and tr.running():
2058 2058 return tr
2059 2059 return None
2060 2060
2061 2061 def transaction(self, desc, report=None):
2062 2062 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2063 2063 b'devel', b'check-locks'
2064 2064 ):
2065 2065 if self._currentlock(self._lockref) is None:
2066 2066 raise error.ProgrammingError(b'transaction requires locking')
2067 2067 tr = self.currenttransaction()
2068 2068 if tr is not None:
2069 2069 return tr.nest(name=desc)
2070 2070
2071 2071 # abort here if the journal already exists
2072 2072 if self.svfs.exists(b"journal"):
2073 2073 raise error.RepoError(
2074 2074 _(b"abandoned transaction found"),
2075 2075 hint=_(b"run 'hg recover' to clean up transaction"),
2076 2076 )
2077 2077
2078 2078 idbase = b"%.40f#%f" % (random.random(), time.time())
2079 2079 ha = hex(hashutil.sha1(idbase).digest())
2080 2080 txnid = b'TXN:' + ha
2081 2081 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2082 2082
2083 2083 self._writejournal(desc)
2084 2084 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2085 2085 if report:
2086 2086 rp = report
2087 2087 else:
2088 2088 rp = self.ui.warn
2089 2089 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2090 2090 # we must avoid cyclic reference between repo and transaction.
2091 2091 reporef = weakref.ref(self)
2092 2092 # Code to track tag movement
2093 2093 #
2094 2094 # Since tags are all handled as file content, it is actually quite hard
2095 2095 # to track these movement from a code perspective. So we fallback to a
2096 2096 # tracking at the repository level. One could envision to track changes
2097 2097 # to the '.hgtags' file through changegroup apply but that fails to
2098 2098 # cope with case where transaction expose new heads without changegroup
2099 2099 # being involved (eg: phase movement).
2100 2100 #
2101 2101 # For now, We gate the feature behind a flag since this likely comes
2102 2102 # with performance impacts. The current code run more often than needed
2103 2103 # and do not use caches as much as it could. The current focus is on
2104 2104 # the behavior of the feature so we disable it by default. The flag
2105 2105 # will be removed when we are happy with the performance impact.
2106 2106 #
2107 2107 # Once this feature is no longer experimental move the following
2108 2108 # documentation to the appropriate help section:
2109 2109 #
2110 2110 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2111 2111 # tags (new or changed or deleted tags). In addition the details of
2112 2112 # these changes are made available in a file at:
2113 2113 # ``REPOROOT/.hg/changes/tags.changes``.
2114 2114 # Make sure you check for HG_TAG_MOVED before reading that file as it
2115 2115 # might exist from a previous transaction even if no tag were touched
2116 2116 # in this one. Changes are recorded in a line base format::
2117 2117 #
2118 2118 # <action> <hex-node> <tag-name>\n
2119 2119 #
2120 2120 # Actions are defined as follow:
2121 2121 # "-R": tag is removed,
2122 2122 # "+A": tag is added,
2123 2123 # "-M": tag is moved (old value),
2124 2124 # "+M": tag is moved (new value),
2125 2125 tracktags = lambda x: None
2126 2126 # experimental config: experimental.hook-track-tags
2127 2127 shouldtracktags = self.ui.configbool(
2128 2128 b'experimental', b'hook-track-tags'
2129 2129 )
2130 2130 if desc != b'strip' and shouldtracktags:
2131 2131 oldheads = self.changelog.headrevs()
2132 2132
2133 2133 def tracktags(tr2):
2134 2134 repo = reporef()
2135 2135 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2136 2136 newheads = repo.changelog.headrevs()
2137 2137 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2138 2138 # notes: we compare lists here.
2139 2139 # As we do it only once buiding set would not be cheaper
2140 2140 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2141 2141 if changes:
2142 2142 tr2.hookargs[b'tag_moved'] = b'1'
2143 2143 with repo.vfs(
2144 2144 b'changes/tags.changes', b'w', atomictemp=True
2145 2145 ) as changesfile:
2146 2146 # note: we do not register the file to the transaction
2147 2147 # because we needs it to still exist on the transaction
2148 2148 # is close (for txnclose hooks)
2149 2149 tagsmod.writediff(changesfile, changes)
2150 2150
2151 2151 def validate(tr2):
2152 2152 """will run pre-closing hooks"""
2153 2153 # XXX the transaction API is a bit lacking here so we take a hacky
2154 2154 # path for now
2155 2155 #
2156 2156 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2157 2157 # dict is copied before these run. In addition we needs the data
2158 2158 # available to in memory hooks too.
2159 2159 #
2160 2160 # Moreover, we also need to make sure this runs before txnclose
2161 2161 # hooks and there is no "pending" mechanism that would execute
2162 2162 # logic only if hooks are about to run.
2163 2163 #
2164 2164 # Fixing this limitation of the transaction is also needed to track
2165 2165 # other families of changes (bookmarks, phases, obsolescence).
2166 2166 #
2167 2167 # This will have to be fixed before we remove the experimental
2168 2168 # gating.
2169 2169 tracktags(tr2)
2170 2170 repo = reporef()
2171 2171
2172 2172 singleheadopt = (b'experimental', b'single-head-per-branch')
2173 2173 singlehead = repo.ui.configbool(*singleheadopt)
2174 2174 if singlehead:
2175 2175 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2176 2176 accountclosed = singleheadsub.get(
2177 2177 b"account-closed-heads", False
2178 2178 )
2179 2179 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2180 2180 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2181 2181 for name, (old, new) in sorted(
2182 2182 tr.changes[b'bookmarks'].items()
2183 2183 ):
2184 2184 args = tr.hookargs.copy()
2185 2185 args.update(bookmarks.preparehookargs(name, old, new))
2186 2186 repo.hook(
2187 2187 b'pretxnclose-bookmark',
2188 2188 throw=True,
2189 2189 **pycompat.strkwargs(args)
2190 2190 )
2191 2191 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2192 2192 cl = repo.unfiltered().changelog
2193 2193 for revs, (old, new) in tr.changes[b'phases']:
2194 2194 for rev in revs:
2195 2195 args = tr.hookargs.copy()
2196 2196 node = hex(cl.node(rev))
2197 2197 args.update(phases.preparehookargs(node, old, new))
2198 2198 repo.hook(
2199 2199 b'pretxnclose-phase',
2200 2200 throw=True,
2201 2201 **pycompat.strkwargs(args)
2202 2202 )
2203 2203
2204 2204 repo.hook(
2205 2205 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2206 2206 )
2207 2207
2208 2208 def releasefn(tr, success):
2209 2209 repo = reporef()
2210 2210 if repo is None:
2211 2211 # If the repo has been GC'd (and this release function is being
2212 2212 # called from transaction.__del__), there's not much we can do,
2213 2213 # so just leave the unfinished transaction there and let the
2214 2214 # user run `hg recover`.
2215 2215 return
2216 2216 if success:
2217 2217 # this should be explicitly invoked here, because
2218 2218 # in-memory changes aren't written out at closing
2219 2219 # transaction, if tr.addfilegenerator (via
2220 2220 # dirstate.write or so) isn't invoked while
2221 2221 # transaction running
2222 2222 repo.dirstate.write(None)
2223 2223 else:
2224 2224 # discard all changes (including ones already written
2225 2225 # out) in this transaction
2226 2226 narrowspec.restorebackup(self, b'journal.narrowspec')
2227 2227 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2228 2228 repo.dirstate.restorebackup(None, b'journal.dirstate')
2229 2229
2230 2230 repo.invalidate(clearfilecache=True)
2231 2231
2232 2232 tr = transaction.transaction(
2233 2233 rp,
2234 2234 self.svfs,
2235 2235 vfsmap,
2236 2236 b"journal",
2237 2237 b"undo",
2238 2238 aftertrans(renames),
2239 2239 self.store.createmode,
2240 2240 validator=validate,
2241 2241 releasefn=releasefn,
2242 2242 checkambigfiles=_cachedfiles,
2243 2243 name=desc,
2244 2244 )
2245 2245 tr.changes[b'origrepolen'] = len(self)
2246 2246 tr.changes[b'obsmarkers'] = set()
2247 2247 tr.changes[b'phases'] = []
2248 2248 tr.changes[b'bookmarks'] = {}
2249 2249
2250 2250 tr.hookargs[b'txnid'] = txnid
2251 2251 tr.hookargs[b'txnname'] = desc
2252 2252 tr.hookargs[b'changes'] = tr.changes
2253 2253 # note: writing the fncache only during finalize mean that the file is
2254 2254 # outdated when running hooks. As fncache is used for streaming clone,
2255 2255 # this is not expected to break anything that happen during the hooks.
2256 2256 tr.addfinalize(b'flush-fncache', self.store.write)
2257 2257
2258 2258 def txnclosehook(tr2):
2259 2259 """To be run if transaction is successful, will schedule a hook run
2260 2260 """
2261 2261 # Don't reference tr2 in hook() so we don't hold a reference.
2262 2262 # This reduces memory consumption when there are multiple
2263 2263 # transactions per lock. This can likely go away if issue5045
2264 2264 # fixes the function accumulation.
2265 2265 hookargs = tr2.hookargs
2266 2266
2267 2267 def hookfunc(unused_success):
2268 2268 repo = reporef()
2269 2269 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2270 2270 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2271 2271 for name, (old, new) in bmchanges:
2272 2272 args = tr.hookargs.copy()
2273 2273 args.update(bookmarks.preparehookargs(name, old, new))
2274 2274 repo.hook(
2275 2275 b'txnclose-bookmark',
2276 2276 throw=False,
2277 2277 **pycompat.strkwargs(args)
2278 2278 )
2279 2279
2280 2280 if hook.hashook(repo.ui, b'txnclose-phase'):
2281 2281 cl = repo.unfiltered().changelog
2282 2282 phasemv = sorted(
2283 2283 tr.changes[b'phases'], key=lambda r: r[0][0]
2284 2284 )
2285 2285 for revs, (old, new) in phasemv:
2286 2286 for rev in revs:
2287 2287 args = tr.hookargs.copy()
2288 2288 node = hex(cl.node(rev))
2289 2289 args.update(phases.preparehookargs(node, old, new))
2290 2290 repo.hook(
2291 2291 b'txnclose-phase',
2292 2292 throw=False,
2293 2293 **pycompat.strkwargs(args)
2294 2294 )
2295 2295
2296 2296 repo.hook(
2297 2297 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2298 2298 )
2299 2299
2300 2300 reporef()._afterlock(hookfunc)
2301 2301
2302 2302 tr.addfinalize(b'txnclose-hook', txnclosehook)
2303 2303 # Include a leading "-" to make it happen before the transaction summary
2304 2304 # reports registered via scmutil.registersummarycallback() whose names
2305 2305 # are 00-txnreport etc. That way, the caches will be warm when the
2306 2306 # callbacks run.
2307 2307 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2308 2308
2309 2309 def txnaborthook(tr2):
2310 2310 """To be run if transaction is aborted
2311 2311 """
2312 2312 reporef().hook(
2313 2313 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2314 2314 )
2315 2315
2316 2316 tr.addabort(b'txnabort-hook', txnaborthook)
2317 2317 # avoid eager cache invalidation. in-memory data should be identical
2318 2318 # to stored data if transaction has no error.
2319 2319 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2320 2320 self._transref = weakref.ref(tr)
2321 2321 scmutil.registersummarycallback(self, tr, desc)
2322 2322 return tr
2323 2323
2324 2324 def _journalfiles(self):
2325 2325 return (
2326 2326 (self.svfs, b'journal'),
2327 2327 (self.svfs, b'journal.narrowspec'),
2328 2328 (self.vfs, b'journal.narrowspec.dirstate'),
2329 2329 (self.vfs, b'journal.dirstate'),
2330 2330 (self.vfs, b'journal.branch'),
2331 2331 (self.vfs, b'journal.desc'),
2332 2332 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2333 2333 (self.svfs, b'journal.phaseroots'),
2334 2334 )
2335 2335
2336 2336 def undofiles(self):
2337 2337 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2338 2338
    @unfilteredmethod
    def _writejournal(self, desc):
        """Snapshot the state a transaction may touch so an aborted
        transaction can be rolled back.

        desc is a bytes description of the transaction, recorded alongside
        the current repo length in journal.desc.
        """
        # working-copy state first: dirstate, then narrowspec (wc and store)
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        # record pre-transaction repo length so rollback can report old tip
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        # tryread: the bookmarks/phaseroots files may legitimately not exist
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2353 2353
2354 2354 def recover(self):
2355 2355 with self.lock():
2356 2356 if self.svfs.exists(b"journal"):
2357 2357 self.ui.status(_(b"rolling back interrupted transaction\n"))
2358 2358 vfsmap = {
2359 2359 b'': self.svfs,
2360 2360 b'plain': self.vfs,
2361 2361 }
2362 2362 transaction.rollback(
2363 2363 self.svfs,
2364 2364 vfsmap,
2365 2365 b"journal",
2366 2366 self.ui.warn,
2367 2367 checkambigfiles=_cachedfiles,
2368 2368 )
2369 2369 self.invalidate()
2370 2370 return True
2371 2371 else:
2372 2372 self.ui.warn(_(b"no interrupted transaction available\n"))
2373 2373 return False
2374 2374
2375 2375 def rollback(self, dryrun=False, force=False):
2376 2376 wlock = lock = dsguard = None
2377 2377 try:
2378 2378 wlock = self.wlock()
2379 2379 lock = self.lock()
2380 2380 if self.svfs.exists(b"undo"):
2381 2381 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2382 2382
2383 2383 return self._rollback(dryrun, force, dsguard)
2384 2384 else:
2385 2385 self.ui.warn(_(b"no rollback information available\n"))
2386 2386 return 1
2387 2387 finally:
2388 2388 release(dsguard, lock, wlock)
2389 2389
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of rollback(): restore the pre-transaction state
        recorded in the undo.* files.

        dryrun: only report what would be rolled back.
        force: skip the "rollback of last commit while not checked out"
               safety check.
        dsguard: an active dirstateguard protecting the dirstate.
        Returns 0 (also on dry run).
        """
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<transaction desc>[\n<detail>]"
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        # refuse to silently drop a commit the user is not parked on
        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        # capture current dirstate parents before the store is rewound
        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        # only restore the working-copy state when the rollback actually
        # removed one of the working directory's parents from history
        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            # stale merge state would reference stripped nodes
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2482 2482
2483 2483 def _buildcacheupdater(self, newtransaction):
2484 2484 """called during transaction to build the callback updating cache
2485 2485
2486 2486 Lives on the repository to help extension who might want to augment
2487 2487 this logic. For this purpose, the created transaction is passed to the
2488 2488 method.
2489 2489 """
2490 2490 # we must avoid cyclic reference between repo and transaction.
2491 2491 reporef = weakref.ref(self)
2492 2492
2493 2493 def updater(tr):
2494 2494 repo = reporef()
2495 2495 repo.updatecaches(tr)
2496 2496
2497 2497 return updater
2498 2498
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed. The transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        # new revisions were added (or no transaction context at all)
        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            # warm the rev-branch cache for every revision
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warm the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
2551 2551
2552 2552 def invalidatecaches(self):
2553 2553
2554 2554 if '_tagscache' in vars(self):
2555 2555 # can't use delattr on proxy
2556 2556 del self.__dict__['_tagscache']
2557 2557
2558 2558 self._branchcaches.clear()
2559 2559 self.invalidatevolatilesets()
2560 2560 self._sparsesignaturecache.clear()
2561 2561
    def invalidatevolatilesets(self):
        """Drop caches that depend on which revisions are visible/obsolete."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()
2566 2566
2567 2567 def invalidatedirstate(self):
2568 2568 '''Invalidates the dirstate, causing the next call to dirstate
2569 2569 to check if it was modified since the last time it was read,
2570 2570 rereading it if it has.
2571 2571
2572 2572 This is different to dirstate.invalidate() that it doesn't always
2573 2573 rereads the dirstate. Use dirstate.invalidate() if you want to
2574 2574 explicitly read the dirstate again (i.e. restoring it to a previous
2575 2575 known good state).'''
2576 2576 if hasunfilteredcache(self, 'dirstate'):
2577 2577 for k in self.dirstate._filecache:
2578 2578 try:
2579 2579 delattr(self.dirstate, k)
2580 2580 except AttributeError:
2581 2581 pass
2582 2582 delattr(self.unfiltered(), 'dirstate')
2583 2583
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).

        clearfilecache: also drop the filecache entries themselves, not
        just the cached attribute values.
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        # iterate over a copy: entries may be deleted during the loop
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # attribute was never loaded; nothing cached to drop
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
2619 2619
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
2626 2626
2627 2627 @unfilteredmethod
2628 2628 def _refreshfilecachestats(self, tr):
2629 2629 """Reload stats of cached files so that they are flagged as valid"""
2630 2630 for k, ce in self._filecache.items():
2631 2631 k = pycompat.sysstr(k)
2632 2632 if k == 'dirstate' or k not in self.__dict__:
2633 2633 continue
2634 2634 ce.refresh()
2635 2635
2636 2636 def _lock(
2637 2637 self,
2638 2638 vfs,
2639 2639 lockname,
2640 2640 wait,
2641 2641 releasefn,
2642 2642 acquirefn,
2643 2643 desc,
2644 2644 inheritchecker=None,
2645 2645 parentenvvar=None,
2646 2646 ):
2647 2647 parentlock = None
2648 2648 # the contents of parentenvvar are used by the underlying lock to
2649 2649 # determine whether it can be inherited
2650 2650 if parentenvvar is not None:
2651 2651 parentlock = encoding.environ.get(parentenvvar)
2652 2652
2653 2653 timeout = 0
2654 2654 warntimeout = 0
2655 2655 if wait:
2656 2656 timeout = self.ui.configint(b"ui", b"timeout")
2657 2657 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2658 2658 # internal config: ui.signal-safe-lock
2659 2659 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2660 2660
2661 2661 l = lockmod.trylock(
2662 2662 self.ui,
2663 2663 vfs,
2664 2664 lockname,
2665 2665 timeout,
2666 2666 warntimeout,
2667 2667 releasefn=releasefn,
2668 2668 acquirefn=acquirefn,
2669 2669 desc=desc,
2670 2670 inheritchecker=inheritchecker,
2671 2671 parentlock=parentlock,
2672 2672 signalsafe=signalsafe,
2673 2673 )
2674 2674 return l
2675 2675
2676 2676 def _afterlock(self, callback):
2677 2677 """add a callback to be run when the repository is fully unlocked
2678 2678
2679 2679 The callback will be executed when the outermost lock is released
2680 2680 (with wlock being higher level than 'lock')."""
2681 2681 for ref in (self._wlockref, self._lockref):
2682 2682 l = ref and ref()
2683 2683 if l and l.held:
2684 2684 l.postrelease.append(callback)
2685 2685 break
2686 2686 else: # no lock have been found.
2687 2687 callback(True)
2688 2688
2689 2689 def lock(self, wait=True):
2690 2690 '''Lock the repository store (.hg/store) and return a weak reference
2691 2691 to the lock. Use this before modifying the store (e.g. committing or
2692 2692 stripping). If you are opening a transaction, get a lock as well.)
2693 2693
2694 2694 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2695 2695 'wlock' first to avoid a dead-lock hazard.'''
2696 2696 l = self._currentlock(self._lockref)
2697 2697 if l is not None:
2698 2698 l.lock()
2699 2699 return l
2700 2700
2701 2701 l = self._lock(
2702 2702 vfs=self.svfs,
2703 2703 lockname=b"lock",
2704 2704 wait=wait,
2705 2705 releasefn=None,
2706 2706 acquirefn=self.invalidate,
2707 2707 desc=_(b'repository %s') % self.origroot,
2708 2708 )
2709 2709 self._lockref = weakref.ref(l)
2710 2710 return l
2711 2711
2712 2712 def _wlockchecktransaction(self):
2713 2713 if self.currenttransaction() is not None:
2714 2714 raise error.LockInheritanceContractViolation(
2715 2715 b'wlock cannot be inherited in the middle of a transaction'
2716 2716 )
2717 2717
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquisition: just increase the hold count
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                # lock-ordering violation: wlock must come before lock
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            # flush (or discard, if a parent change is pending) the dirstate
            # when the outermost wlock is released
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l
2760 2760
2761 2761 def _currentlock(self, lockref):
2762 2762 """Returns the lock if it's held, or None if it's not."""
2763 2763 if lockref is None:
2764 2764 return None
2765 2765 l = lockref()
2766 2766 if l is None or not l.held:
2767 2767 return None
2768 2768 return l
2769 2769
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not.

        Unlike wlock(), this never acquires anything; it only inspects the
        existing weak reference."""
        return self._currentlock(self._wlockref)
2773 2773
    def _filecommit(
        self, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction

        input:

        fctx: a file context with the content we are trying to commit
        manifest1: manifest of changeset first parent
        manifest2: manifest of changeset second parent
        linkrev: revision number of the changeset being created
        tr: current transaction
        includecopymeta: boolean, set to False to skip storing the copy data
                    (only used by the Google specific feature of using
                    changeset extra as copy source of truth).

        output: (filenode, touched)

        filenode: the filenode that should be used by this changeset
        touched: one of: None, 'added' or 'modified'
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        touched = None
        # absent from both parent manifests means the file is new
        if fparent1 == fparent2 == nullid:
            touched = 'added'

        if isinstance(fctx, context.filectx):
            # This block fast path most comparisons which are usually done. It
            # assumes that bare filectx is used and no merge happened, hence no
            # need to create a new file revision in this case.
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    # content unchanged but exec/symlink flags differ
                    touched = 'modified'
                return node, touched

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        fnode = None

        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergestatemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or meta or flog.cmp(fparent1, text):
            if touched is None:  # do not overwrite added
                touched = 'modified'
            fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            touched = 'modified'
            fnode = fparent1
        else:
            # nothing changed: reuse the first parent's filenode
            fnode = fparent1
        return fnode, touched
2912
2913 2774 def checkcommitpatterns(self, wctx, match, status, fail):
2914 2775 """check for commit arguments that aren't committable"""
2915 2776 if match.isexact() or match.prefix():
2916 2777 matched = set(status.modified + status.added + status.removed)
2917 2778
2918 2779 for f in match.files():
2919 2780 f = self.dirstate.normalize(f)
2920 2781 if f == b'.' or f in matched or f in wctx.substate:
2921 2782 continue
2922 2783 if f in status.deleted:
2923 2784 fail(f, _(b'file not found!'))
2924 2785 # Is it a directory that exists or used to exist?
2925 2786 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2926 2787 d = f + b'/'
2927 2788 for mf in matched:
2928 2789 if mf.startswith(d):
2929 2790 break
2930 2791 else:
2931 2792 fail(f, _(b"no match under directory!"))
2932 2793 elif f not in self.dirstate:
2933 2794 fail(f, _(b"file not tracked!"))
2934 2795
2935 2796 @unfilteredmethod
2936 2797 def commit(
2937 2798 self,
2938 2799 text=b"",
2939 2800 user=None,
2940 2801 date=None,
2941 2802 match=None,
2942 2803 force=False,
2943 2804 editor=None,
2944 2805 extra=None,
2945 2806 ):
2946 2807 """Add a new revision to current repository.
2947 2808
2948 2809 Revision information is gathered from the working directory,
2949 2810 match can be used to filter the committed files. If editor is
2950 2811 supplied, it is called to get a commit message.
2951 2812 """
2952 2813 if extra is None:
2953 2814 extra = {}
2954 2815
2955 2816 def fail(f, msg):
2956 2817 raise error.Abort(b'%s: %s' % (f, msg))
2957 2818
2958 2819 if not match:
2959 2820 match = matchmod.always()
2960 2821
2961 2822 if not force:
2962 2823 match.bad = fail
2963 2824
2964 2825 # lock() for recent changelog (see issue4368)
2965 2826 with self.wlock(), self.lock():
2966 2827 wctx = self[None]
2967 2828 merge = len(wctx.parents()) > 1
2968 2829
2969 2830 if not force and merge and not match.always():
2970 2831 raise error.Abort(
2971 2832 _(
2972 2833 b'cannot partially commit a merge '
2973 2834 b'(do not specify files or patterns)'
2974 2835 )
2975 2836 )
2976 2837
2977 2838 status = self.status(match=match, clean=force)
2978 2839 if force:
2979 2840 status.modified.extend(
2980 2841 status.clean
2981 2842 ) # mq may commit clean files
2982 2843
2983 2844 # check subrepos
2984 2845 subs, commitsubs, newstate = subrepoutil.precommit(
2985 2846 self.ui, wctx, status, match, force=force
2986 2847 )
2987 2848
2988 2849 # make sure all explicit patterns are matched
2989 2850 if not force:
2990 2851 self.checkcommitpatterns(wctx, match, status, fail)
2991 2852
2992 2853 cctx = context.workingcommitctx(
2993 2854 self, status, text, user, date, extra
2994 2855 )
2995 2856
2996 2857 ms = mergestatemod.mergestate.read(self)
2997 2858 mergeutil.checkunresolved(ms)
2998 2859
2999 2860 # internal config: ui.allowemptycommit
3000 2861 if cctx.isempty() and not self.ui.configbool(
3001 2862 b'ui', b'allowemptycommit'
3002 2863 ):
3003 2864 self.ui.debug(b'nothing to commit, clearing merge state\n')
3004 2865 ms.reset()
3005 2866 return None
3006 2867
3007 2868 if merge and cctx.deleted():
3008 2869 raise error.Abort(_(b"cannot commit merge with missing files"))
3009 2870
3010 2871 if editor:
3011 2872 cctx._text = editor(self, cctx, subs)
3012 2873 edited = text != cctx._text
3013 2874
3014 2875 # Save commit message in case this transaction gets rolled back
3015 2876 # (e.g. by a pretxncommit hook). Leave the content alone on
3016 2877 # the assumption that the user will use the same editor again.
3017 2878 msgfn = self.savecommitmessage(cctx._text)
3018 2879
3019 2880 # commit subs and write new state
3020 2881 if subs:
3021 2882 uipathfn = scmutil.getuipathfn(self)
3022 2883 for s in sorted(commitsubs):
3023 2884 sub = wctx.sub(s)
3024 2885 self.ui.status(
3025 2886 _(b'committing subrepository %s\n')
3026 2887 % uipathfn(subrepoutil.subrelpath(sub))
3027 2888 )
3028 2889 sr = sub.commit(cctx._text, user, date)
3029 2890 newstate[s] = (newstate[s][0], sr)
3030 2891 subrepoutil.writestate(self, newstate)
3031 2892
3032 2893 p1, p2 = self.dirstate.parents()
3033 2894 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3034 2895 try:
3035 2896 self.hook(
3036 2897 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3037 2898 )
3038 2899 with self.transaction(b'commit'):
3039 2900 ret = self.commitctx(cctx, True)
3040 2901 # update bookmarks, dirstate and mergestate
3041 2902 bookmarks.update(self, [p1, p2], ret)
3042 2903 cctx.markcommitted(ret)
3043 2904 ms.reset()
3044 2905 except: # re-raises
3045 2906 if edited:
3046 2907 self.ui.write(
3047 2908 _(b'note: commit message saved in %s\n') % msgfn
3048 2909 )
3049 2910 self.ui.write(
3050 2911 _(
3051 2912 b"note: use 'hg commit --logfile "
3052 2913 b".hg/last-message.txt --edit' to reuse it\n"
3053 2914 )
3054 2915 )
3055 2916 raise
3056 2917
3057 2918 def commithook(unused_success):
3058 2919 # hack for command that use a temporary commit (eg: histedit)
3059 2920 # temporary commit got stripped before hook release
3060 2921 if self.changelog.hasnode(ret):
3061 2922 self.hook(
3062 2923 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3063 2924 )
3064 2925
3065 2926 self._afterlock(commithook)
3066 2927 return ret
3067 2928
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        # Delegates to the commit module; origctx is used by convert to
        # reuse the original files list so hashes stay stable.
        return commit.commitctx(self, ctx, error=error, origctx=origctx)
3071 2932
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        # Only flush if the phasecache was actually loaded on this instance.
        if '_phasecache' in vars(self):
            self._phasecache.write()
3089 2950
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
3121 2982
3122 2983 def status(
3123 2984 self,
3124 2985 node1=b'.',
3125 2986 node2=None,
3126 2987 match=None,
3127 2988 ignored=False,
3128 2989 clean=False,
3129 2990 unknown=False,
3130 2991 listsubrepos=False,
3131 2992 ):
3132 2993 '''a convenience method that calls node1.status(node2)'''
3133 2994 return self[node1].status(
3134 2995 node2, match, ignored, clean, unknown, listsubrepos
3135 2996 )
3136 2997
3137 2998 def addpostdsstatus(self, ps):
3138 2999 """Add a callback to run within the wlock, at the point at which status
3139 3000 fixups happen.
3140 3001
3141 3002 On status completion, callback(wctx, status) will be called with the
3142 3003 wlock held, unless the dirstate has changed from underneath or the wlock
3143 3004 couldn't be grabbed.
3144 3005
3145 3006 Callbacks should not capture and use a cached copy of the dirstate --
3146 3007 it might change in the meanwhile. Instead, they should access the
3147 3008 dirstate via wctx.repo().dirstate.
3148 3009
3149 3010 This list is emptied out after each status run -- extensions should
3150 3011 make sure it adds to this list each time dirstate.status is called.
3151 3012 Extensions should also make sure they don't call this for statuses
3152 3013 that don't involve the dirstate.
3153 3014 """
3154 3015
3155 3016 # The list is located here for uniqueness reasons -- it is actually
3156 3017 # managed by the workingctx, but that isn't unique per-repo.
3157 3018 self._postdsstatus.append(ps)
3158 3019
3159 3020 def postdsstatus(self):
3160 3021 """Used by workingctx to get the list of post-dirstate-status hooks."""
3161 3022 return self._postdsstatus
3162 3023
3163 3024 def clearpostdsstatus(self):
3164 3025 """Used by workingctx to clear post-dirstate-status hooks."""
3165 3026 del self._postdsstatus[:]
3166 3027
3167 3028 def heads(self, start=None):
3168 3029 if start is None:
3169 3030 cl = self.changelog
3170 3031 headrevs = reversed(cl.headrevs())
3171 3032 return [cl.node(rev) for rev in headrevs]
3172 3033
3173 3034 heads = self.changelog.heads(start)
3174 3035 # sort the output in rev descending order
3175 3036 return sorted(heads, key=self.changelog.rev, reverse=True)
3176 3037
3177 3038 def branchheads(self, branch=None, start=None, closed=False):
3178 3039 '''return a (possibly filtered) list of heads for the given branch
3179 3040
3180 3041 Heads are returned in topological order, from newest to oldest.
3181 3042 If branch is None, use the dirstate branch.
3182 3043 If start is not None, return only heads reachable from start.
3183 3044 If closed is True, return heads that are marked as closed as well.
3184 3045 '''
3185 3046 if branch is None:
3186 3047 branch = self[None].branch()
3187 3048 branches = self.branchmap()
3188 3049 if not branches.hasbranch(branch):
3189 3050 return []
3190 3051 # the cache returns heads ordered lowest to highest
3191 3052 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3192 3053 if start is not None:
3193 3054 # filter out the heads that cannot be reached from startrev
3194 3055 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3195 3056 bheads = [h for h in bheads if h in fbheads]
3196 3057 return bheads
3197 3058
3198 3059 def branches(self, nodes):
3199 3060 if not nodes:
3200 3061 nodes = [self.changelog.tip()]
3201 3062 b = []
3202 3063 for n in nodes:
3203 3064 t = n
3204 3065 while True:
3205 3066 p = self.changelog.parents(n)
3206 3067 if p[1] != nullid or p[0] == nullid:
3207 3068 b.append((t, n, p[0], p[1]))
3208 3069 break
3209 3070 n = p[0]
3210 3071 return b
3211 3072
3212 3073 def between(self, pairs):
3213 3074 r = []
3214 3075
3215 3076 for top, bottom in pairs:
3216 3077 n, l, i = top, [], 0
3217 3078 f = 1
3218 3079
3219 3080 while n != bottom and n != nullid:
3220 3081 p = self.changelog.parents(n)[0]
3221 3082 if i == f:
3222 3083 l.append(n)
3223 3084 f = f * 2
3224 3085 n = p
3225 3086 i += 1
3226 3087
3227 3088 r.append(l)
3228 3089
3229 3090 return r
3230 3091
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # Intentionally a no-op in the base implementation.
3236 3097
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance; registered callables receive a
        pushop (with repo, remote and outgoing attributes) and are called
        before changesets are pushed.
        """
        return util.hooks()
3243 3104
    def pushkey(self, namespace, key, old, new):
        """Update ``key`` in ``namespace`` from ``old`` to ``new``.

        Fires the ``prepushkey`` hook first; a hook abort vetoes the
        update and makes this method return False.  Otherwise the pushkey
        backend performs the update, a ``pushkey`` hook run is scheduled
        for after the current lock is released, and the backend's result
        is returned.
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # include pending-transaction hook arguments
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            # the pre-hook vetoed the update; report and bail out
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        # defer the post-update hook until the lock is released
        self._afterlock(runhook)
        return ret
3276 3137
3277 3138 def listkeys(self, namespace):
3278 3139 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3279 3140 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3280 3141 values = pushkey.list(self, namespace)
3281 3142 self.hook(b'listkeys', namespace=namespace, values=values)
3282 3143 return values
3283 3144
3284 3145 def debugwireargs(self, one, two, three=None, four=None, five=None):
3285 3146 '''used to test argument passing over the wire'''
3286 3147 return b"%s %s %s %s %s" % (
3287 3148 one,
3288 3149 two,
3289 3150 pycompat.bytestr(three),
3290 3151 pycompat.bytestr(four),
3291 3152 pycompat.bytestr(five),
3292 3153 )
3293 3154
3294 3155 def savecommitmessage(self, text):
3295 3156 fp = self.vfs(b'last-message.txt', b'wb')
3296 3157 try:
3297 3158 fp.write(text)
3298 3159 finally:
3299 3160 fp.close()
3300 3161 return self.pathto(fp.name[len(self.root) + 1 :])
3301 3162
3302 3163
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each ``(vfs, src, dest)`` in
    ``files``.

    The triples are copied eagerly so the returned closure keeps no
    reference to the caller's list.
    """
    pending = [tuple(entry) for entry in files]

    def run():
        for vfs, src, dest in pending:
            # When src and dest name the same file, vfs.rename is a
            # no-op that would leave both on disk; unlink dest first so
            # the rename cannot degenerate that way.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:
                # the journal file does not exist yet
                pass

    return run
3319 3180
3320 3181
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith(b'journal')
    # replace only the leading 'journal' so suffixes survive intact
    return os.path.join(directory, name.replace(b'journal', b'undo', 1))
3325 3186
3326 3187
def instance(ui, path, create, intents=None, createopts=None):
    """Open (and, when ``create`` is true, first create) the local
    repository at ``path``.

    ``path`` is reduced to a local filesystem path via
    ``util.urllocalpath`` before use.
    """
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)
3333 3194
3334 3195
def islocal(path):
    """Return True: this module only handles local repositories."""
    return True
3337 3198
3338 3199
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in; missing keys are filled in and the input is left unmodified.
    """
    opts = dict(createopts) if createopts else {}

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3352 3213
3353 3214
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.

    Raises ProgrammingError when ``createopts`` lacks a ``backend`` key
    and Abort when the backend or configured compression engine is
    unsupported.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        # fncache and dotencode only make sense on top of the store
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    # for/else: leave `compengine` bound to the first configured engine
    # that is actually available; abort when none of them is
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        # changeset-stored copies need side data support as well
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(NODEMAP_REQUIREMENT)

    return requirements
3455 3316
3456 3317
def filterknowncreateopts(ui, createopts):
    """Filter a dict of repo creation options against the known ones.

    Receives a dict of repo creation options and returns the subset we do
    not know how to handle.

    This runs as part of repository creation: if the returned dict is
    non-empty, creation is refused, because options were requested that
    no loaded code recognizes.

    Extensions can wrap this function to filter out creation options they
    know how to handle.
    """
    recognized = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {
        key: value
        for key, value in createopts.items()
        if key not in recognized
    }
3482 3343
3483 3344
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).

    Raises Abort on unknown creation options and RepoError when a
    repository already exists at ``path``.
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3586 3447
3587 3448
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Let the instance release its resources first.
    repo.close()

    # Strategy: swap the object's type for one where every attribute
    # lookup fails.  close() must stay callable, though, because some
    # repo constructors call close() on references they hold.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item != 'close':
                raise error.ProgrammingError(
                    b'repo instances should not be used after unshare'
                )
            return object.__getattribute__(self, item)

        def close(self):
            pass

    # A repoview intercepts __setattr__, so operate at the lowest level
    # possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,1203 +1,1202
1 1 $ cat >> "$HGRCPATH" << EOF
2 2 > [ui]
3 3 > merge = :merge3
4 4 > EOF
5 5
6 6 init
7 7
8 8 $ hg init repo
9 9 $ cd repo
10 10
11 11 commit
12 12
13 13 $ echo 'a' > a
14 14 $ hg ci -A -m test -u nobody -d '1 0'
15 15 adding a
16 16
17 17 annotate -c
18 18
19 19 $ hg annotate -c a
20 20 8435f90966e4: a
21 21
22 22 annotate -cl
23 23
24 24 $ hg annotate -cl a
25 25 8435f90966e4:1: a
26 26
27 27 annotate -d
28 28
29 29 $ hg annotate -d a
30 30 Thu Jan 01 00:00:01 1970 +0000: a
31 31
32 32 annotate -n
33 33
34 34 $ hg annotate -n a
35 35 0: a
36 36
37 37 annotate -nl
38 38
39 39 $ hg annotate -nl a
40 40 0:1: a
41 41
42 42 annotate -u
43 43
44 44 $ hg annotate -u a
45 45 nobody: a
46 46
47 47 annotate -cdnu
48 48
49 49 $ hg annotate -cdnu a
50 50 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
51 51
52 52 annotate -cdnul
53 53
54 54 $ hg annotate -cdnul a
55 55 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
56 56
57 57 annotate (JSON)
58 58
59 59 $ hg annotate -Tjson a
60 60 [
61 61 {
62 62 "lines": [{"line": "a\n", "rev": 0}],
63 63 "path": "a"
64 64 }
65 65 ]
66 66
67 67 $ hg annotate -Tjson -cdfnul a
68 68 [
69 69 {
70 70 "lines": [{"date": [1.0, 0], "line": "a\n", "lineno": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
71 71 "path": "a"
72 72 }
73 73 ]
74 74
75 75 log-like templating
76 76
77 77 $ hg annotate -T'{lines % "{rev} {node|shortest}: {line}"}' a
78 78 0 8435: a
79 79
80 80 '{lineno}' field should be populated as necessary
81 81
82 82 $ hg annotate -T'{lines % "{rev}:{lineno}: {line}"}' a
83 83 0:1: a
84 84 $ hg annotate -Ta a \
85 85 > --config templates.a='"{lines % "{rev}:{lineno}: {line}"}"'
86 86 0:1: a
87 87
88 88 $ cat <<EOF >>a
89 89 > a
90 90 > a
91 91 > EOF
92 92 $ hg ci -ma1 -d '1 0'
93 93 $ hg cp a b
94 94 $ hg ci -mb -d '1 0'
95 95 $ cat <<EOF >> b
96 96 > b4
97 97 > b5
98 98 > b6
99 99 > EOF
100 100 $ hg ci -mb2 -d '2 0'
101 101
102 102 default output of '{lines}' should be readable
103 103
104 104 $ hg annotate -T'{lines}' a
105 105 0: a
106 106 1: a
107 107 1: a
108 108 $ hg annotate -T'{join(lines, "\n")}' a
109 109 0: a
110 110
111 111 1: a
112 112
113 113 1: a
114 114
115 115 several filters can be applied to '{lines}'
116 116
117 117 $ hg annotate -T'{lines|json}\n' a
118 118 [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}]
119 119 $ hg annotate -T'{lines|stringify}' a
120 120 0: a
121 121 1: a
122 122 1: a
123 123 $ hg annotate -T'{lines|count}\n' a
124 124 3
125 125
126 126 annotate multiple files (JSON)
127 127
128 128 $ hg annotate -Tjson a b
129 129 [
130 130 {
131 131 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
132 132 "path": "a"
133 133 },
134 134 {
135 135 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
136 136 "path": "b"
137 137 }
138 138 ]
139 139
140 140 annotate multiple files (template)
141 141
142 142 $ hg annotate -T'== {path} ==\n{lines % "{rev}: {line}"}' a b
143 143 == a ==
144 144 0: a
145 145 1: a
146 146 1: a
147 147 == b ==
148 148 0: a
149 149 1: a
150 150 1: a
151 151 3: b4
152 152 3: b5
153 153 3: b6
154 154
155 155 annotate -n b
156 156
157 157 $ hg annotate -n b
158 158 0: a
159 159 1: a
160 160 1: a
161 161 3: b4
162 162 3: b5
163 163 3: b6
164 164
165 165 annotate --no-follow b
166 166
167 167 $ hg annotate --no-follow b
168 168 2: a
169 169 2: a
170 170 2: a
171 171 3: b4
172 172 3: b5
173 173 3: b6
174 174
175 175 annotate -nl b
176 176
177 177 $ hg annotate -nl b
178 178 0:1: a
179 179 1:2: a
180 180 1:3: a
181 181 3:4: b4
182 182 3:5: b5
183 183 3:6: b6
184 184
185 185 annotate -nf b
186 186
187 187 $ hg annotate -nf b
188 188 0 a: a
189 189 1 a: a
190 190 1 a: a
191 191 3 b: b4
192 192 3 b: b5
193 193 3 b: b6
194 194
195 195 annotate -nlf b
196 196
197 197 $ hg annotate -nlf b
198 198 0 a:1: a
199 199 1 a:2: a
200 200 1 a:3: a
201 201 3 b:4: b4
202 202 3 b:5: b5
203 203 3 b:6: b6
204 204
205 205 $ hg up -C 2
206 206 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
207 207 $ cat <<EOF >> b
208 208 > b4
209 209 > c
210 210 > b5
211 211 > EOF
212 212 $ hg ci -mb2.1 -d '2 0'
213 213 created new head
214 214 $ hg merge
215 215 merging b
216 216 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
217 217 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
218 218 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
219 219 [1]
220 220 $ cat b
221 221 a
222 222 a
223 223 a
224 224 <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
225 225 b4
226 226 c
227 227 b5
228 228 ||||||| base
229 229 =======
230 230 b4
231 231 b5
232 232 b6
233 233 >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
234 234 $ cat <<EOF > b
235 235 > a
236 236 > a
237 237 > a
238 238 > b4
239 239 > c
240 240 > b5
241 241 > EOF
242 242 $ hg resolve --mark -q
243 243 $ rm b.orig
244 244 $ hg ci -mmergeb -d '3 0'
245 245
246 246 annotate after merge
247 247
248 248 $ hg annotate -nf b
249 249 0 a: a
250 250 1 a: a
251 251 1 a: a
252 252 3 b: b4
253 253 4 b: c
254 254 3 b: b5
255 255
256 256 annotate after merge with -l
257 257
258 258 $ hg annotate -nlf b
259 259 0 a:1: a
260 260 1 a:2: a
261 261 1 a:3: a
262 262 3 b:4: b4
263 263 4 b:5: c
264 264 3 b:5: b5
265 265
266 266 $ hg up -C 1
267 267 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
268 268 $ hg cp a b
269 269 $ cat <<EOF > b
270 270 > a
271 271 > z
272 272 > a
273 273 > EOF
274 274 $ hg ci -mc -d '3 0'
275 275 created new head
276 276 Work around the pure version not resolving the conflict like native code
277 277 #if pure
278 278 $ hg merge
279 279 merging b
280 280 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
281 281 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
282 282 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
283 283 [1]
284 284 $ cat <<EOF > b
285 285 > a
286 286 > z
287 287 > a
288 288 > b4
289 289 > c
290 290 > b5
291 291 > EOF
292 292 $ hg resolve -m b
293 293 (no more unresolved files)
294 294 $ rm b.orig
295 295 #else
296 296 $ hg merge
297 297 merging b
298 298 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
299 299 (branch merge, don't forget to commit)
300 300 #endif
301 301 $ echo d >> b
302 302 $ hg ci -mmerge2 -d '4 0'
303 303
304 304 annotate after rename merge
305 305
306 306 $ hg annotate -nf b
307 307 0 a: a
308 308 6 b: z
309 309 1 a: a
310 310 3 b: b4
311 311 4 b: c
312 312 3 b: b5
313 313 7 b: d
314 314
315 315 annotate after rename merge with -l
316 316
317 317 $ hg annotate -nlf b
318 318 0 a:1: a
319 319 6 b:2: z
320 320 1 a:3: a
321 321 3 b:4: b4
322 322 4 b:5: c
323 323 3 b:5: b5
324 324 7 b:7: d
325 325
326 326 --skip nothing (should be the same as no --skip at all)
327 327
328 328 $ hg annotate -nlf b --skip '1::0'
329 329 0 a:1: a
330 330 6 b:2: z
331 331 1 a:3: a
332 332 3 b:4: b4
333 333 4 b:5: c
334 334 3 b:5: b5
335 335 7 b:7: d
336 336
337 337 --skip a modified line. Note a slight behavior difference in pure - this is
338 338 because the pure code comes up with slightly different deltas internally.
339 339
340 340 $ hg annotate -nlf b --skip 6
341 341 0 a:1: a
342 342 1 a:2* z (no-pure !)
343 343 0 a:1* z (pure !)
344 344 1 a:3: a
345 345 3 b:4: b4
346 346 4 b:5: c
347 347 3 b:5: b5
348 348 7 b:7: d
349 349
350 350 --skip added lines (and test multiple skip)
351 351
352 352 $ hg annotate -nlf b --skip 3
353 353 0 a:1: a
354 354 6 b:2: z
355 355 1 a:3: a
356 356 1 a:3* b4
357 357 4 b:5: c
358 358 1 a:3* b5
359 359 7 b:7: d
360 360
361 361 $ hg annotate -nlf b --skip 4
362 362 0 a:1: a
363 363 6 b:2: z
364 364 1 a:3: a
365 365 3 b:4: b4
366 366 1 a:3* c
367 367 3 b:5: b5
368 368 7 b:7: d
369 369
370 370 $ hg annotate -nlf b --skip 3 --skip 4
371 371 0 a:1: a
372 372 6 b:2: z
373 373 1 a:3: a
374 374 1 a:3* b4
375 375 1 a:3* c
376 376 1 a:3* b5
377 377 7 b:7: d
378 378
379 379 $ hg annotate -nlf b --skip 'merge()'
380 380 0 a:1: a
381 381 6 b:2: z
382 382 1 a:3: a
383 383 3 b:4: b4
384 384 4 b:5: c
385 385 3 b:5: b5
386 386 3 b:5* d
387 387
388 388 --skip everything -- use the revision the file was introduced in
389 389
390 390 $ hg annotate -nlf b --skip 'all()'
391 391 0 a:1: a
392 392 0 a:1* z
393 393 0 a:1* a
394 394 0 a:1* b4
395 395 0 a:1* c
396 396 0 a:1* b5
397 397 0 a:1* d
398 398
399 399 Issue2807: alignment of line numbers with -l
400 400
401 401 $ echo more >> b
402 402 $ hg ci -mmore -d '5 0'
403 403 $ echo more >> b
404 404 $ hg ci -mmore -d '6 0'
405 405 $ echo more >> b
406 406 $ hg ci -mmore -d '7 0'
407 407 $ hg annotate -nlf b
408 408 0 a: 1: a
409 409 6 b: 2: z
410 410 1 a: 3: a
411 411 3 b: 4: b4
412 412 4 b: 5: c
413 413 3 b: 5: b5
414 414 7 b: 7: d
415 415 8 b: 8: more
416 416 9 b: 9: more
417 417 10 b:10: more
418 418
419 419 linkrev vs rev
420 420
421 421 $ hg annotate -r tip -n a
422 422 0: a
423 423 1: a
424 424 1: a
425 425
426 426 linkrev vs rev with -l
427 427
428 428 $ hg annotate -r tip -nl a
429 429 0:1: a
430 430 1:2: a
431 431 1:3: a
432 432
433 433 Issue589: "undelete" sequence leads to crash
434 434
435 435 annotate was crashing when trying to --follow something
436 436
437 437 like A -> B -> A
438 438
439 439 generate ABA rename configuration
440 440
441 441 $ echo foo > foo
442 442 $ hg add foo
443 443 $ hg ci -m addfoo
444 444 $ hg rename foo bar
445 445 $ hg ci -m renamefoo
446 446 $ hg rename bar foo
447 447 $ hg ci -m renamebar
448 448
449 449 annotate after ABA with follow
450 450
451 451 $ hg annotate --follow foo
452 452 foo: foo
453 453
454 454 missing file
455 455
456 456 $ hg ann nosuchfile
457 457 abort: nosuchfile: no such file in rev e9e6b4fa872f
458 458 [255]
459 459
460 460 annotate file without '\n' on last line
461 461
462 462 $ printf "" > c
463 463 $ hg ci -A -m test -u nobody -d '1 0'
464 464 adding c
465 465 $ hg annotate c
466 466 $ printf "a\nb" > c
467 467 $ hg ci -m test
468 468 $ hg annotate c
469 469 [0-9]+: a (re)
470 470 [0-9]+: b (re)
471 471
472 472 Issue3841: check annotation of the file of which filelog includes
473 473 merging between the revision and its ancestor
474 474
475 475 to reproduce the situation with recent Mercurial, this script uses (1)
476 476 "hg debugsetparents" to merge without ancestor check by "hg merge",
477 477 and (2) the extension to allow filelog merging between the revision
478 478 and its ancestor by overriding "repo._filecommit".
479 479
480 480 $ cat > ../legacyrepo.py <<EOF
481 481 > from __future__ import absolute_import
482 > from mercurial import error, node
483 > def reposetup(ui, repo):
484 > class legacyrepo(repo.__class__):
485 > def _filecommit(self, fctx, manifest1, manifest2,
482 > from mercurial import commit, error, extensions, node
483 > def _filecommit(orig, repo, fctx, manifest1, manifest2,
486 484 > linkrev, tr, includecopymeta):
487 485 > fname = fctx.path()
488 486 > text = fctx.data()
489 > flog = self.file(fname)
487 > flog = repo.file(fname)
490 488 > fparent1 = manifest1.get(fname, node.nullid)
491 489 > fparent2 = manifest2.get(fname, node.nullid)
492 490 > meta = {}
493 491 > copy = fctx.copysource()
494 492 > if copy and copy != fname:
495 493 > raise error.Abort('copying is not supported')
496 494 > if fparent2 != node.nullid:
497 495 > return flog.add(text, meta, tr, linkrev,
498 496 > fparent1, fparent2), 'modified'
499 497 > raise error.Abort('only merging is supported')
500 > repo.__class__ = legacyrepo
498 > def uisetup(ui):
499 > extensions.wrapfunction(commit, '_filecommit', _filecommit)
501 500 > EOF
502 501
503 502 $ cat > baz <<EOF
504 503 > 1
505 504 > 2
506 505 > 3
507 506 > 4
508 507 > 5
509 508 > EOF
510 509 $ hg add baz
511 510 $ hg commit -m "baz:0"
512 511
513 512 $ cat > baz <<EOF
514 513 > 1 baz:1
515 514 > 2
516 515 > 3
517 516 > 4
518 517 > 5
519 518 > EOF
520 519 $ hg commit -m "baz:1"
521 520
522 521 $ cat > baz <<EOF
523 522 > 1 baz:1
524 523 > 2 baz:2
525 524 > 3
526 525 > 4
527 526 > 5
528 527 > EOF
529 528 $ hg debugsetparents 17 17
530 529 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
531 530 $ hg debugindexdot baz
532 531 digraph G {
533 532 -1 -> 0
534 533 0 -> 1
535 534 1 -> 2
536 535 1 -> 2
537 536 }
538 537 $ hg annotate baz
539 538 17: 1 baz:1
540 539 18: 2 baz:2
541 540 16: 3
542 541 16: 4
543 542 16: 5
544 543
545 544 $ cat > baz <<EOF
546 545 > 1 baz:1
547 546 > 2 baz:2
548 547 > 3 baz:3
549 548 > 4
550 549 > 5
551 550 > EOF
552 551 $ hg commit -m "baz:3"
553 552
554 553 $ cat > baz <<EOF
555 554 > 1 baz:1
556 555 > 2 baz:2
557 556 > 3 baz:3
558 557 > 4 baz:4
559 558 > 5
560 559 > EOF
561 560 $ hg debugsetparents 19 18
562 561 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
563 562 $ hg debugindexdot baz
564 563 digraph G {
565 564 -1 -> 0
566 565 0 -> 1
567 566 1 -> 2
568 567 1 -> 2
569 568 2 -> 3
570 569 3 -> 4
571 570 2 -> 4
572 571 }
573 572 $ hg annotate baz
574 573 17: 1 baz:1
575 574 18: 2 baz:2
576 575 19: 3 baz:3
577 576 20: 4 baz:4
578 577 16: 5
579 578
580 579 annotate clean file
581 580
582 581 $ hg annotate -ncr "wdir()" foo
583 582 11 472b18db256d : foo
584 583
585 584 annotate modified file
586 585
587 586 $ echo foofoo >> foo
588 587 $ hg annotate -r "wdir()" foo
589 588 11 : foo
590 589 20+: foofoo
591 590
592 591 $ hg annotate -cr "wdir()" foo
593 592 472b18db256d : foo
594 593 b6bedd5477e7+: foofoo
595 594
596 595 $ hg annotate -ncr "wdir()" foo
597 596 11 472b18db256d : foo
598 597 20 b6bedd5477e7+: foofoo
599 598
600 599 $ hg annotate --debug -ncr "wdir()" foo
601 600 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
602 601 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
603 602
604 603 $ hg annotate -udr "wdir()" foo
605 604 test Thu Jan 01 00:00:00 1970 +0000: foo
606 605 test [A-Za-z0-9:+ ]+: foofoo (re)
607 606
608 607 $ hg annotate -ncr "wdir()" -Tjson foo
609 608 [
610 609 {
611 610 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": "ffffffffffffffffffffffffffffffffffffffff", "rev": 2147483647}],
612 611 "path": "foo"
613 612 }
614 613 ]
615 614
616 615 annotate added file
617 616
618 617 $ echo bar > bar
619 618 $ hg add bar
620 619 $ hg annotate -ncr "wdir()" bar
621 620 20 b6bedd5477e7+: bar
622 621
623 622 annotate renamed file
624 623
625 624 $ hg rename foo renamefoo2
626 625 $ hg annotate -ncr "wdir()" renamefoo2
627 626 11 472b18db256d : foo
628 627 20 b6bedd5477e7+: foofoo
629 628
630 629 annotate missing file
631 630
632 631 $ rm baz
633 632
634 633 $ hg annotate -ncr "wdir()" baz
635 634 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
636 635 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
637 636 [255]
638 637
639 638 annotate removed file
640 639
641 640 $ hg rm baz
642 641
643 642 $ hg annotate -ncr "wdir()" baz
644 643 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
645 644 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
646 645 [255]
647 646
648 647 $ hg revert --all --no-backup --quiet
649 648 $ hg id -n
650 649 20
651 650
652 651 Test followlines() revset; we usually check both followlines(pat, range) and
653 652 followlines(pat, range, descend=True) to make sure both give the same result
654 653 when they should.
655 654
656 655 $ echo a >> foo
657 656 $ hg ci -m 'foo: add a'
658 657 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
659 658 16: baz:0
660 659 19: baz:3
661 660 20: baz:4
662 661 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
663 662 16: baz:0
664 663 19: baz:3
665 664 20: baz:4
666 665 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
667 666 16: baz:0
668 667 19: baz:3
669 668 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
670 669 19: baz:3
671 670 20: baz:4
672 671 $ printf "0\n0\n" | cat - baz > baz1
673 672 $ mv baz1 baz
674 673 $ hg ci -m 'added two lines with 0'
675 674 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
676 675 16: baz:0
677 676 19: baz:3
678 677 20: baz:4
679 678 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
680 679 19: baz:3
681 680 20: baz:4
682 681 $ echo 6 >> baz
683 682 $ hg ci -m 'added line 8'
684 683 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
685 684 16: baz:0
686 685 19: baz:3
687 686 20: baz:4
688 687 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
689 688 19: baz:3
690 689 20: baz:4
691 690 $ sed 's/3/3+/' baz > baz.new
692 691 $ mv baz.new baz
693 692 $ hg ci -m 'baz:3->3+'
694 693 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
695 694 16: baz:0
696 695 19: baz:3
697 696 20: baz:4
698 697 24: baz:3->3+
699 698 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
700 699 19: baz:3
701 700 20: baz:4
702 701 24: baz:3->3+
703 702 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
704 703 22: added two lines with 0
705 704
706 705 file patterns are okay
707 706 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
708 707 22: added two lines with 0
709 708
710 709 renames are followed
711 710 $ hg mv baz qux
712 711 $ sed 's/4/4+/' qux > qux.new
713 712 $ mv qux.new qux
714 713 $ hg ci -m 'qux:4->4+'
715 714 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
716 715 16: baz:0
717 716 19: baz:3
718 717 20: baz:4
719 718 24: baz:3->3+
720 719 25: qux:4->4+
721 720
722 721 but are missed when following children
723 722 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
724 723 24: baz:3->3+
725 724
726 725 merge
727 726 $ hg up 24 --quiet
728 727 $ echo 7 >> baz
729 728 $ hg ci -m 'one more line, out of line range'
730 729 created new head
731 730 $ sed 's/3+/3-/' baz > baz.new
732 731 $ mv baz.new baz
733 732 $ hg ci -m 'baz:3+->3-'
734 733 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
735 734 16: baz:0
736 735 19: baz:3
737 736 20: baz:4
738 737 24: baz:3->3+
739 738 27: baz:3+->3-
740 739 $ hg merge 25
741 740 merging baz and qux to qux
742 741 warning: conflicts while merging qux! (edit, then use 'hg resolve --mark')
743 742 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
744 743 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
745 744 [1]
746 745 $ cat qux
747 746 0
748 747 0
749 748 1 baz:1
750 749 2 baz:2
751 750 <<<<<<< working copy: 863de62655ef - test: baz:3+->3-
752 751 3- baz:3
753 752 4 baz:4
754 753 ||||||| base
755 754 3+ baz:3
756 755 4 baz:4
757 756 =======
758 757 3+ baz:3
759 758 4+ baz:4
760 759 >>>>>>> merge rev: cb8df70ae185 - test: qux:4->4+
761 760 5
762 761 6
763 762 7
764 763 $ cat > qux <<EOF
765 764 > 0
766 765 > 0
767 766 > 1 baz:1
768 767 > 2 baz:2
769 768 > 3- baz:3
770 769 > 4 baz:4
771 770 > 5
772 771 > 6
773 772 > 7
774 773 > EOF
775 774 $ hg resolve --mark -q
776 775 $ rm qux.orig
777 776 $ hg ci -m merge
778 777 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
779 778 16: baz:0
780 779 19: baz:3
781 780 20: baz:4
782 781 24: baz:3->3+
783 782 25: qux:4->4+
784 783 27: baz:3+->3-
785 784 28: merge
786 785 $ hg up 25 --quiet
787 786 $ hg merge 27
788 787 merging qux and baz to qux
789 788 warning: conflicts while merging qux! (edit, then use 'hg resolve --mark')
790 789 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
791 790 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
792 791 [1]
793 792 $ cat qux
794 793 0
795 794 0
796 795 1 baz:1
797 796 2 baz:2
798 797 <<<<<<< working copy: cb8df70ae185 - test: qux:4->4+
799 798 3+ baz:3
800 799 4+ baz:4
801 800 ||||||| base
802 801 3+ baz:3
803 802 4 baz:4
804 803 =======
805 804 3- baz:3
806 805 4 baz:4
807 806 >>>>>>> merge rev: 863de62655ef - test: baz:3+->3-
808 807 5
809 808 6
810 809 7
811 810 $ cat > qux <<EOF
812 811 > 0
813 812 > 0
814 813 > 1 baz:1
815 814 > 2 baz:2
816 815 > 3+ baz:3
817 816 > 4+ baz:4
818 817 > 5
819 818 > 6
820 819 > EOF
821 820 $ hg resolve --mark -q
822 821 $ rm qux.orig
823 822 $ hg ci -m 'merge from other side'
824 823 created new head
825 824 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
826 825 16: baz:0
827 826 19: baz:3
828 827 20: baz:4
829 828 24: baz:3->3+
830 829 25: qux:4->4+
831 830 27: baz:3+->3-
832 831 29: merge from other side
833 832 $ hg up 24 --quiet
834 833
835 834 we are missing the branch with rename when following children
836 835 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=26, descend=True)'
837 836 27: baz:3+->3-
838 837
839 838 we follow all branches in descending direction
840 839 $ hg up 23 --quiet
841 840 $ sed 's/3/+3/' baz > baz.new
842 841 $ mv baz.new baz
843 842 $ hg ci -m 'baz:3->+3'
844 843 created new head
845 844 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
846 845 @ 30: baz:3->+3
847 846 :
848 847 : o 27: baz:3+->3-
849 848 : :
850 849 : o 24: baz:3->3+
851 850 :/
852 851 o 20: baz:4
853 852 |\
854 853 | o 19: baz:3
855 854 |/
856 855 o 18: baz:2
857 856 :
858 857 o 16: baz:0
859 858 |
860 859 ~
861 860
862 861 Issue5595: on a merge changeset with different line ranges depending on
863 862 parent, be conservative and use the surrounding interval to avoid loosing
864 863 track of possible further descendants in specified range.
865 864
866 865 $ hg up 23 --quiet
867 866 $ hg cat baz -r 24
868 867 0
869 868 0
870 869 1 baz:1
871 870 2 baz:2
872 871 3+ baz:3
873 872 4 baz:4
874 873 5
875 874 6
876 875 $ cat > baz << EOF
877 876 > 0
878 877 > 0
879 878 > a
880 879 > b
881 880 > 3+ baz:3
882 881 > 4 baz:4
883 882 > y
884 883 > z
885 884 > EOF
886 885 $ hg ci -m 'baz: mostly rewrite with some content from 24'
887 886 created new head
888 887 $ hg merge --tool :merge-other 24
889 888 merging baz
890 889 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
891 890 (branch merge, don't forget to commit)
892 891 $ hg ci -m 'merge forgetting about baz rewrite'
893 892 $ cat > baz << EOF
894 893 > 0
895 894 > 0
896 895 > 1 baz:1
897 896 > 2+ baz:2
898 897 > 3+ baz:3
899 898 > 4 baz:4
900 899 > 5
901 900 > 6
902 901 > EOF
903 902 $ hg ci -m 'baz: narrow change (2->2+)'
904 903 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:4, startrev=20, descend=True)' --graph
905 904 @ 33: baz: narrow change (2->2+)
906 905 |
907 906 o 32: merge forgetting about baz rewrite
908 907 |\
909 908 | o 31: baz: mostly rewrite with some content from 24
910 909 | :
911 910 | : o 30: baz:3->+3
912 911 | :/
913 912 +---o 27: baz:3+->3-
914 913 | :
915 914 o : 24: baz:3->3+
916 915 :/
917 916 o 20: baz:4
918 917 |\
919 918 ~ ~
920 919
921 920 An integer as a line range, which is parsed as '1:1'
922 921
923 922 $ hg log -r 'followlines(baz, 1)'
924 923 changeset: 22:2174d0bf352a
925 924 user: test
926 925 date: Thu Jan 01 00:00:00 1970 +0000
927 926 summary: added two lines with 0
928 927
929 928
930 929 check error cases
931 930 $ hg up 24 --quiet
932 931 $ hg log -r 'followlines()'
933 932 hg: parse error: followlines takes at least 1 positional arguments
934 933 [255]
935 934 $ hg log -r 'followlines(baz)'
936 935 hg: parse error: followlines requires a line range
937 936 [255]
938 937 $ hg log -r 'followlines(baz, x)'
939 938 hg: parse error: followlines expects a line number or a range
940 939 [255]
941 940 $ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
942 941 hg: parse error: followlines expects exactly one revision
943 942 [255]
944 943 $ hg log -r 'followlines("glob:*", 1:2)'
945 944 hg: parse error: followlines expects exactly one file
946 945 [255]
947 946 $ hg log -r 'followlines(baz, 1:)'
948 947 hg: parse error: line range bounds must be integers
949 948 [255]
950 949 $ hg log -r 'followlines(baz, :1)'
951 950 hg: parse error: line range bounds must be integers
952 951 [255]
953 952 $ hg log -r 'followlines(baz, x:4)'
954 953 hg: parse error: line range bounds must be integers
955 954 [255]
956 955 $ hg log -r 'followlines(baz, 5:4)'
957 956 hg: parse error: line range must be positive
958 957 [255]
959 958 $ hg log -r 'followlines(baz, 0:4)'
960 959 hg: parse error: fromline must be strictly positive
961 960 [255]
962 961 $ hg log -r 'followlines(baz, 2:40)'
963 962 abort: line range exceeds file size
964 963 [255]
965 964 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
966 965 hg: parse error at 43: not a prefix: [
967 966 (followlines(baz, 2:4, startrev=20, descend=[1])
968 967 ^ here)
969 968 [255]
970 969 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
971 970 hg: parse error: descend argument must be a boolean
972 971 [255]
973 972
974 973 Test empty annotate output
975 974
976 975 $ printf '\0' > binary
977 976 $ touch empty
978 977 $ hg ci -qAm 'add binary and empty files'
979 978
980 979 $ hg annotate binary empty
981 980 binary: binary file
982 981
983 982 $ hg annotate -Tjson binary empty
984 983 [
985 984 {
986 985 "path": "binary"
987 986 },
988 987 {
989 988 "lines": [],
990 989 "path": "empty"
991 990 }
992 991 ]
993 992
994 993 Test annotate with whitespace options
995 994
996 995 $ cd ..
997 996 $ hg init repo-ws
998 997 $ cd repo-ws
999 998 $ cat > a <<EOF
1000 999 > aa
1001 1000 >
1002 1001 > b b
1003 1002 > EOF
1004 1003 $ hg ci -Am "adda"
1005 1004 adding a
1006 1005 $ sed 's/EOL$//g' > a <<EOF
1007 1006 > a a
1008 1007 >
1009 1008 > EOL
1010 1009 > b b
1011 1010 > EOF
1012 1011 $ hg ci -m "changea"
1013 1012
1014 1013 Annotate with no option
1015 1014
1016 1015 $ hg annotate a
1017 1016 1: a a
1018 1017 0:
1019 1018 1:
1020 1019 1: b b
1021 1020
1022 1021 Annotate with --ignore-space-change
1023 1022
1024 1023 $ hg annotate --ignore-space-change a
1025 1024 1: a a
1026 1025 1:
1027 1026 0:
1028 1027 0: b b
1029 1028
1030 1029 Annotate with --ignore-all-space
1031 1030
1032 1031 $ hg annotate --ignore-all-space a
1033 1032 0: a a
1034 1033 0:
1035 1034 1:
1036 1035 0: b b
1037 1036
1038 1037 Annotate with --ignore-blank-lines (similar to no options case)
1039 1038
1040 1039 $ hg annotate --ignore-blank-lines a
1041 1040 1: a a
1042 1041 0:
1043 1042 1:
1044 1043 1: b b
1045 1044
1046 1045 $ cd ..
1047 1046
1048 1047 Annotate with orphaned CR (issue5798)
1049 1048 -------------------------------------
1050 1049
1051 1050 $ hg init repo-cr
1052 1051 $ cd repo-cr
1053 1052
1054 1053 $ cat <<'EOF' >> "$TESTTMP/substcr.py"
1055 1054 > import sys
1056 1055 > from mercurial.utils import procutil
1057 1056 > procutil.setbinary(sys.stdin)
1058 1057 > procutil.setbinary(sys.stdout)
1059 1058 > stdin = getattr(sys.stdin, 'buffer', sys.stdin)
1060 1059 > stdout = getattr(sys.stdout, 'buffer', sys.stdout)
1061 1060 > stdout.write(stdin.read().replace(b'\r', b'[CR]'))
1062 1061 > EOF
1063 1062
1064 1063 >>> with open('a', 'wb') as f:
1065 1064 ... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g') and None
1066 1065 $ hg ci -qAm0
1067 1066 >>> with open('a', 'wb') as f:
1068 1067 ... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g') and None
1069 1068 $ hg ci -m1
1070 1069
1071 1070 $ hg annotate -r0 a | "$PYTHON" "$TESTTMP/substcr.py"
1072 1071 0: 0a[CR]0b[CR]
1073 1072 0: 0c[CR]0d[CR]
1074 1073 0: 0e
1075 1074 0: 0f
1076 1075 0: 0g
1077 1076 $ hg annotate -r1 a | "$PYTHON" "$TESTTMP/substcr.py"
1078 1077 0: 0a[CR]0b[CR]
1079 1078 1: 1c[CR]1d[CR]
1080 1079 0: 0e
1081 1080 1: 1f
1082 1081 0: 0g
1083 1082
1084 1083 $ cd ..
1085 1084
1086 1085 Annotate with linkrev pointing to another branch
1087 1086 ------------------------------------------------
1088 1087
1089 1088 create history with a filerev whose linkrev points to another branch
1090 1089
1091 1090 $ hg init branchedlinkrev
1092 1091 $ cd branchedlinkrev
1093 1092 $ echo A > a
1094 1093 $ hg commit -Am 'contentA'
1095 1094 adding a
1096 1095 $ echo B >> a
1097 1096 $ hg commit -m 'contentB'
1098 1097 $ hg up --rev 'desc(contentA)'
1099 1098 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1100 1099 $ echo unrelated > unrelated
1101 1100 $ hg commit -Am 'unrelated'
1102 1101 adding unrelated
1103 1102 created new head
1104 1103 $ hg graft -r 'desc(contentB)'
1105 1104 grafting 1:fd27c222e3e6 "contentB"
1106 1105 $ echo C >> a
1107 1106 $ hg commit -m 'contentC'
1108 1107 $ echo W >> a
1109 1108 $ hg log -G
1110 1109 @ changeset: 4:072f1e8df249
1111 1110 | tag: tip
1112 1111 | user: test
1113 1112 | date: Thu Jan 01 00:00:00 1970 +0000
1114 1113 | summary: contentC
1115 1114 |
1116 1115 o changeset: 3:ff38df03cc4b
1117 1116 | user: test
1118 1117 | date: Thu Jan 01 00:00:00 1970 +0000
1119 1118 | summary: contentB
1120 1119 |
1121 1120 o changeset: 2:62aaf3f6fc06
1122 1121 | parent: 0:f0932f74827e
1123 1122 | user: test
1124 1123 | date: Thu Jan 01 00:00:00 1970 +0000
1125 1124 | summary: unrelated
1126 1125 |
1127 1126 | o changeset: 1:fd27c222e3e6
1128 1127 |/ user: test
1129 1128 | date: Thu Jan 01 00:00:00 1970 +0000
1130 1129 | summary: contentB
1131 1130 |
1132 1131 o changeset: 0:f0932f74827e
1133 1132 user: test
1134 1133 date: Thu Jan 01 00:00:00 1970 +0000
1135 1134 summary: contentA
1136 1135
1137 1136
1138 1137 Annotate should list ancestor of starting revision only
1139 1138
1140 1139 $ hg annotate a
1141 1140 0: A
1142 1141 3: B
1143 1142 4: C
1144 1143
1145 1144 $ hg annotate a -r 'wdir()'
1146 1145 0 : A
1147 1146 3 : B
1148 1147 4 : C
1149 1148 4+: W
1150 1149
1151 1150 Even when the starting revision is the linkrev-shadowed one:
1152 1151
1153 1152 $ hg annotate a -r 3
1154 1153 0: A
1155 1154 3: B
1156 1155
1157 1156 $ cd ..
1158 1157
1159 1158 Issue5360: Deleted chunk in p1 of a merge changeset
1160 1159
1161 1160 $ hg init repo-5360
1162 1161 $ cd repo-5360
1163 1162 $ echo 1 > a
1164 1163 $ hg commit -A a -m 1
1165 1164 $ echo 2 >> a
1166 1165 $ hg commit -m 2
1167 1166 $ echo a > a
1168 1167 $ hg commit -m a
1169 1168 $ hg update '.^' -q
1170 1169 $ echo 3 >> a
1171 1170 $ hg commit -m 3 -q
1172 1171 $ hg merge 2 -q
1173 1172 warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
1174 1173 [1]
1175 1174 $ cat a
1176 1175 <<<<<<< working copy: 0a068f0261cf - test: 3
1177 1176 1
1178 1177 2
1179 1178 3
1180 1179 ||||||| base
1181 1180 1
1182 1181 2
1183 1182 =======
1184 1183 a
1185 1184 >>>>>>> merge rev: 9409851bc20a - test: a
1186 1185 $ cat > a << EOF
1187 1186 > b
1188 1187 > 1
1189 1188 > 2
1190 1189 > 3
1191 1190 > a
1192 1191 > EOF
1193 1192 $ hg resolve --mark -q
1194 1193 $ rm a.orig
1195 1194 $ hg commit -m m
1196 1195 $ hg annotate a
1197 1196 4: b
1198 1197 0: 1
1199 1198 1: 2
1200 1199 3: 3
1201 1200 2: a
1202 1201
1203 1202 $ cd ..
@@ -1,819 +1,819
1 1 (this file is backported from core hg tests/test-annotate.t)
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [ui]
5 5 > merge = :merge3
6 6 > [diff]
7 7 > git=1
8 8 > [extensions]
9 9 > fastannotate=
10 10 > [fastannotate]
11 11 > modes=fctx
12 12 > forcefollow=False
13 13 > mainbranch=.
14 14 > EOF
15 15
16 16 init
17 17
18 18 $ hg init repo
19 19 $ cd repo
20 20
21 21 commit
22 22
23 23 $ echo 'a' > a
24 24 $ hg ci -A -m test -u nobody -d '1 0'
25 25 adding a
26 26
27 27 annotate -c
28 28
29 29 $ hg annotate -c a
30 30 8435f90966e4: a
31 31
32 32 annotate -cl
33 33
34 34 $ hg annotate -cl a
35 35 8435f90966e4:1: a
36 36
37 37 annotate -d
38 38
39 39 $ hg annotate -d a
40 40 Thu Jan 01 00:00:01 1970 +0000: a
41 41
42 42 annotate -n
43 43
44 44 $ hg annotate -n a
45 45 0: a
46 46
47 47 annotate -nl
48 48
49 49 $ hg annotate -nl a
50 50 0:1: a
51 51
52 52 annotate -u
53 53
54 54 $ hg annotate -u a
55 55 nobody: a
56 56
57 57 annotate -cdnu
58 58
59 59 $ hg annotate -cdnu a
60 60 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
61 61
62 62 annotate -cdnul
63 63
64 64 $ hg annotate -cdnul a
65 65 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
66 66
67 67 annotate (JSON)
68 68
69 69 $ hg annotate -Tjson a
70 70 [
71 71 {
72 72 "lines": [{"line": "a\n", "rev": 0}],
73 73 "path": "a"
74 74 }
75 75 ]
76 76
77 77 $ hg annotate -Tjson -cdfnul a
78 78 [
79 79 {
80 80 "lines": [{"date": [1.0, 0], "line": "a\n", "lineno": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
81 81 "path": "a"
82 82 }
83 83 ]
84 84
85 85 $ cat <<EOF >>a
86 86 > a
87 87 > a
88 88 > EOF
89 89 $ hg ci -ma1 -d '1 0'
90 90 $ hg cp a b
91 91 $ hg ci -mb -d '1 0'
92 92 $ cat <<EOF >> b
93 93 > b4
94 94 > b5
95 95 > b6
96 96 > EOF
97 97 $ hg ci -mb2 -d '2 0'
98 98
99 99 annotate -n b
100 100
101 101 $ hg annotate -n b
102 102 0: a
103 103 1: a
104 104 1: a
105 105 3: b4
106 106 3: b5
107 107 3: b6
108 108
109 109 annotate --no-follow b
110 110
111 111 $ hg annotate --no-follow b
112 112 2: a
113 113 2: a
114 114 2: a
115 115 3: b4
116 116 3: b5
117 117 3: b6
118 118
119 119 annotate -nl b
120 120
121 121 $ hg annotate -nl b
122 122 0:1: a
123 123 1:2: a
124 124 1:3: a
125 125 3:4: b4
126 126 3:5: b5
127 127 3:6: b6
128 128
129 129 annotate -nf b
130 130
131 131 $ hg annotate -nf b
132 132 0 a: a
133 133 1 a: a
134 134 1 a: a
135 135 3 b: b4
136 136 3 b: b5
137 137 3 b: b6
138 138
139 139 annotate -nlf b
140 140
141 141 $ hg annotate -nlf b
142 142 0 a:1: a
143 143 1 a:2: a
144 144 1 a:3: a
145 145 3 b:4: b4
146 146 3 b:5: b5
147 147 3 b:6: b6
148 148
149 149 $ hg up -C 2
150 150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 151 $ cat <<EOF >> b
152 152 > b4
153 153 > c
154 154 > b5
155 155 > EOF
156 156 $ hg ci -mb2.1 -d '2 0'
157 157 created new head
158 158 $ hg merge
159 159 merging b
160 160 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
161 161 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
162 162 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
163 163 [1]
164 164 $ cat b
165 165 a
166 166 a
167 167 a
168 168 <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
169 169 b4
170 170 c
171 171 b5
172 172 ||||||| base
173 173 =======
174 174 b4
175 175 b5
176 176 b6
177 177 >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
178 178 $ cat <<EOF > b
179 179 > a
180 180 > a
181 181 > a
182 182 > b4
183 183 > c
184 184 > b5
185 185 > EOF
186 186 $ hg resolve --mark -q
187 187 $ rm b.orig
188 188 $ hg ci -mmergeb -d '3 0'
189 189
190 190 annotate after merge
191 191 (note: the first one falls back to the vanilla annotate which does not use linelog)
192 192
193 193 $ hg annotate -nf b --debug
194 194 fastannotate: b: rebuilding broken cache
195 195 fastannotate: b: 5 new changesets in the main branch
196 196 0 a: a
197 197 1 a: a
198 198 1 a: a
199 199 3 b: b4
200 200 4 b: c
201 201 3 b: b5
202 202
203 203 (difference explained below)
204 204
205 205 $ hg annotate -nf b --debug
206 206 fastannotate: b: using fast path (resolved fctx: False)
207 207 0 a: a
208 208 1 a: a
209 209 1 a: a
210 210 4 b: b4
211 211 4 b: c
212 212 4 b: b5
213 213
214 214 annotate after merge with -l
215 215 (fastannotate differs from annotate)
216 216
217 217 $ hg log -Gp -T '{rev}:{node}' -r '2..5'
218 218 @ 5:64afcdf8e29e063c635be123d8d2fb160af00f7e
219 219 |\
220 220 | o 4:5fbdc1152d97597717021ad9e063061b200f146bdiff --git a/b b/b
221 221 | | --- a/b
222 222 | | +++ b/b
223 223 | | @@ -1,3 +1,6 @@
224 224 | | a
225 225 | | a
226 226 | | a
227 227 | | +b4
228 228 | | +c
229 229 | | +b5
230 230 | |
231 231 o | 3:37ec9f5c3d1f99572d7075971cb4876e2139b52fdiff --git a/b b/b
232 232 |/ --- a/b
233 233 | +++ b/b
234 234 | @@ -1,3 +1,6 @@
235 235 | a
236 236 | a
237 237 | a
238 238 | +b4
239 239 | +b5
240 240 | +b6
241 241 |
242 242 o 2:3086dbafde1ce745abfc8d2d367847280aabae9ddiff --git a/a b/b
243 243 | copy from a
244 244 ~ copy to b
245 245
246 246
247 247 (in this case, "b4", "b5" could be considered introduced by either rev 3, or rev 4.
248 248 and that causes the rev number difference)
249 249
250 250 $ hg annotate -nlf b --config fastannotate.modes=
251 251 0 a:1: a
252 252 1 a:2: a
253 253 1 a:3: a
254 254 3 b:4: b4
255 255 4 b:5: c
256 256 3 b:5: b5
257 257
258 258 $ hg annotate -nlf b
259 259 0 a:1: a
260 260 1 a:2: a
261 261 1 a:3: a
262 262 4 b:4: b4
263 263 4 b:5: c
264 264 4 b:6: b5
265 265
266 266 $ hg up -C 1
267 267 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
268 268 $ hg cp a b
269 269 $ cat <<EOF > b
270 270 > a
271 271 > z
272 272 > a
273 273 > EOF
274 274 $ hg ci -mc -d '3 0'
275 275 created new head
276 276 Work around the pure version not resolving the conflict like native code
277 277 #if pure
278 278 $ hg merge
279 279 merging b
280 280 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
281 281 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
282 282 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
283 283 [1]
284 284 $ cat <<EOF > b
285 285 > a
286 286 > z
287 287 > a
288 288 > b4
289 289 > c
290 290 > b5
291 291 > EOF
292 292 $ hg resolve -m b
293 293 (no more unresolved files)
294 294 $ rm b.orig
295 295 #else
296 296 $ hg merge
297 297 merging b
298 298 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
299 299 (branch merge, don't forget to commit)
300 300 #endif
301 301 $ echo d >> b
302 302 $ hg ci -mmerge2 -d '4 0'
303 303
304 304 annotate after rename merge
305 305
306 306 $ hg annotate -nf b
307 307 0 a: a
308 308 6 b: z
309 309 1 a: a
310 310 3 b: b4
311 311 4 b: c
312 312 3 b: b5
313 313 7 b: d
314 314
315 315 annotate after rename merge with -l
316 316 (fastannotate differs from annotate)
317 317
318 318 $ hg log -Gp -T '{rev}:{node}' -r '0+1+6+7'
319 319 @ 7:6284bb6c38fef984a929862a53bbc71ce9eafa81diff --git a/b b/b
320 320 |\ --- a/b
321 321 | : +++ b/b
322 322 | : @@ -1,3 +1,7 @@
323 323 | : a
324 324 | : z
325 325 | : a
326 326 | : +b4
327 327 | : +c
328 328 | : +b5
329 329 | : +d
330 330 | :
331 331 o : 6:b80e3e32f75a6a67cd4ac85496a11511e9112816diff --git a/a b/b
332 332 :/ copy from a
333 333 : copy to b
334 334 : --- a/a
335 335 : +++ b/b
336 336 : @@ -1,3 +1,3 @@
337 337 : -a (?)
338 338 : a
339 339 : +z
340 340 : a
341 341 : -a (?)
342 342 :
343 343 o 1:762f04898e6684ff713415f7b8a8d53d33f96c92diff --git a/a b/a
344 344 | --- a/a
345 345 | +++ b/a
346 346 | @@ -1,1 +1,3 @@
347 347 | a
348 348 | +a
349 349 | +a
350 350 |
351 351 o 0:8435f90966e442695d2ded29fdade2bac5ad8065diff --git a/a b/a
352 352 new file mode 100644
353 353 --- /dev/null
354 354 +++ b/a
355 355 @@ -0,0 +1,1 @@
356 356 +a
357 357
358 358
359 359 (note on question marks:
360 360 the upstream bdiff change (96f2f50d923f+3633403888ae+8c0c75aa3ff4+5c4e2636c1a9
361 361 +38ed54888617) alters the output so deletion is not always at the end of the
362 362 output. for example:
363 363 | a | b | old | new | # old: e1d6aa0e4c3a, new: 8836f13e3c5b
364 364 |-------------------|
365 365 | a | a | a | -a |
366 366 | a | z | +z | a |
367 367 | a | a | a | +z |
368 368 | | | -a | a |
369 369 |-------------------|
370 370 | a | a | a |
371 371 | a | a | a |
372 372 | a | | -a |
373 373 this leads to more question marks below)
374 374
375 375 (rev 1 adds two "a"s and rev 6 deletes one "a".
376 376 the "a" that rev 6 deletes could be either the first or the second "a" of those two "a"s added by rev 1.
377 377 and that causes the line number difference)
378 378
379 379 $ hg annotate -nlf b --config fastannotate.modes=
380 380 0 a:1: a
381 381 6 b:2: z
382 382 1 a:3: a
383 383 3 b:4: b4
384 384 4 b:5: c
385 385 3 b:5: b5
386 386 7 b:7: d
387 387
388 388 $ hg annotate -nlf b
389 389 0 a:1: a (?)
390 390 1 a:2: a (?)
391 391 6 b:2: z
392 392 1 a:2: a (?)
393 393 1 a:3: a (?)
394 394 3 b:4: b4
395 395 4 b:5: c
396 396 3 b:5: b5
397 397 7 b:7: d
398 398
399 399 Issue2807: alignment of line numbers with -l
400 400 (fastannotate differs from annotate, same reason as above)
401 401
402 402 $ echo more >> b
403 403 $ hg ci -mmore -d '5 0'
404 404 $ echo more >> b
405 405 $ hg ci -mmore -d '6 0'
406 406 $ echo more >> b
407 407 $ hg ci -mmore -d '7 0'
408 408 $ hg annotate -nlf b
409 409 0 a: 1: a (?)
410 410 1 a: 2: a (?)
411 411 6 b: 2: z
412 412 1 a: 2: a (?)
413 413 1 a: 3: a (?)
414 414 3 b: 4: b4
415 415 4 b: 5: c
416 416 3 b: 5: b5
417 417 7 b: 7: d
418 418 8 b: 8: more
419 419 9 b: 9: more
420 420 10 b:10: more
421 421
422 422 linkrev vs rev
423 423
424 424 $ hg annotate -r tip -n a
425 425 0: a
426 426 1: a
427 427 1: a
428 428
429 429 linkrev vs rev with -l
430 430
431 431 $ hg annotate -r tip -nl a
432 432 0:1: a
433 433 1:2: a
434 434 1:3: a
435 435
436 436 Issue589: "undelete" sequence leads to crash
437 437
438 438 annotate was crashing when trying to --follow something
439 439
440 440 like A -> B -> A
441 441
442 442 generate ABA rename configuration
443 443
444 444 $ echo foo > foo
445 445 $ hg add foo
446 446 $ hg ci -m addfoo
447 447 $ hg rename foo bar
448 448 $ hg ci -m renamefoo
449 449 $ hg rename bar foo
450 450 $ hg ci -m renamebar
451 451
452 452 annotate after ABA with follow
453 453
454 454 $ hg annotate --follow foo
455 455 foo: foo
456 456
457 457 missing file
458 458
459 459 $ hg ann nosuchfile
460 460 abort: nosuchfile: no such file in rev e9e6b4fa872f
461 461 [255]
462 462
463 463 annotate file without '\n' on last line
464 464
465 465 $ printf "" > c
466 466 $ hg ci -A -m test -u nobody -d '1 0'
467 467 adding c
468 468 $ hg annotate c
469 469 $ printf "a\nb" > c
470 470 $ hg ci -m test
471 471 $ hg annotate c
472 472 [0-9]+: a (re)
473 473 [0-9]+: b (re)
474 474
475 475 Issue3841: check annotation of the file of which filelog includes
476 476 merging between the revision and its ancestor
477 477
478 478 to reproduce the situation with recent Mercurial, this script uses (1)
479 479 "hg debugsetparents" to merge without ancestor check by "hg merge",
480 480 and (2) the extension to allow filelog merging between the revision
481 481 and its ancestor by overriding "repo._filecommit".
482 482
483 483 $ cat > ../legacyrepo.py <<EOF
484 > from mercurial import error, node
485 > def reposetup(ui, repo):
486 > class legacyrepo(repo.__class__):
487 > def _filecommit(self, fctx, manifest1, manifest2,
484 > from __future__ import absolute_import
485 > from mercurial import commit, error, extensions, node
486 > def _filecommit(orig, repo, fctx, manifest1, manifest2,
488 487 > linkrev, tr, includecopymeta):
489 488 > fname = fctx.path()
490 489 > text = fctx.data()
491 > flog = self.file(fname)
490 > flog = repo.file(fname)
492 491 > fparent1 = manifest1.get(fname, node.nullid)
493 492 > fparent2 = manifest2.get(fname, node.nullid)
494 493 > meta = {}
495 > copy = fctx.renamed()
496 > if copy and copy[0] != fname:
494 > copy = fctx.copysource()
495 > if copy and copy != fname:
497 496 > raise error.Abort('copying is not supported')
498 497 > if fparent2 != node.nullid:
499 498 > return flog.add(text, meta, tr, linkrev,
500 499 > fparent1, fparent2), 'modified'
501 500 > raise error.Abort('only merging is supported')
502 > repo.__class__ = legacyrepo
501 > def uisetup(ui):
502 > extensions.wrapfunction(commit, '_filecommit', _filecommit)
503 503 > EOF
504 504
505 505 $ cat > baz <<EOF
506 506 > 1
507 507 > 2
508 508 > 3
509 509 > 4
510 510 > 5
511 511 > EOF
512 512 $ hg add baz
513 513 $ hg commit -m "baz:0"
514 514
515 515 $ cat > baz <<EOF
516 516 > 1 baz:1
517 517 > 2
518 518 > 3
519 519 > 4
520 520 > 5
521 521 > EOF
522 522 $ hg commit -m "baz:1"
523 523
524 524 $ cat > baz <<EOF
525 525 > 1 baz:1
526 526 > 2 baz:2
527 527 > 3
528 528 > 4
529 529 > 5
530 530 > EOF
531 531 $ hg debugsetparents 17 17
532 532 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
533 533 $ hg debugindexdot baz
534 534 digraph G {
535 535 -1 -> 0
536 536 0 -> 1
537 537 1 -> 2
538 538 1 -> 2
539 539 }
540 540 $ hg annotate baz
541 541 17: 1 baz:1
542 542 18: 2 baz:2
543 543 16: 3
544 544 16: 4
545 545 16: 5
546 546
547 547 $ cat > baz <<EOF
548 548 > 1 baz:1
549 549 > 2 baz:2
550 550 > 3 baz:3
551 551 > 4
552 552 > 5
553 553 > EOF
554 554 $ hg commit -m "baz:3"
555 555
556 556 $ cat > baz <<EOF
557 557 > 1 baz:1
558 558 > 2 baz:2
559 559 > 3 baz:3
560 560 > 4 baz:4
561 561 > 5
562 562 > EOF
563 563 $ hg debugsetparents 19 18
564 564 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
565 565 $ hg debugindexdot baz
566 566 digraph G {
567 567 -1 -> 0
568 568 0 -> 1
569 569 1 -> 2
570 570 1 -> 2
571 571 2 -> 3
572 572 3 -> 4
573 573 2 -> 4
574 574 }
575 575 $ hg annotate baz
576 576 17: 1 baz:1
577 577 18: 2 baz:2
578 578 19: 3 baz:3
579 579 20: 4 baz:4
580 580 16: 5
581 581
582 582 annotate clean file
583 583
584 584 $ hg annotate -ncr "wdir()" foo
585 585 11 472b18db256d : foo
586 586
587 587 annotate modified file
588 588
589 589 $ echo foofoo >> foo
590 590 $ hg annotate -r "wdir()" foo
591 591 11 : foo
592 592 20+: foofoo
593 593
594 594 $ hg annotate -cr "wdir()" foo
595 595 472b18db256d : foo
596 596 b6bedd5477e7+: foofoo
597 597
598 598 $ hg annotate -ncr "wdir()" foo
599 599 11 472b18db256d : foo
600 600 20 b6bedd5477e7+: foofoo
601 601
602 602 $ hg annotate --debug -ncr "wdir()" foo
603 603 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
604 604 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
605 605
606 606 $ hg annotate -udr "wdir()" foo
607 607 test Thu Jan 01 00:00:00 1970 +0000: foo
608 608 test [A-Za-z0-9:+ ]+: foofoo (re)
609 609
610 610 $ hg annotate -ncr "wdir()" -Tjson foo
611 611 [
612 612 {
613 613 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": "ffffffffffffffffffffffffffffffffffffffff", "rev": 2147483647}],
614 614 "path": "foo"
615 615 }
616 616 ]
617 617
618 618 annotate added file
619 619
620 620 $ echo bar > bar
621 621 $ hg add bar
622 622 $ hg annotate -ncr "wdir()" bar
623 623 20 b6bedd5477e7+: bar
624 624
625 625 annotate renamed file
626 626
627 627 $ hg rename foo renamefoo2
628 628 $ hg annotate -ncr "wdir()" renamefoo2
629 629 11 472b18db256d : foo
630 630 20 b6bedd5477e7+: foofoo
631 631
632 632 annotate missing file
633 633
634 634 $ rm baz
635 635 $ hg annotate -ncr "wdir()" baz
636 636 abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
637 637 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
638 638 [255]
639 639
640 640 annotate removed file
641 641
642 642 $ hg rm baz
643 643 $ hg annotate -ncr "wdir()" baz
644 644 abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
645 645 abort: $ENOENT$: '$TESTTMP/repo/baz' (no-windows !)
646 646 [255]
647 647
648 648 Test annotate with whitespace options
649 649
650 650 $ cd ..
651 651 $ hg init repo-ws
652 652 $ cd repo-ws
653 653 $ cat > a <<EOF
654 654 > aa
655 655 >
656 656 > b b
657 657 > EOF
658 658 $ hg ci -Am "adda"
659 659 adding a
660 660 $ sed 's/EOL$//g' > a <<EOF
661 661 > a a
662 662 >
663 663 > EOL
664 664 > b b
665 665 > EOF
666 666 $ hg ci -m "changea"
667 667
668 668 Annotate with no option
669 669
670 670 $ hg annotate a
671 671 1: a a
672 672 0:
673 673 1:
674 674 1: b b
675 675
676 676 Annotate with --ignore-space-change
677 677
678 678 $ hg annotate --ignore-space-change a
679 679 1: a a
680 680 1:
681 681 0:
682 682 0: b b
683 683
684 684 Annotate with --ignore-all-space
685 685
686 686 $ hg annotate --ignore-all-space a
687 687 0: a a
688 688 0:
689 689 1:
690 690 0: b b
691 691
692 692 Annotate with --ignore-blank-lines (similar to no options case)
693 693
694 694 $ hg annotate --ignore-blank-lines a
695 695 1: a a
696 696 0:
697 697 1:
698 698 1: b b
699 699
700 700 $ cd ..
701 701
702 702 Annotate with linkrev pointing to another branch
703 703 ------------------------------------------------
704 704
705 705 create history with a filerev whose linkrev points to another branch
706 706
707 707 $ hg init branchedlinkrev
708 708 $ cd branchedlinkrev
709 709 $ echo A > a
710 710 $ hg commit -Am 'contentA'
711 711 adding a
712 712 $ echo B >> a
713 713 $ hg commit -m 'contentB'
714 714 $ hg up --rev 'desc(contentA)'
715 715 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
716 716 $ echo unrelated > unrelated
717 717 $ hg commit -Am 'unrelated'
718 718 adding unrelated
719 719 created new head
720 720 $ hg graft -r 'desc(contentB)'
721 721 grafting 1:fd27c222e3e6 "contentB"
722 722 $ echo C >> a
723 723 $ hg commit -m 'contentC'
724 724 $ echo W >> a
725 725 $ hg log -G
726 726 @ changeset: 4:072f1e8df249
727 727 | tag: tip
728 728 | user: test
729 729 | date: Thu Jan 01 00:00:00 1970 +0000
730 730 | summary: contentC
731 731 |
732 732 o changeset: 3:ff38df03cc4b
733 733 | user: test
734 734 | date: Thu Jan 01 00:00:00 1970 +0000
735 735 | summary: contentB
736 736 |
737 737 o changeset: 2:62aaf3f6fc06
738 738 | parent: 0:f0932f74827e
739 739 | user: test
740 740 | date: Thu Jan 01 00:00:00 1970 +0000
741 741 | summary: unrelated
742 742 |
743 743 | o changeset: 1:fd27c222e3e6
744 744 |/ user: test
745 745 | date: Thu Jan 01 00:00:00 1970 +0000
746 746 | summary: contentB
747 747 |
748 748 o changeset: 0:f0932f74827e
749 749 user: test
750 750 date: Thu Jan 01 00:00:00 1970 +0000
751 751 summary: contentA
752 752
753 753
754 754 Annotate should list ancestor of starting revision only
755 755
756 756 $ hg annotate a
757 757 0: A
758 758 3: B
759 759 4: C
760 760
761 761 $ hg annotate a -r 'wdir()'
762 762 0 : A
763 763 3 : B
764 764 4 : C
765 765 4+: W
766 766
767 767 Even when the starting revision is the linkrev-shadowed one:
768 768
769 769 $ hg annotate a -r 3
770 770 0: A
771 771 3: B
772 772
773 773 $ cd ..
774 774
775 775 Issue5360: Deleted chunk in p1 of a merge changeset
776 776
777 777 $ hg init repo-5360
778 778 $ cd repo-5360
779 779 $ echo 1 > a
780 780 $ hg commit -A a -m 1
781 781 $ echo 2 >> a
782 782 $ hg commit -m 2
783 783 $ echo a > a
784 784 $ hg commit -m a
785 785 $ hg update '.^' -q
786 786 $ echo 3 >> a
787 787 $ hg commit -m 3 -q
788 788 $ hg merge 2 -q
789 789 warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
790 790 [1]
791 791 $ cat a
792 792 <<<<<<< working copy: 0a068f0261cf - test: 3
793 793 1
794 794 2
795 795 3
796 796 ||||||| base
797 797 1
798 798 2
799 799 =======
800 800 a
801 801 >>>>>>> merge rev: 9409851bc20a - test: a
802 802 $ cat > a << EOF
803 803 > b
804 804 > 1
805 805 > 2
806 806 > 3
807 807 > a
808 808 > EOF
809 809 $ hg resolve --mark -q
810 810 $ rm a.orig
811 811 $ hg commit -m m
812 812 $ hg annotate a
813 813 4: b
814 814 0: 1
815 815 1: 2
816 816 3: 3
817 817 2: a
818 818
819 819 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now