##// END OF EJS Templates
dagop: move annotateline and _annotatepair from context.py...
Yuya Nishihara -
r36935:7affcabf default
parent child Browse files
Show More
@@ -1,2749 +1,2681 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirnodes,
26 wdirnodes,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from .thirdparty import (
30 attr,
31 )
32 from . import (
29 from . import (
30 dagop,
33 encoding,
31 encoding,
34 error,
32 error,
35 fileset,
33 fileset,
36 match as matchmod,
34 match as matchmod,
37 mdiff,
35 mdiff,
38 obsolete as obsmod,
36 obsolete as obsmod,
39 obsutil,
37 obsutil,
40 patch,
38 patch,
41 pathutil,
39 pathutil,
42 phases,
40 phases,
43 pycompat,
41 pycompat,
44 repoview,
42 repoview,
45 revlog,
43 revlog,
46 scmutil,
44 scmutil,
47 sparse,
45 sparse,
48 subrepo,
46 subrepo,
49 subrepoutil,
47 subrepoutil,
50 util,
48 util,
51 )
49 )
52 from .utils import dateutil
50 from .utils import dateutil
53
51
54 propertycache = util.propertycache
52 propertycache = util.propertycache
55
53
56 nonascii = re.compile(br'[^\x21-\x7f]').search
54 nonascii = re.compile(br'[^\x21-\x7f]').search
57
55
58 class basectx(object):
56 class basectx(object):
59 """A basectx object represents the common logic for its children:
57 """A basectx object represents the common logic for its children:
60 changectx: read-only context that is already present in the repo,
58 changectx: read-only context that is already present in the repo,
61 workingctx: a context that represents the working directory and can
59 workingctx: a context that represents the working directory and can
62 be committed,
60 be committed,
63 memctx: a context that represents changes in-memory and can also
61 memctx: a context that represents changes in-memory and can also
64 be committed."""
62 be committed."""
65 def __new__(cls, repo, changeid='', *args, **kwargs):
63 def __new__(cls, repo, changeid='', *args, **kwargs):
66 if isinstance(changeid, basectx):
64 if isinstance(changeid, basectx):
67 return changeid
65 return changeid
68
66
69 o = super(basectx, cls).__new__(cls)
67 o = super(basectx, cls).__new__(cls)
70
68
71 o._repo = repo
69 o._repo = repo
72 o._rev = nullrev
70 o._rev = nullrev
73 o._node = nullid
71 o._node = nullid
74
72
75 return o
73 return o
76
74
77 def __bytes__(self):
75 def __bytes__(self):
78 return short(self.node())
76 return short(self.node())
79
77
80 __str__ = encoding.strmethod(__bytes__)
78 __str__ = encoding.strmethod(__bytes__)
81
79
82 def __repr__(self):
80 def __repr__(self):
83 return r"<%s %s>" % (type(self).__name__, str(self))
81 return r"<%s %s>" % (type(self).__name__, str(self))
84
82
85 def __eq__(self, other):
83 def __eq__(self, other):
86 try:
84 try:
87 return type(self) == type(other) and self._rev == other._rev
85 return type(self) == type(other) and self._rev == other._rev
88 except AttributeError:
86 except AttributeError:
89 return False
87 return False
90
88
91 def __ne__(self, other):
89 def __ne__(self, other):
92 return not (self == other)
90 return not (self == other)
93
91
94 def __contains__(self, key):
92 def __contains__(self, key):
95 return key in self._manifest
93 return key in self._manifest
96
94
97 def __getitem__(self, key):
95 def __getitem__(self, key):
98 return self.filectx(key)
96 return self.filectx(key)
99
97
100 def __iter__(self):
98 def __iter__(self):
101 return iter(self._manifest)
99 return iter(self._manifest)
102
100
103 def _buildstatusmanifest(self, status):
101 def _buildstatusmanifest(self, status):
104 """Builds a manifest that includes the given status results, if this is
102 """Builds a manifest that includes the given status results, if this is
105 a working copy context. For non-working copy contexts, it just returns
103 a working copy context. For non-working copy contexts, it just returns
106 the normal manifest."""
104 the normal manifest."""
107 return self.manifest()
105 return self.manifest()
108
106
109 def _matchstatus(self, other, match):
107 def _matchstatus(self, other, match):
110 """This internal method provides a way for child objects to override the
108 """This internal method provides a way for child objects to override the
111 match operator.
109 match operator.
112 """
110 """
113 return match
111 return match
114
112
115 def _buildstatus(self, other, s, match, listignored, listclean,
113 def _buildstatus(self, other, s, match, listignored, listclean,
116 listunknown):
114 listunknown):
117 """build a status with respect to another context"""
115 """build a status with respect to another context"""
118 # Load earliest manifest first for caching reasons. More specifically,
116 # Load earliest manifest first for caching reasons. More specifically,
119 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
120 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
121 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # 1000 and cache it so that when you read 1001, we just need to apply a
122 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta to what's in the cache. So that's one full reconstruction + one
123 # delta application.
121 # delta application.
124 mf2 = None
122 mf2 = None
125 if self.rev() is not None and self.rev() < other.rev():
123 if self.rev() is not None and self.rev() < other.rev():
126 mf2 = self._buildstatusmanifest(s)
124 mf2 = self._buildstatusmanifest(s)
127 mf1 = other._buildstatusmanifest(s)
125 mf1 = other._buildstatusmanifest(s)
128 if mf2 is None:
126 if mf2 is None:
129 mf2 = self._buildstatusmanifest(s)
127 mf2 = self._buildstatusmanifest(s)
130
128
131 modified, added = [], []
129 modified, added = [], []
132 removed = []
130 removed = []
133 clean = []
131 clean = []
134 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
135 deletedset = set(deleted)
133 deletedset = set(deleted)
136 d = mf1.diff(mf2, match=match, clean=listclean)
134 d = mf1.diff(mf2, match=match, clean=listclean)
137 for fn, value in d.iteritems():
135 for fn, value in d.iteritems():
138 if fn in deletedset:
136 if fn in deletedset:
139 continue
137 continue
140 if value is None:
138 if value is None:
141 clean.append(fn)
139 clean.append(fn)
142 continue
140 continue
143 (node1, flag1), (node2, flag2) = value
141 (node1, flag1), (node2, flag2) = value
144 if node1 is None:
142 if node1 is None:
145 added.append(fn)
143 added.append(fn)
146 elif node2 is None:
144 elif node2 is None:
147 removed.append(fn)
145 removed.append(fn)
148 elif flag1 != flag2:
146 elif flag1 != flag2:
149 modified.append(fn)
147 modified.append(fn)
150 elif node2 not in wdirnodes:
148 elif node2 not in wdirnodes:
151 # When comparing files between two commits, we save time by
149 # When comparing files between two commits, we save time by
152 # not comparing the file contents when the nodeids differ.
150 # not comparing the file contents when the nodeids differ.
153 # Note that this means we incorrectly report a reverted change
151 # Note that this means we incorrectly report a reverted change
154 # to a file as a modification.
152 # to a file as a modification.
155 modified.append(fn)
153 modified.append(fn)
156 elif self[fn].cmp(other[fn]):
154 elif self[fn].cmp(other[fn]):
157 modified.append(fn)
155 modified.append(fn)
158 else:
156 else:
159 clean.append(fn)
157 clean.append(fn)
160
158
161 if removed:
159 if removed:
162 # need to filter files if they are already reported as removed
160 # need to filter files if they are already reported as removed
163 unknown = [fn for fn in unknown if fn not in mf1 and
161 unknown = [fn for fn in unknown if fn not in mf1 and
164 (not match or match(fn))]
162 (not match or match(fn))]
165 ignored = [fn for fn in ignored if fn not in mf1 and
163 ignored = [fn for fn in ignored if fn not in mf1 and
166 (not match or match(fn))]
164 (not match or match(fn))]
167 # if they're deleted, don't report them as removed
165 # if they're deleted, don't report them as removed
168 removed = [fn for fn in removed if fn not in deletedset]
166 removed = [fn for fn in removed if fn not in deletedset]
169
167
170 return scmutil.status(modified, added, removed, deleted, unknown,
168 return scmutil.status(modified, added, removed, deleted, unknown,
171 ignored, clean)
169 ignored, clean)
172
170
173 @propertycache
171 @propertycache
174 def substate(self):
172 def substate(self):
175 return subrepoutil.state(self, self._repo.ui)
173 return subrepoutil.state(self, self._repo.ui)
176
174
177 def subrev(self, subpath):
175 def subrev(self, subpath):
178 return self.substate[subpath][1]
176 return self.substate[subpath][1]
179
177
180 def rev(self):
178 def rev(self):
181 return self._rev
179 return self._rev
182 def node(self):
180 def node(self):
183 return self._node
181 return self._node
184 def hex(self):
182 def hex(self):
185 return hex(self.node())
183 return hex(self.node())
186 def manifest(self):
184 def manifest(self):
187 return self._manifest
185 return self._manifest
188 def manifestctx(self):
186 def manifestctx(self):
189 return self._manifestctx
187 return self._manifestctx
190 def repo(self):
188 def repo(self):
191 return self._repo
189 return self._repo
192 def phasestr(self):
190 def phasestr(self):
193 return phases.phasenames[self.phase()]
191 return phases.phasenames[self.phase()]
194 def mutable(self):
192 def mutable(self):
195 return self.phase() > phases.public
193 return self.phase() > phases.public
196
194
197 def getfileset(self, expr):
195 def getfileset(self, expr):
198 return fileset.getfileset(self, expr)
196 return fileset.getfileset(self, expr)
199
197
200 def obsolete(self):
198 def obsolete(self):
201 """True if the changeset is obsolete"""
199 """True if the changeset is obsolete"""
202 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
203
201
204 def extinct(self):
202 def extinct(self):
205 """True if the changeset is extinct"""
203 """True if the changeset is extinct"""
206 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
207
205
208 def orphan(self):
206 def orphan(self):
209 """True if the changeset is not obsolete but it's ancestor are"""
207 """True if the changeset is not obsolete but it's ancestor are"""
210 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
208 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
211
209
212 def phasedivergent(self):
210 def phasedivergent(self):
213 """True if the changeset try to be a successor of a public changeset
211 """True if the changeset try to be a successor of a public changeset
214
212
215 Only non-public and non-obsolete changesets may be bumped.
213 Only non-public and non-obsolete changesets may be bumped.
216 """
214 """
217 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
218
216
219 def contentdivergent(self):
217 def contentdivergent(self):
220 """Is a successors of a changeset with multiple possible successors set
218 """Is a successors of a changeset with multiple possible successors set
221
219
222 Only non-public and non-obsolete changesets may be divergent.
220 Only non-public and non-obsolete changesets may be divergent.
223 """
221 """
224 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
222 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
225
223
226 def isunstable(self):
224 def isunstable(self):
227 """True if the changeset is either unstable, bumped or divergent"""
225 """True if the changeset is either unstable, bumped or divergent"""
228 return self.orphan() or self.phasedivergent() or self.contentdivergent()
226 return self.orphan() or self.phasedivergent() or self.contentdivergent()
229
227
230 def instabilities(self):
228 def instabilities(self):
231 """return the list of instabilities affecting this changeset.
229 """return the list of instabilities affecting this changeset.
232
230
233 Instabilities are returned as strings. possible values are:
231 Instabilities are returned as strings. possible values are:
234 - orphan,
232 - orphan,
235 - phase-divergent,
233 - phase-divergent,
236 - content-divergent.
234 - content-divergent.
237 """
235 """
238 instabilities = []
236 instabilities = []
239 if self.orphan():
237 if self.orphan():
240 instabilities.append('orphan')
238 instabilities.append('orphan')
241 if self.phasedivergent():
239 if self.phasedivergent():
242 instabilities.append('phase-divergent')
240 instabilities.append('phase-divergent')
243 if self.contentdivergent():
241 if self.contentdivergent():
244 instabilities.append('content-divergent')
242 instabilities.append('content-divergent')
245 return instabilities
243 return instabilities
246
244
247 def parents(self):
245 def parents(self):
248 """return contexts for each parent changeset"""
246 """return contexts for each parent changeset"""
249 return self._parents
247 return self._parents
250
248
251 def p1(self):
249 def p1(self):
252 return self._parents[0]
250 return self._parents[0]
253
251
254 def p2(self):
252 def p2(self):
255 parents = self._parents
253 parents = self._parents
256 if len(parents) == 2:
254 if len(parents) == 2:
257 return parents[1]
255 return parents[1]
258 return changectx(self._repo, nullrev)
256 return changectx(self._repo, nullrev)
259
257
260 def _fileinfo(self, path):
258 def _fileinfo(self, path):
261 if r'_manifest' in self.__dict__:
259 if r'_manifest' in self.__dict__:
262 try:
260 try:
263 return self._manifest[path], self._manifest.flags(path)
261 return self._manifest[path], self._manifest.flags(path)
264 except KeyError:
262 except KeyError:
265 raise error.ManifestLookupError(self._node, path,
263 raise error.ManifestLookupError(self._node, path,
266 _('not found in manifest'))
264 _('not found in manifest'))
267 if r'_manifestdelta' in self.__dict__ or path in self.files():
265 if r'_manifestdelta' in self.__dict__ or path in self.files():
268 if path in self._manifestdelta:
266 if path in self._manifestdelta:
269 return (self._manifestdelta[path],
267 return (self._manifestdelta[path],
270 self._manifestdelta.flags(path))
268 self._manifestdelta.flags(path))
271 mfl = self._repo.manifestlog
269 mfl = self._repo.manifestlog
272 try:
270 try:
273 node, flag = mfl[self._changeset.manifest].find(path)
271 node, flag = mfl[self._changeset.manifest].find(path)
274 except KeyError:
272 except KeyError:
275 raise error.ManifestLookupError(self._node, path,
273 raise error.ManifestLookupError(self._node, path,
276 _('not found in manifest'))
274 _('not found in manifest'))
277
275
278 return node, flag
276 return node, flag
279
277
280 def filenode(self, path):
278 def filenode(self, path):
281 return self._fileinfo(path)[0]
279 return self._fileinfo(path)[0]
282
280
283 def flags(self, path):
281 def flags(self, path):
284 try:
282 try:
285 return self._fileinfo(path)[1]
283 return self._fileinfo(path)[1]
286 except error.LookupError:
284 except error.LookupError:
287 return ''
285 return ''
288
286
289 def sub(self, path, allowcreate=True):
287 def sub(self, path, allowcreate=True):
290 '''return a subrepo for the stored revision of path, never wdir()'''
288 '''return a subrepo for the stored revision of path, never wdir()'''
291 return subrepo.subrepo(self, path, allowcreate=allowcreate)
289 return subrepo.subrepo(self, path, allowcreate=allowcreate)
292
290
293 def nullsub(self, path, pctx):
291 def nullsub(self, path, pctx):
294 return subrepo.nullsubrepo(self, path, pctx)
292 return subrepo.nullsubrepo(self, path, pctx)
295
293
296 def workingsub(self, path):
294 def workingsub(self, path):
297 '''return a subrepo for the stored revision, or wdir if this is a wdir
295 '''return a subrepo for the stored revision, or wdir if this is a wdir
298 context.
296 context.
299 '''
297 '''
300 return subrepo.subrepo(self, path, allowwdir=True)
298 return subrepo.subrepo(self, path, allowwdir=True)
301
299
302 def match(self, pats=None, include=None, exclude=None, default='glob',
300 def match(self, pats=None, include=None, exclude=None, default='glob',
303 listsubrepos=False, badfn=None):
301 listsubrepos=False, badfn=None):
304 r = self._repo
302 r = self._repo
305 return matchmod.match(r.root, r.getcwd(), pats,
303 return matchmod.match(r.root, r.getcwd(), pats,
306 include, exclude, default,
304 include, exclude, default,
307 auditor=r.nofsauditor, ctx=self,
305 auditor=r.nofsauditor, ctx=self,
308 listsubrepos=listsubrepos, badfn=badfn)
306 listsubrepos=listsubrepos, badfn=badfn)
309
307
310 def diff(self, ctx2=None, match=None, **opts):
308 def diff(self, ctx2=None, match=None, **opts):
311 """Returns a diff generator for the given contexts and matcher"""
309 """Returns a diff generator for the given contexts and matcher"""
312 if ctx2 is None:
310 if ctx2 is None:
313 ctx2 = self.p1()
311 ctx2 = self.p1()
314 if ctx2 is not None:
312 if ctx2 is not None:
315 ctx2 = self._repo[ctx2]
313 ctx2 = self._repo[ctx2]
316 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
314 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
317 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
315 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
318
316
319 def dirs(self):
317 def dirs(self):
320 return self._manifest.dirs()
318 return self._manifest.dirs()
321
319
322 def hasdir(self, dir):
320 def hasdir(self, dir):
323 return self._manifest.hasdir(dir)
321 return self._manifest.hasdir(dir)
324
322
325 def status(self, other=None, match=None, listignored=False,
323 def status(self, other=None, match=None, listignored=False,
326 listclean=False, listunknown=False, listsubrepos=False):
324 listclean=False, listunknown=False, listsubrepos=False):
327 """return status of files between two nodes or node and working
325 """return status of files between two nodes or node and working
328 directory.
326 directory.
329
327
330 If other is None, compare this node with working directory.
328 If other is None, compare this node with working directory.
331
329
332 returns (modified, added, removed, deleted, unknown, ignored, clean)
330 returns (modified, added, removed, deleted, unknown, ignored, clean)
333 """
331 """
334
332
335 ctx1 = self
333 ctx1 = self
336 ctx2 = self._repo[other]
334 ctx2 = self._repo[other]
337
335
338 # This next code block is, admittedly, fragile logic that tests for
336 # This next code block is, admittedly, fragile logic that tests for
339 # reversing the contexts and wouldn't need to exist if it weren't for
337 # reversing the contexts and wouldn't need to exist if it weren't for
340 # the fast (and common) code path of comparing the working directory
338 # the fast (and common) code path of comparing the working directory
341 # with its first parent.
339 # with its first parent.
342 #
340 #
343 # What we're aiming for here is the ability to call:
341 # What we're aiming for here is the ability to call:
344 #
342 #
345 # workingctx.status(parentctx)
343 # workingctx.status(parentctx)
346 #
344 #
347 # If we always built the manifest for each context and compared those,
345 # If we always built the manifest for each context and compared those,
348 # then we'd be done. But the special case of the above call means we
346 # then we'd be done. But the special case of the above call means we
349 # just copy the manifest of the parent.
347 # just copy the manifest of the parent.
350 reversed = False
348 reversed = False
351 if (not isinstance(ctx1, changectx)
349 if (not isinstance(ctx1, changectx)
352 and isinstance(ctx2, changectx)):
350 and isinstance(ctx2, changectx)):
353 reversed = True
351 reversed = True
354 ctx1, ctx2 = ctx2, ctx1
352 ctx1, ctx2 = ctx2, ctx1
355
353
356 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
354 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
357 match = ctx2._matchstatus(ctx1, match)
355 match = ctx2._matchstatus(ctx1, match)
358 r = scmutil.status([], [], [], [], [], [], [])
356 r = scmutil.status([], [], [], [], [], [], [])
359 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
357 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
360 listunknown)
358 listunknown)
361
359
362 if reversed:
360 if reversed:
363 # Reverse added and removed. Clear deleted, unknown and ignored as
361 # Reverse added and removed. Clear deleted, unknown and ignored as
364 # these make no sense to reverse.
362 # these make no sense to reverse.
365 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
363 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
366 r.clean)
364 r.clean)
367
365
368 if listsubrepos:
366 if listsubrepos:
369 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
367 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
370 try:
368 try:
371 rev2 = ctx2.subrev(subpath)
369 rev2 = ctx2.subrev(subpath)
372 except KeyError:
370 except KeyError:
373 # A subrepo that existed in node1 was deleted between
371 # A subrepo that existed in node1 was deleted between
374 # node1 and node2 (inclusive). Thus, ctx2's substate
372 # node1 and node2 (inclusive). Thus, ctx2's substate
375 # won't contain that subpath. The best we can do ignore it.
373 # won't contain that subpath. The best we can do ignore it.
376 rev2 = None
374 rev2 = None
377 submatch = matchmod.subdirmatcher(subpath, match)
375 submatch = matchmod.subdirmatcher(subpath, match)
378 s = sub.status(rev2, match=submatch, ignored=listignored,
376 s = sub.status(rev2, match=submatch, ignored=listignored,
379 clean=listclean, unknown=listunknown,
377 clean=listclean, unknown=listunknown,
380 listsubrepos=True)
378 listsubrepos=True)
381 for rfiles, sfiles in zip(r, s):
379 for rfiles, sfiles in zip(r, s):
382 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
380 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
383
381
384 for l in r:
382 for l in r:
385 l.sort()
383 l.sort()
386
384
387 return r
385 return r
388
386
389 def _filterederror(repo, changeid):
387 def _filterederror(repo, changeid):
390 """build an exception to be raised about a filtered changeid
388 """build an exception to be raised about a filtered changeid
391
389
392 This is extracted in a function to help extensions (eg: evolve) to
390 This is extracted in a function to help extensions (eg: evolve) to
393 experiment with various message variants."""
391 experiment with various message variants."""
394 if repo.filtername.startswith('visible'):
392 if repo.filtername.startswith('visible'):
395
393
396 # Check if the changeset is obsolete
394 # Check if the changeset is obsolete
397 unfilteredrepo = repo.unfiltered()
395 unfilteredrepo = repo.unfiltered()
398 ctx = unfilteredrepo[changeid]
396 ctx = unfilteredrepo[changeid]
399
397
400 # If the changeset is obsolete, enrich the message with the reason
398 # If the changeset is obsolete, enrich the message with the reason
401 # that made this changeset not visible
399 # that made this changeset not visible
402 if ctx.obsolete():
400 if ctx.obsolete():
403 msg = obsutil._getfilteredreason(repo, changeid, ctx)
401 msg = obsutil._getfilteredreason(repo, changeid, ctx)
404 else:
402 else:
405 msg = _("hidden revision '%s'") % changeid
403 msg = _("hidden revision '%s'") % changeid
406
404
407 hint = _('use --hidden to access hidden revisions')
405 hint = _('use --hidden to access hidden revisions')
408
406
409 return error.FilteredRepoLookupError(msg, hint=hint)
407 return error.FilteredRepoLookupError(msg, hint=hint)
410 msg = _("filtered revision '%s' (not in '%s' subset)")
408 msg = _("filtered revision '%s' (not in '%s' subset)")
411 msg %= (changeid, repo.filtername)
409 msg %= (changeid, repo.filtername)
412 return error.FilteredRepoLookupError(msg)
410 return error.FilteredRepoLookupError(msg)
413
411
414 class changectx(basectx):
412 class changectx(basectx):
415 """A changecontext object makes access to data related to a particular
413 """A changecontext object makes access to data related to a particular
416 changeset convenient. It represents a read-only context already present in
414 changeset convenient. It represents a read-only context already present in
417 the repo."""
415 the repo."""
418 def __init__(self, repo, changeid=''):
416 def __init__(self, repo, changeid=''):
419 """changeid is a revision number, node, or tag"""
417 """changeid is a revision number, node, or tag"""
420
418
421 # since basectx.__new__ already took care of copying the object, we
419 # since basectx.__new__ already took care of copying the object, we
422 # don't need to do anything in __init__, so we just exit here
420 # don't need to do anything in __init__, so we just exit here
423 if isinstance(changeid, basectx):
421 if isinstance(changeid, basectx):
424 return
422 return
425
423
426 if changeid == '':
424 if changeid == '':
427 changeid = '.'
425 changeid = '.'
428 self._repo = repo
426 self._repo = repo
429
427
430 try:
428 try:
431 if isinstance(changeid, int):
429 if isinstance(changeid, int):
432 self._node = repo.changelog.node(changeid)
430 self._node = repo.changelog.node(changeid)
433 self._rev = changeid
431 self._rev = changeid
434 return
432 return
435 if not pycompat.ispy3 and isinstance(changeid, long):
433 if not pycompat.ispy3 and isinstance(changeid, long):
436 changeid = "%d" % changeid
434 changeid = "%d" % changeid
437 if changeid == 'null':
435 if changeid == 'null':
438 self._node = nullid
436 self._node = nullid
439 self._rev = nullrev
437 self._rev = nullrev
440 return
438 return
441 if changeid == 'tip':
439 if changeid == 'tip':
442 self._node = repo.changelog.tip()
440 self._node = repo.changelog.tip()
443 self._rev = repo.changelog.rev(self._node)
441 self._rev = repo.changelog.rev(self._node)
444 return
442 return
445 if (changeid == '.'
443 if (changeid == '.'
446 or repo.local() and changeid == repo.dirstate.p1()):
444 or repo.local() and changeid == repo.dirstate.p1()):
447 # this is a hack to delay/avoid loading obsmarkers
445 # this is a hack to delay/avoid loading obsmarkers
448 # when we know that '.' won't be hidden
446 # when we know that '.' won't be hidden
449 self._node = repo.dirstate.p1()
447 self._node = repo.dirstate.p1()
450 self._rev = repo.unfiltered().changelog.rev(self._node)
448 self._rev = repo.unfiltered().changelog.rev(self._node)
451 return
449 return
452 if len(changeid) == 20:
450 if len(changeid) == 20:
453 try:
451 try:
454 self._node = changeid
452 self._node = changeid
455 self._rev = repo.changelog.rev(changeid)
453 self._rev = repo.changelog.rev(changeid)
456 return
454 return
457 except error.FilteredRepoLookupError:
455 except error.FilteredRepoLookupError:
458 raise
456 raise
459 except LookupError:
457 except LookupError:
460 pass
458 pass
461
459
462 try:
460 try:
463 r = int(changeid)
461 r = int(changeid)
464 if '%d' % r != changeid:
462 if '%d' % r != changeid:
465 raise ValueError
463 raise ValueError
466 l = len(repo.changelog)
464 l = len(repo.changelog)
467 if r < 0:
465 if r < 0:
468 r += l
466 r += l
469 if r < 0 or r >= l and r != wdirrev:
467 if r < 0 or r >= l and r != wdirrev:
470 raise ValueError
468 raise ValueError
471 self._rev = r
469 self._rev = r
472 self._node = repo.changelog.node(r)
470 self._node = repo.changelog.node(r)
473 return
471 return
474 except error.FilteredIndexError:
472 except error.FilteredIndexError:
475 raise
473 raise
476 except (ValueError, OverflowError, IndexError):
474 except (ValueError, OverflowError, IndexError):
477 pass
475 pass
478
476
479 if len(changeid) == 40:
477 if len(changeid) == 40:
480 try:
478 try:
481 self._node = bin(changeid)
479 self._node = bin(changeid)
482 self._rev = repo.changelog.rev(self._node)
480 self._rev = repo.changelog.rev(self._node)
483 return
481 return
484 except error.FilteredLookupError:
482 except error.FilteredLookupError:
485 raise
483 raise
486 except (TypeError, LookupError):
484 except (TypeError, LookupError):
487 pass
485 pass
488
486
489 # lookup bookmarks through the name interface
487 # lookup bookmarks through the name interface
490 try:
488 try:
491 self._node = repo.names.singlenode(repo, changeid)
489 self._node = repo.names.singlenode(repo, changeid)
492 self._rev = repo.changelog.rev(self._node)
490 self._rev = repo.changelog.rev(self._node)
493 return
491 return
494 except KeyError:
492 except KeyError:
495 pass
493 pass
496 except error.FilteredRepoLookupError:
494 except error.FilteredRepoLookupError:
497 raise
495 raise
498 except error.RepoLookupError:
496 except error.RepoLookupError:
499 pass
497 pass
500
498
501 self._node = repo.unfiltered().changelog._partialmatch(changeid)
499 self._node = repo.unfiltered().changelog._partialmatch(changeid)
502 if self._node is not None:
500 if self._node is not None:
503 self._rev = repo.changelog.rev(self._node)
501 self._rev = repo.changelog.rev(self._node)
504 return
502 return
505
503
506 # lookup failed
504 # lookup failed
507 # check if it might have come from damaged dirstate
505 # check if it might have come from damaged dirstate
508 #
506 #
509 # XXX we could avoid the unfiltered if we had a recognizable
507 # XXX we could avoid the unfiltered if we had a recognizable
510 # exception for filtered changeset access
508 # exception for filtered changeset access
511 if (repo.local()
509 if (repo.local()
512 and changeid in repo.unfiltered().dirstate.parents()):
510 and changeid in repo.unfiltered().dirstate.parents()):
513 msg = _("working directory has unknown parent '%s'!")
511 msg = _("working directory has unknown parent '%s'!")
514 raise error.Abort(msg % short(changeid))
512 raise error.Abort(msg % short(changeid))
515 try:
513 try:
516 if len(changeid) == 20 and nonascii(changeid):
514 if len(changeid) == 20 and nonascii(changeid):
517 changeid = hex(changeid)
515 changeid = hex(changeid)
518 except TypeError:
516 except TypeError:
519 pass
517 pass
520 except (error.FilteredIndexError, error.FilteredLookupError,
518 except (error.FilteredIndexError, error.FilteredLookupError,
521 error.FilteredRepoLookupError):
519 error.FilteredRepoLookupError):
522 raise _filterederror(repo, changeid)
520 raise _filterederror(repo, changeid)
523 except IndexError:
521 except IndexError:
524 pass
522 pass
525 raise error.RepoLookupError(
523 raise error.RepoLookupError(
526 _("unknown revision '%s'") % changeid)
524 _("unknown revision '%s'") % changeid)
527
525
528 def __hash__(self):
526 def __hash__(self):
529 try:
527 try:
530 return hash(self._rev)
528 return hash(self._rev)
531 except AttributeError:
529 except AttributeError:
532 return id(self)
530 return id(self)
533
531
534 def __nonzero__(self):
532 def __nonzero__(self):
535 return self._rev != nullrev
533 return self._rev != nullrev
536
534
537 __bool__ = __nonzero__
535 __bool__ = __nonzero__
538
536
539 @propertycache
537 @propertycache
540 def _changeset(self):
538 def _changeset(self):
541 return self._repo.changelog.changelogrevision(self.rev())
539 return self._repo.changelog.changelogrevision(self.rev())
542
540
543 @propertycache
541 @propertycache
544 def _manifest(self):
542 def _manifest(self):
545 return self._manifestctx.read()
543 return self._manifestctx.read()
546
544
547 @property
545 @property
548 def _manifestctx(self):
546 def _manifestctx(self):
549 return self._repo.manifestlog[self._changeset.manifest]
547 return self._repo.manifestlog[self._changeset.manifest]
550
548
551 @propertycache
549 @propertycache
552 def _manifestdelta(self):
550 def _manifestdelta(self):
553 return self._manifestctx.readdelta()
551 return self._manifestctx.readdelta()
554
552
555 @propertycache
553 @propertycache
556 def _parents(self):
554 def _parents(self):
557 repo = self._repo
555 repo = self._repo
558 p1, p2 = repo.changelog.parentrevs(self._rev)
556 p1, p2 = repo.changelog.parentrevs(self._rev)
559 if p2 == nullrev:
557 if p2 == nullrev:
560 return [changectx(repo, p1)]
558 return [changectx(repo, p1)]
561 return [changectx(repo, p1), changectx(repo, p2)]
559 return [changectx(repo, p1), changectx(repo, p2)]
562
560
563 def changeset(self):
561 def changeset(self):
564 c = self._changeset
562 c = self._changeset
565 return (
563 return (
566 c.manifest,
564 c.manifest,
567 c.user,
565 c.user,
568 c.date,
566 c.date,
569 c.files,
567 c.files,
570 c.description,
568 c.description,
571 c.extra,
569 c.extra,
572 )
570 )
573 def manifestnode(self):
571 def manifestnode(self):
574 return self._changeset.manifest
572 return self._changeset.manifest
575
573
576 def user(self):
574 def user(self):
577 return self._changeset.user
575 return self._changeset.user
578 def date(self):
576 def date(self):
579 return self._changeset.date
577 return self._changeset.date
580 def files(self):
578 def files(self):
581 return self._changeset.files
579 return self._changeset.files
582 def description(self):
580 def description(self):
583 return self._changeset.description
581 return self._changeset.description
584 def branch(self):
582 def branch(self):
585 return encoding.tolocal(self._changeset.extra.get("branch"))
583 return encoding.tolocal(self._changeset.extra.get("branch"))
586 def closesbranch(self):
584 def closesbranch(self):
587 return 'close' in self._changeset.extra
585 return 'close' in self._changeset.extra
588 def extra(self):
586 def extra(self):
589 """Return a dict of extra information."""
587 """Return a dict of extra information."""
590 return self._changeset.extra
588 return self._changeset.extra
591 def tags(self):
589 def tags(self):
592 """Return a list of byte tag names"""
590 """Return a list of byte tag names"""
593 return self._repo.nodetags(self._node)
591 return self._repo.nodetags(self._node)
594 def bookmarks(self):
592 def bookmarks(self):
595 """Return a list of byte bookmark names."""
593 """Return a list of byte bookmark names."""
596 return self._repo.nodebookmarks(self._node)
594 return self._repo.nodebookmarks(self._node)
597 def phase(self):
595 def phase(self):
598 return self._repo._phasecache.phase(self._repo, self._rev)
596 return self._repo._phasecache.phase(self._repo, self._rev)
599 def hidden(self):
597 def hidden(self):
600 return self._rev in repoview.filterrevs(self._repo, 'visible')
598 return self._rev in repoview.filterrevs(self._repo, 'visible')
601
599
602 def isinmemory(self):
600 def isinmemory(self):
603 return False
601 return False
604
602
605 def children(self):
603 def children(self):
606 """return list of changectx contexts for each child changeset.
604 """return list of changectx contexts for each child changeset.
607
605
608 This returns only the immediate child changesets. Use descendants() to
606 This returns only the immediate child changesets. Use descendants() to
609 recursively walk children.
607 recursively walk children.
610 """
608 """
611 c = self._repo.changelog.children(self._node)
609 c = self._repo.changelog.children(self._node)
612 return [changectx(self._repo, x) for x in c]
610 return [changectx(self._repo, x) for x in c]
613
611
614 def ancestors(self):
612 def ancestors(self):
615 for a in self._repo.changelog.ancestors([self._rev]):
613 for a in self._repo.changelog.ancestors([self._rev]):
616 yield changectx(self._repo, a)
614 yield changectx(self._repo, a)
617
615
618 def descendants(self):
616 def descendants(self):
619 """Recursively yield all children of the changeset.
617 """Recursively yield all children of the changeset.
620
618
621 For just the immediate children, use children()
619 For just the immediate children, use children()
622 """
620 """
623 for d in self._repo.changelog.descendants([self._rev]):
621 for d in self._repo.changelog.descendants([self._rev]):
624 yield changectx(self._repo, d)
622 yield changectx(self._repo, d)
625
623
626 def filectx(self, path, fileid=None, filelog=None):
624 def filectx(self, path, fileid=None, filelog=None):
627 """get a file context from this changeset"""
625 """get a file context from this changeset"""
628 if fileid is None:
626 if fileid is None:
629 fileid = self.filenode(path)
627 fileid = self.filenode(path)
630 return filectx(self._repo, path, fileid=fileid,
628 return filectx(self._repo, path, fileid=fileid,
631 changectx=self, filelog=filelog)
629 changectx=self, filelog=filelog)
632
630
633 def ancestor(self, c2, warn=False):
631 def ancestor(self, c2, warn=False):
634 """return the "best" ancestor context of self and c2
632 """return the "best" ancestor context of self and c2
635
633
636 If there are multiple candidates, it will show a message and check
634 If there are multiple candidates, it will show a message and check
637 merge.preferancestor configuration before falling back to the
635 merge.preferancestor configuration before falling back to the
638 revlog ancestor."""
636 revlog ancestor."""
639 # deal with workingctxs
637 # deal with workingctxs
640 n2 = c2._node
638 n2 = c2._node
641 if n2 is None:
639 if n2 is None:
642 n2 = c2._parents[0]._node
640 n2 = c2._parents[0]._node
643 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
641 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
644 if not cahs:
642 if not cahs:
645 anc = nullid
643 anc = nullid
646 elif len(cahs) == 1:
644 elif len(cahs) == 1:
647 anc = cahs[0]
645 anc = cahs[0]
648 else:
646 else:
649 # experimental config: merge.preferancestor
647 # experimental config: merge.preferancestor
650 for r in self._repo.ui.configlist('merge', 'preferancestor'):
648 for r in self._repo.ui.configlist('merge', 'preferancestor'):
651 try:
649 try:
652 ctx = changectx(self._repo, r)
650 ctx = changectx(self._repo, r)
653 except error.RepoLookupError:
651 except error.RepoLookupError:
654 continue
652 continue
655 anc = ctx.node()
653 anc = ctx.node()
656 if anc in cahs:
654 if anc in cahs:
657 break
655 break
658 else:
656 else:
659 anc = self._repo.changelog.ancestor(self._node, n2)
657 anc = self._repo.changelog.ancestor(self._node, n2)
660 if warn:
658 if warn:
661 self._repo.ui.status(
659 self._repo.ui.status(
662 (_("note: using %s as ancestor of %s and %s\n") %
660 (_("note: using %s as ancestor of %s and %s\n") %
663 (short(anc), short(self._node), short(n2))) +
661 (short(anc), short(self._node), short(n2))) +
664 ''.join(_(" alternatively, use --config "
662 ''.join(_(" alternatively, use --config "
665 "merge.preferancestor=%s\n") %
663 "merge.preferancestor=%s\n") %
666 short(n) for n in sorted(cahs) if n != anc))
664 short(n) for n in sorted(cahs) if n != anc))
667 return changectx(self._repo, anc)
665 return changectx(self._repo, anc)
668
666
669 def descendant(self, other):
667 def descendant(self, other):
670 """True if other is descendant of this changeset"""
668 """True if other is descendant of this changeset"""
671 return self._repo.changelog.descendant(self._rev, other._rev)
669 return self._repo.changelog.descendant(self._rev, other._rev)
672
670
673 def walk(self, match):
671 def walk(self, match):
674 '''Generates matching file names.'''
672 '''Generates matching file names.'''
675
673
676 # Wrap match.bad method to have message with nodeid
674 # Wrap match.bad method to have message with nodeid
677 def bad(fn, msg):
675 def bad(fn, msg):
678 # The manifest doesn't know about subrepos, so don't complain about
676 # The manifest doesn't know about subrepos, so don't complain about
679 # paths into valid subrepos.
677 # paths into valid subrepos.
680 if any(fn == s or fn.startswith(s + '/')
678 if any(fn == s or fn.startswith(s + '/')
681 for s in self.substate):
679 for s in self.substate):
682 return
680 return
683 match.bad(fn, _('no such file in rev %s') % self)
681 match.bad(fn, _('no such file in rev %s') % self)
684
682
685 m = matchmod.badmatch(match, bad)
683 m = matchmod.badmatch(match, bad)
686 return self._manifest.walk(m)
684 return self._manifest.walk(m)
687
685
688 def matches(self, match):
686 def matches(self, match):
689 return self.walk(match)
687 return self.walk(match)
690
688
691 class basefilectx(object):
689 class basefilectx(object):
692 """A filecontext object represents the common logic for its children:
690 """A filecontext object represents the common logic for its children:
693 filectx: read-only access to a filerevision that is already present
691 filectx: read-only access to a filerevision that is already present
694 in the repo,
692 in the repo,
695 workingfilectx: a filecontext that represents files from the working
693 workingfilectx: a filecontext that represents files from the working
696 directory,
694 directory,
697 memfilectx: a filecontext that represents files in-memory,
695 memfilectx: a filecontext that represents files in-memory,
698 overlayfilectx: duplicate another filecontext with some fields overridden.
696 overlayfilectx: duplicate another filecontext with some fields overridden.
699 """
697 """
700 @propertycache
698 @propertycache
701 def _filelog(self):
699 def _filelog(self):
702 return self._repo.file(self._path)
700 return self._repo.file(self._path)
703
701
704 @propertycache
702 @propertycache
705 def _changeid(self):
703 def _changeid(self):
706 if r'_changeid' in self.__dict__:
704 if r'_changeid' in self.__dict__:
707 return self._changeid
705 return self._changeid
708 elif r'_changectx' in self.__dict__:
706 elif r'_changectx' in self.__dict__:
709 return self._changectx.rev()
707 return self._changectx.rev()
710 elif r'_descendantrev' in self.__dict__:
708 elif r'_descendantrev' in self.__dict__:
711 # this file context was created from a revision with a known
709 # this file context was created from a revision with a known
712 # descendant, we can (lazily) correct for linkrev aliases
710 # descendant, we can (lazily) correct for linkrev aliases
713 return self._adjustlinkrev(self._descendantrev)
711 return self._adjustlinkrev(self._descendantrev)
714 else:
712 else:
715 return self._filelog.linkrev(self._filerev)
713 return self._filelog.linkrev(self._filerev)
716
714
717 @propertycache
715 @propertycache
718 def _filenode(self):
716 def _filenode(self):
719 if r'_fileid' in self.__dict__:
717 if r'_fileid' in self.__dict__:
720 return self._filelog.lookup(self._fileid)
718 return self._filelog.lookup(self._fileid)
721 else:
719 else:
722 return self._changectx.filenode(self._path)
720 return self._changectx.filenode(self._path)
723
721
724 @propertycache
722 @propertycache
725 def _filerev(self):
723 def _filerev(self):
726 return self._filelog.rev(self._filenode)
724 return self._filelog.rev(self._filenode)
727
725
728 @propertycache
726 @propertycache
729 def _repopath(self):
727 def _repopath(self):
730 return self._path
728 return self._path
731
729
732 def __nonzero__(self):
730 def __nonzero__(self):
733 try:
731 try:
734 self._filenode
732 self._filenode
735 return True
733 return True
736 except error.LookupError:
734 except error.LookupError:
737 # file is missing
735 # file is missing
738 return False
736 return False
739
737
740 __bool__ = __nonzero__
738 __bool__ = __nonzero__
741
739
742 def __bytes__(self):
740 def __bytes__(self):
743 try:
741 try:
744 return "%s@%s" % (self.path(), self._changectx)
742 return "%s@%s" % (self.path(), self._changectx)
745 except error.LookupError:
743 except error.LookupError:
746 return "%s@???" % self.path()
744 return "%s@???" % self.path()
747
745
748 __str__ = encoding.strmethod(__bytes__)
746 __str__ = encoding.strmethod(__bytes__)
749
747
750 def __repr__(self):
748 def __repr__(self):
751 return r"<%s %s>" % (type(self).__name__, str(self))
749 return r"<%s %s>" % (type(self).__name__, str(self))
752
750
753 def __hash__(self):
751 def __hash__(self):
754 try:
752 try:
755 return hash((self._path, self._filenode))
753 return hash((self._path, self._filenode))
756 except AttributeError:
754 except AttributeError:
757 return id(self)
755 return id(self)
758
756
759 def __eq__(self, other):
757 def __eq__(self, other):
760 try:
758 try:
761 return (type(self) == type(other) and self._path == other._path
759 return (type(self) == type(other) and self._path == other._path
762 and self._filenode == other._filenode)
760 and self._filenode == other._filenode)
763 except AttributeError:
761 except AttributeError:
764 return False
762 return False
765
763
766 def __ne__(self, other):
764 def __ne__(self, other):
767 return not (self == other)
765 return not (self == other)
768
766
769 def filerev(self):
767 def filerev(self):
770 return self._filerev
768 return self._filerev
771 def filenode(self):
769 def filenode(self):
772 return self._filenode
770 return self._filenode
773 @propertycache
771 @propertycache
774 def _flags(self):
772 def _flags(self):
775 return self._changectx.flags(self._path)
773 return self._changectx.flags(self._path)
776 def flags(self):
774 def flags(self):
777 return self._flags
775 return self._flags
778 def filelog(self):
776 def filelog(self):
779 return self._filelog
777 return self._filelog
780 def rev(self):
778 def rev(self):
781 return self._changeid
779 return self._changeid
782 def linkrev(self):
780 def linkrev(self):
783 return self._filelog.linkrev(self._filerev)
781 return self._filelog.linkrev(self._filerev)
784 def node(self):
782 def node(self):
785 return self._changectx.node()
783 return self._changectx.node()
786 def hex(self):
784 def hex(self):
787 return self._changectx.hex()
785 return self._changectx.hex()
788 def user(self):
786 def user(self):
789 return self._changectx.user()
787 return self._changectx.user()
790 def date(self):
788 def date(self):
791 return self._changectx.date()
789 return self._changectx.date()
792 def files(self):
790 def files(self):
793 return self._changectx.files()
791 return self._changectx.files()
794 def description(self):
792 def description(self):
795 return self._changectx.description()
793 return self._changectx.description()
796 def branch(self):
794 def branch(self):
797 return self._changectx.branch()
795 return self._changectx.branch()
798 def extra(self):
796 def extra(self):
799 return self._changectx.extra()
797 return self._changectx.extra()
800 def phase(self):
798 def phase(self):
801 return self._changectx.phase()
799 return self._changectx.phase()
802 def phasestr(self):
800 def phasestr(self):
803 return self._changectx.phasestr()
801 return self._changectx.phasestr()
804 def obsolete(self):
802 def obsolete(self):
805 return self._changectx.obsolete()
803 return self._changectx.obsolete()
806 def instabilities(self):
804 def instabilities(self):
807 return self._changectx.instabilities()
805 return self._changectx.instabilities()
808 def manifest(self):
806 def manifest(self):
809 return self._changectx.manifest()
807 return self._changectx.manifest()
810 def changectx(self):
808 def changectx(self):
811 return self._changectx
809 return self._changectx
812 def renamed(self):
810 def renamed(self):
813 return self._copied
811 return self._copied
814 def repo(self):
812 def repo(self):
815 return self._repo
813 return self._repo
816 def size(self):
814 def size(self):
817 return len(self.data())
815 return len(self.data())
818
816
819 def path(self):
817 def path(self):
820 return self._path
818 return self._path
821
819
822 def isbinary(self):
820 def isbinary(self):
823 try:
821 try:
824 return util.binary(self.data())
822 return util.binary(self.data())
825 except IOError:
823 except IOError:
826 return False
824 return False
827 def isexec(self):
825 def isexec(self):
828 return 'x' in self.flags()
826 return 'x' in self.flags()
829 def islink(self):
827 def islink(self):
830 return 'l' in self.flags()
828 return 'l' in self.flags()
831
829
832 def isabsent(self):
830 def isabsent(self):
833 """whether this filectx represents a file not in self._changectx
831 """whether this filectx represents a file not in self._changectx
834
832
835 This is mainly for merge code to detect change/delete conflicts. This is
833 This is mainly for merge code to detect change/delete conflicts. This is
836 expected to be True for all subclasses of basectx."""
834 expected to be True for all subclasses of basectx."""
837 return False
835 return False
838
836
839 _customcmp = False
837 _customcmp = False
840 def cmp(self, fctx):
838 def cmp(self, fctx):
841 """compare with other file context
839 """compare with other file context
842
840
843 returns True if different than fctx.
841 returns True if different than fctx.
844 """
842 """
845 if fctx._customcmp:
843 if fctx._customcmp:
846 return fctx.cmp(self)
844 return fctx.cmp(self)
847
845
848 if (fctx._filenode is None
846 if (fctx._filenode is None
849 and (self._repo._encodefilterpats
847 and (self._repo._encodefilterpats
850 # if file data starts with '\1\n', empty metadata block is
848 # if file data starts with '\1\n', empty metadata block is
851 # prepended, which adds 4 bytes to filelog.size().
849 # prepended, which adds 4 bytes to filelog.size().
852 or self.size() - 4 == fctx.size())
850 or self.size() - 4 == fctx.size())
853 or self.size() == fctx.size()):
851 or self.size() == fctx.size()):
854 return self._filelog.cmp(self._filenode, fctx.data())
852 return self._filelog.cmp(self._filenode, fctx.data())
855
853
856 return True
854 return True
857
855
858 def _adjustlinkrev(self, srcrev, inclusive=False):
856 def _adjustlinkrev(self, srcrev, inclusive=False):
859 """return the first ancestor of <srcrev> introducing <fnode>
857 """return the first ancestor of <srcrev> introducing <fnode>
860
858
861 If the linkrev of the file revision does not point to an ancestor of
859 If the linkrev of the file revision does not point to an ancestor of
862 srcrev, we'll walk down the ancestors until we find one introducing
860 srcrev, we'll walk down the ancestors until we find one introducing
863 this file revision.
861 this file revision.
864
862
865 :srcrev: the changeset revision we search ancestors from
863 :srcrev: the changeset revision we search ancestors from
866 :inclusive: if true, the src revision will also be checked
864 :inclusive: if true, the src revision will also be checked
867 """
865 """
868 repo = self._repo
866 repo = self._repo
869 cl = repo.unfiltered().changelog
867 cl = repo.unfiltered().changelog
870 mfl = repo.manifestlog
868 mfl = repo.manifestlog
871 # fetch the linkrev
869 # fetch the linkrev
872 lkr = self.linkrev()
870 lkr = self.linkrev()
873 # hack to reuse ancestor computation when searching for renames
871 # hack to reuse ancestor computation when searching for renames
874 memberanc = getattr(self, '_ancestrycontext', None)
872 memberanc = getattr(self, '_ancestrycontext', None)
875 iteranc = None
873 iteranc = None
876 if srcrev is None:
874 if srcrev is None:
877 # wctx case, used by workingfilectx during mergecopy
875 # wctx case, used by workingfilectx during mergecopy
878 revs = [p.rev() for p in self._repo[None].parents()]
876 revs = [p.rev() for p in self._repo[None].parents()]
879 inclusive = True # we skipped the real (revless) source
877 inclusive = True # we skipped the real (revless) source
880 else:
878 else:
881 revs = [srcrev]
879 revs = [srcrev]
882 if memberanc is None:
880 if memberanc is None:
883 memberanc = iteranc = cl.ancestors(revs, lkr,
881 memberanc = iteranc = cl.ancestors(revs, lkr,
884 inclusive=inclusive)
882 inclusive=inclusive)
885 # check if this linkrev is an ancestor of srcrev
883 # check if this linkrev is an ancestor of srcrev
886 if lkr not in memberanc:
884 if lkr not in memberanc:
887 if iteranc is None:
885 if iteranc is None:
888 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
886 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
889 fnode = self._filenode
887 fnode = self._filenode
890 path = self._path
888 path = self._path
891 for a in iteranc:
889 for a in iteranc:
892 ac = cl.read(a) # get changeset data (we avoid object creation)
890 ac = cl.read(a) # get changeset data (we avoid object creation)
893 if path in ac[3]: # checking the 'files' field.
891 if path in ac[3]: # checking the 'files' field.
894 # The file has been touched, check if the content is
892 # The file has been touched, check if the content is
895 # similar to the one we search for.
893 # similar to the one we search for.
896 if fnode == mfl[ac[0]].readfast().get(path):
894 if fnode == mfl[ac[0]].readfast().get(path):
897 return a
895 return a
898 # In theory, we should never get out of that loop without a result.
896 # In theory, we should never get out of that loop without a result.
899 # But if manifest uses a buggy file revision (not children of the
897 # But if manifest uses a buggy file revision (not children of the
900 # one it replaces) we could. Such a buggy situation will likely
898 # one it replaces) we could. Such a buggy situation will likely
901 # result is crash somewhere else at to some point.
899 # result is crash somewhere else at to some point.
902 return lkr
900 return lkr
903
901
904 def introrev(self):
902 def introrev(self):
905 """return the rev of the changeset which introduced this file revision
903 """return the rev of the changeset which introduced this file revision
906
904
907 This method is different from linkrev because it take into account the
905 This method is different from linkrev because it take into account the
908 changeset the filectx was created from. It ensures the returned
906 changeset the filectx was created from. It ensures the returned
909 revision is one of its ancestors. This prevents bugs from
907 revision is one of its ancestors. This prevents bugs from
910 'linkrev-shadowing' when a file revision is used by multiple
908 'linkrev-shadowing' when a file revision is used by multiple
911 changesets.
909 changesets.
912 """
910 """
913 lkr = self.linkrev()
911 lkr = self.linkrev()
914 attrs = vars(self)
912 attrs = vars(self)
915 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
913 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
916 if noctx or self.rev() == lkr:
914 if noctx or self.rev() == lkr:
917 return self.linkrev()
915 return self.linkrev()
918 return self._adjustlinkrev(self.rev(), inclusive=True)
916 return self._adjustlinkrev(self.rev(), inclusive=True)
919
917
920 def introfilectx(self):
918 def introfilectx(self):
921 """Return filectx having identical contents, but pointing to the
919 """Return filectx having identical contents, but pointing to the
922 changeset revision where this filectx was introduced"""
920 changeset revision where this filectx was introduced"""
923 introrev = self.introrev()
921 introrev = self.introrev()
924 if self.rev() == introrev:
922 if self.rev() == introrev:
925 return self
923 return self
926 return self.filectx(self.filenode(), changeid=introrev)
924 return self.filectx(self.filenode(), changeid=introrev)
927
925
928 def _parentfilectx(self, path, fileid, filelog):
926 def _parentfilectx(self, path, fileid, filelog):
929 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
927 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
930 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
928 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
931 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
929 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
932 # If self is associated with a changeset (probably explicitly
930 # If self is associated with a changeset (probably explicitly
933 # fed), ensure the created filectx is associated with a
931 # fed), ensure the created filectx is associated with a
934 # changeset that is an ancestor of self.changectx.
932 # changeset that is an ancestor of self.changectx.
935 # This lets us later use _adjustlinkrev to get a correct link.
933 # This lets us later use _adjustlinkrev to get a correct link.
936 fctx._descendantrev = self.rev()
934 fctx._descendantrev = self.rev()
937 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
935 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
938 elif r'_descendantrev' in vars(self):
936 elif r'_descendantrev' in vars(self):
939 # Otherwise propagate _descendantrev if we have one associated.
937 # Otherwise propagate _descendantrev if we have one associated.
940 fctx._descendantrev = self._descendantrev
938 fctx._descendantrev = self._descendantrev
941 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
939 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
942 return fctx
940 return fctx
943
941
944 def parents(self):
942 def parents(self):
945 _path = self._path
943 _path = self._path
946 fl = self._filelog
944 fl = self._filelog
947 parents = self._filelog.parents(self._filenode)
945 parents = self._filelog.parents(self._filenode)
948 pl = [(_path, node, fl) for node in parents if node != nullid]
946 pl = [(_path, node, fl) for node in parents if node != nullid]
949
947
950 r = fl.renamed(self._filenode)
948 r = fl.renamed(self._filenode)
951 if r:
949 if r:
952 # - In the simple rename case, both parent are nullid, pl is empty.
950 # - In the simple rename case, both parent are nullid, pl is empty.
953 # - In case of merge, only one of the parent is null id and should
951 # - In case of merge, only one of the parent is null id and should
954 # be replaced with the rename information. This parent is -always-
952 # be replaced with the rename information. This parent is -always-
955 # the first one.
953 # the first one.
956 #
954 #
957 # As null id have always been filtered out in the previous list
955 # As null id have always been filtered out in the previous list
958 # comprehension, inserting to 0 will always result in "replacing
956 # comprehension, inserting to 0 will always result in "replacing
959 # first nullid parent with rename information.
957 # first nullid parent with rename information.
960 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
958 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
961
959
962 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
960 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
963
961
964 def p1(self):
962 def p1(self):
965 return self.parents()[0]
963 return self.parents()[0]
966
964
967 def p2(self):
965 def p2(self):
968 p = self.parents()
966 p = self.parents()
969 if len(p) == 2:
967 if len(p) == 2:
970 return p[1]
968 return p[1]
971 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
969 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
972
970
973 def annotate(self, follow=False, linenumber=False, skiprevs=None,
971 def annotate(self, follow=False, linenumber=False, skiprevs=None,
974 diffopts=None):
972 diffopts=None):
975 '''returns a list of tuples of ((ctx, number), line) for each line
973 '''returns a list of tuples of ((ctx, number), line) for each line
976 in the file, where ctx is the filectx of the node where
974 in the file, where ctx is the filectx of the node where
977 that line was last changed; if linenumber parameter is true, number is
975 that line was last changed; if linenumber parameter is true, number is
978 the line number at the first appearance in the managed file, otherwise,
976 the line number at the first appearance in the managed file, otherwise,
979 number has a fixed value of False.
977 number has a fixed value of False.
980 '''
978 '''
979 annotateline = dagop.annotateline
980 _annotatepair = dagop._annotatepair
981
981
982 def lines(text):
982 def lines(text):
983 if text.endswith("\n"):
983 if text.endswith("\n"):
984 return text.count("\n")
984 return text.count("\n")
985 return text.count("\n") + int(bool(text))
985 return text.count("\n") + int(bool(text))
986
986
987 if linenumber:
987 if linenumber:
988 def decorate(text, rev):
988 def decorate(text, rev):
989 return ([annotateline(fctx=rev, lineno=i)
989 return ([annotateline(fctx=rev, lineno=i)
990 for i in xrange(1, lines(text) + 1)], text)
990 for i in xrange(1, lines(text) + 1)], text)
991 else:
991 else:
992 def decorate(text, rev):
992 def decorate(text, rev):
993 return ([annotateline(fctx=rev)] * lines(text), text)
993 return ([annotateline(fctx=rev)] * lines(text), text)
994
994
995 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
995 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
996
996
997 def parents(f):
997 def parents(f):
998 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
998 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
999 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
999 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1000 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1000 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1001 # isn't an ancestor of the srcrev.
1001 # isn't an ancestor of the srcrev.
1002 f._changeid
1002 f._changeid
1003 pl = f.parents()
1003 pl = f.parents()
1004
1004
1005 # Don't return renamed parents if we aren't following.
1005 # Don't return renamed parents if we aren't following.
1006 if not follow:
1006 if not follow:
1007 pl = [p for p in pl if p.path() == f.path()]
1007 pl = [p for p in pl if p.path() == f.path()]
1008
1008
1009 # renamed filectx won't have a filelog yet, so set it
1009 # renamed filectx won't have a filelog yet, so set it
1010 # from the cache to save time
1010 # from the cache to save time
1011 for p in pl:
1011 for p in pl:
1012 if not r'_filelog' in p.__dict__:
1012 if not r'_filelog' in p.__dict__:
1013 p._filelog = getlog(p.path())
1013 p._filelog = getlog(p.path())
1014
1014
1015 return pl
1015 return pl
1016
1016
1017 # use linkrev to find the first changeset where self appeared
1017 # use linkrev to find the first changeset where self appeared
1018 base = self.introfilectx()
1018 base = self.introfilectx()
1019 if getattr(base, '_ancestrycontext', None) is None:
1019 if getattr(base, '_ancestrycontext', None) is None:
1020 cl = self._repo.changelog
1020 cl = self._repo.changelog
1021 if base.rev() is None:
1021 if base.rev() is None:
1022 # wctx is not inclusive, but works because _ancestrycontext
1022 # wctx is not inclusive, but works because _ancestrycontext
1023 # is used to test filelog revisions
1023 # is used to test filelog revisions
1024 ac = cl.ancestors([p.rev() for p in base.parents()],
1024 ac = cl.ancestors([p.rev() for p in base.parents()],
1025 inclusive=True)
1025 inclusive=True)
1026 else:
1026 else:
1027 ac = cl.ancestors([base.rev()], inclusive=True)
1027 ac = cl.ancestors([base.rev()], inclusive=True)
1028 base._ancestrycontext = ac
1028 base._ancestrycontext = ac
1029
1029
1030 # This algorithm would prefer to be recursive, but Python is a
1030 # This algorithm would prefer to be recursive, but Python is a
1031 # bit recursion-hostile. Instead we do an iterative
1031 # bit recursion-hostile. Instead we do an iterative
1032 # depth-first search.
1032 # depth-first search.
1033
1033
1034 # 1st DFS pre-calculates pcache and needed
1034 # 1st DFS pre-calculates pcache and needed
1035 visit = [base]
1035 visit = [base]
1036 pcache = {}
1036 pcache = {}
1037 needed = {base: 1}
1037 needed = {base: 1}
1038 while visit:
1038 while visit:
1039 f = visit.pop()
1039 f = visit.pop()
1040 if f in pcache:
1040 if f in pcache:
1041 continue
1041 continue
1042 pl = parents(f)
1042 pl = parents(f)
1043 pcache[f] = pl
1043 pcache[f] = pl
1044 for p in pl:
1044 for p in pl:
1045 needed[p] = needed.get(p, 0) + 1
1045 needed[p] = needed.get(p, 0) + 1
1046 if p not in pcache:
1046 if p not in pcache:
1047 visit.append(p)
1047 visit.append(p)
1048
1048
1049 # 2nd DFS does the actual annotate
1049 # 2nd DFS does the actual annotate
1050 visit[:] = [base]
1050 visit[:] = [base]
1051 hist = {}
1051 hist = {}
1052 while visit:
1052 while visit:
1053 f = visit[-1]
1053 f = visit[-1]
1054 if f in hist:
1054 if f in hist:
1055 visit.pop()
1055 visit.pop()
1056 continue
1056 continue
1057
1057
1058 ready = True
1058 ready = True
1059 pl = pcache[f]
1059 pl = pcache[f]
1060 for p in pl:
1060 for p in pl:
1061 if p not in hist:
1061 if p not in hist:
1062 ready = False
1062 ready = False
1063 visit.append(p)
1063 visit.append(p)
1064 if ready:
1064 if ready:
1065 visit.pop()
1065 visit.pop()
1066 curr = decorate(f.data(), f)
1066 curr = decorate(f.data(), f)
1067 skipchild = False
1067 skipchild = False
1068 if skiprevs is not None:
1068 if skiprevs is not None:
1069 skipchild = f._changeid in skiprevs
1069 skipchild = f._changeid in skiprevs
1070 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1070 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1071 diffopts)
1071 diffopts)
1072 for p in pl:
1072 for p in pl:
1073 if needed[p] == 1:
1073 if needed[p] == 1:
1074 del hist[p]
1074 del hist[p]
1075 del needed[p]
1075 del needed[p]
1076 else:
1076 else:
1077 needed[p] -= 1
1077 needed[p] -= 1
1078
1078
1079 hist[f] = curr
1079 hist[f] = curr
1080 del pcache[f]
1080 del pcache[f]
1081
1081
1082 lineattrs, text = hist[base]
1082 lineattrs, text = hist[base]
1083 return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
1083 return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
1084
1084
1085 def ancestors(self, followfirst=False):
1085 def ancestors(self, followfirst=False):
1086 visit = {}
1086 visit = {}
1087 c = self
1087 c = self
1088 if followfirst:
1088 if followfirst:
1089 cut = 1
1089 cut = 1
1090 else:
1090 else:
1091 cut = None
1091 cut = None
1092
1092
1093 while True:
1093 while True:
1094 for parent in c.parents()[:cut]:
1094 for parent in c.parents()[:cut]:
1095 visit[(parent.linkrev(), parent.filenode())] = parent
1095 visit[(parent.linkrev(), parent.filenode())] = parent
1096 if not visit:
1096 if not visit:
1097 break
1097 break
1098 c = visit.pop(max(visit))
1098 c = visit.pop(max(visit))
1099 yield c
1099 yield c
1100
1100
1101 def decodeddata(self):
1101 def decodeddata(self):
1102 """Returns `data()` after running repository decoding filters.
1102 """Returns `data()` after running repository decoding filters.
1103
1103
1104 This is often equivalent to how the data would be expressed on disk.
1104 This is often equivalent to how the data would be expressed on disk.
1105 """
1105 """
1106 return self._repo.wwritedata(self.path(), self.data())
1106 return self._repo.wwritedata(self.path(), self.data())
1107
1107
1108 @attr.s(slots=True, frozen=True)
1109 class annotateline(object):
1110 fctx = attr.ib()
1111 lineno = attr.ib(default=False)
1112 # Whether this annotation was the result of a skip-annotate.
1113 skip = attr.ib(default=False)
1114
1115 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1116 r'''
1117 Given parent and child fctxes and annotate data for parents, for all lines
1118 in either parent that match the child, annotate the child with the parent's
1119 data.
1120
1121 Additionally, if `skipchild` is True, replace all other lines with parent
1122 annotate data as well such that child is never blamed for any lines.
1123
1124 See test-annotate.py for unit tests.
1125 '''
1126 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1127 for parent in parents]
1128
1129 if skipchild:
1130 # Need to iterate over the blocks twice -- make it a list
1131 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1132 # Mercurial currently prefers p2 over p1 for annotate.
1133 # TODO: change this?
1134 for parent, blocks in pblocks:
1135 for (a1, a2, b1, b2), t in blocks:
1136 # Changed blocks ('!') or blocks made only of blank lines ('~')
1137 # belong to the child.
1138 if t == '=':
1139 child[0][b1:b2] = parent[0][a1:a2]
1140
1141 if skipchild:
1142 # Now try and match up anything that couldn't be matched,
1143 # Reversing pblocks maintains bias towards p2, matching above
1144 # behavior.
1145 pblocks.reverse()
1146
1147 # The heuristics are:
1148 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1149 # This could potentially be smarter but works well enough.
1150 # * For a non-matching section, do a best-effort fit. Match lines in
1151 # diff hunks 1:1, dropping lines as necessary.
1152 # * Repeat the last line as a last resort.
1153
1154 # First, replace as much as possible without repeating the last line.
1155 remaining = [(parent, []) for parent, _blocks in pblocks]
1156 for idx, (parent, blocks) in enumerate(pblocks):
1157 for (a1, a2, b1, b2), _t in blocks:
1158 if a2 - a1 >= b2 - b1:
1159 for bk in xrange(b1, b2):
1160 if child[0][bk].fctx == childfctx:
1161 ak = min(a1 + (bk - b1), a2 - 1)
1162 child[0][bk] = attr.evolve(parent[0][ak], skip=True)
1163 else:
1164 remaining[idx][1].append((a1, a2, b1, b2))
1165
1166 # Then, look at anything left, which might involve repeating the last
1167 # line.
1168 for parent, blocks in remaining:
1169 for a1, a2, b1, b2 in blocks:
1170 for bk in xrange(b1, b2):
1171 if child[0][bk].fctx == childfctx:
1172 ak = min(a1 + (bk - b1), a2 - 1)
1173 child[0][bk] = attr.evolve(parent[0][ak], skip=True)
1174 return child
1175
1176 class filectx(basefilectx):
1108 class filectx(basefilectx):
1177 """A filecontext object makes access to data related to a particular
1109 """A filecontext object makes access to data related to a particular
1178 filerevision convenient."""
1110 filerevision convenient."""
1179 def __init__(self, repo, path, changeid=None, fileid=None,
1111 def __init__(self, repo, path, changeid=None, fileid=None,
1180 filelog=None, changectx=None):
1112 filelog=None, changectx=None):
1181 """changeid can be a changeset revision, node, or tag.
1113 """changeid can be a changeset revision, node, or tag.
1182 fileid can be a file revision or node."""
1114 fileid can be a file revision or node."""
1183 self._repo = repo
1115 self._repo = repo
1184 self._path = path
1116 self._path = path
1185
1117
1186 assert (changeid is not None
1118 assert (changeid is not None
1187 or fileid is not None
1119 or fileid is not None
1188 or changectx is not None), \
1120 or changectx is not None), \
1189 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1121 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1190 % (changeid, fileid, changectx))
1122 % (changeid, fileid, changectx))
1191
1123
1192 if filelog is not None:
1124 if filelog is not None:
1193 self._filelog = filelog
1125 self._filelog = filelog
1194
1126
1195 if changeid is not None:
1127 if changeid is not None:
1196 self._changeid = changeid
1128 self._changeid = changeid
1197 if changectx is not None:
1129 if changectx is not None:
1198 self._changectx = changectx
1130 self._changectx = changectx
1199 if fileid is not None:
1131 if fileid is not None:
1200 self._fileid = fileid
1132 self._fileid = fileid
1201
1133
1202 @propertycache
1134 @propertycache
1203 def _changectx(self):
1135 def _changectx(self):
1204 try:
1136 try:
1205 return changectx(self._repo, self._changeid)
1137 return changectx(self._repo, self._changeid)
1206 except error.FilteredRepoLookupError:
1138 except error.FilteredRepoLookupError:
1207 # Linkrev may point to any revision in the repository. When the
1139 # Linkrev may point to any revision in the repository. When the
1208 # repository is filtered this may lead to `filectx` trying to build
1140 # repository is filtered this may lead to `filectx` trying to build
1209 # `changectx` for filtered revision. In such case we fallback to
1141 # `changectx` for filtered revision. In such case we fallback to
1210 # creating `changectx` on the unfiltered version of the reposition.
1142 # creating `changectx` on the unfiltered version of the reposition.
1211 # This fallback should not be an issue because `changectx` from
1143 # This fallback should not be an issue because `changectx` from
1212 # `filectx` are not used in complex operations that care about
1144 # `filectx` are not used in complex operations that care about
1213 # filtering.
1145 # filtering.
1214 #
1146 #
1215 # This fallback is a cheap and dirty fix that prevent several
1147 # This fallback is a cheap and dirty fix that prevent several
1216 # crashes. It does not ensure the behavior is correct. However the
1148 # crashes. It does not ensure the behavior is correct. However the
1217 # behavior was not correct before filtering either and "incorrect
1149 # behavior was not correct before filtering either and "incorrect
1218 # behavior" is seen as better as "crash"
1150 # behavior" is seen as better as "crash"
1219 #
1151 #
1220 # Linkrevs have several serious troubles with filtering that are
1152 # Linkrevs have several serious troubles with filtering that are
1221 # complicated to solve. Proper handling of the issue here should be
1153 # complicated to solve. Proper handling of the issue here should be
1222 # considered when solving linkrev issue are on the table.
1154 # considered when solving linkrev issue are on the table.
1223 return changectx(self._repo.unfiltered(), self._changeid)
1155 return changectx(self._repo.unfiltered(), self._changeid)
1224
1156
1225 def filectx(self, fileid, changeid=None):
1157 def filectx(self, fileid, changeid=None):
1226 '''opens an arbitrary revision of the file without
1158 '''opens an arbitrary revision of the file without
1227 opening a new filelog'''
1159 opening a new filelog'''
1228 return filectx(self._repo, self._path, fileid=fileid,
1160 return filectx(self._repo, self._path, fileid=fileid,
1229 filelog=self._filelog, changeid=changeid)
1161 filelog=self._filelog, changeid=changeid)
1230
1162
1231 def rawdata(self):
1163 def rawdata(self):
1232 return self._filelog.revision(self._filenode, raw=True)
1164 return self._filelog.revision(self._filenode, raw=True)
1233
1165
1234 def rawflags(self):
1166 def rawflags(self):
1235 """low-level revlog flags"""
1167 """low-level revlog flags"""
1236 return self._filelog.flags(self._filerev)
1168 return self._filelog.flags(self._filerev)
1237
1169
1238 def data(self):
1170 def data(self):
1239 try:
1171 try:
1240 return self._filelog.read(self._filenode)
1172 return self._filelog.read(self._filenode)
1241 except error.CensoredNodeError:
1173 except error.CensoredNodeError:
1242 if self._repo.ui.config("censor", "policy") == "ignore":
1174 if self._repo.ui.config("censor", "policy") == "ignore":
1243 return ""
1175 return ""
1244 raise error.Abort(_("censored node: %s") % short(self._filenode),
1176 raise error.Abort(_("censored node: %s") % short(self._filenode),
1245 hint=_("set censor.policy to ignore errors"))
1177 hint=_("set censor.policy to ignore errors"))
1246
1178
1247 def size(self):
1179 def size(self):
1248 return self._filelog.size(self._filerev)
1180 return self._filelog.size(self._filerev)
1249
1181
1250 @propertycache
1182 @propertycache
1251 def _copied(self):
1183 def _copied(self):
1252 """check if file was actually renamed in this changeset revision
1184 """check if file was actually renamed in this changeset revision
1253
1185
1254 If rename logged in file revision, we report copy for changeset only
1186 If rename logged in file revision, we report copy for changeset only
1255 if file revisions linkrev points back to the changeset in question
1187 if file revisions linkrev points back to the changeset in question
1256 or both changeset parents contain different file revisions.
1188 or both changeset parents contain different file revisions.
1257 """
1189 """
1258
1190
1259 renamed = self._filelog.renamed(self._filenode)
1191 renamed = self._filelog.renamed(self._filenode)
1260 if not renamed:
1192 if not renamed:
1261 return renamed
1193 return renamed
1262
1194
1263 if self.rev() == self.linkrev():
1195 if self.rev() == self.linkrev():
1264 return renamed
1196 return renamed
1265
1197
1266 name = self.path()
1198 name = self.path()
1267 fnode = self._filenode
1199 fnode = self._filenode
1268 for p in self._changectx.parents():
1200 for p in self._changectx.parents():
1269 try:
1201 try:
1270 if fnode == p.filenode(name):
1202 if fnode == p.filenode(name):
1271 return None
1203 return None
1272 except error.LookupError:
1204 except error.LookupError:
1273 pass
1205 pass
1274 return renamed
1206 return renamed
1275
1207
1276 def children(self):
1208 def children(self):
1277 # hard for renames
1209 # hard for renames
1278 c = self._filelog.children(self._filenode)
1210 c = self._filelog.children(self._filenode)
1279 return [filectx(self._repo, self._path, fileid=x,
1211 return [filectx(self._repo, self._path, fileid=x,
1280 filelog=self._filelog) for x in c]
1212 filelog=self._filelog) for x in c]
1281
1213
1282 class committablectx(basectx):
1214 class committablectx(basectx):
1283 """A committablectx object provides common functionality for a context that
1215 """A committablectx object provides common functionality for a context that
1284 wants the ability to commit, e.g. workingctx or memctx."""
1216 wants the ability to commit, e.g. workingctx or memctx."""
1285 def __init__(self, repo, text="", user=None, date=None, extra=None,
1217 def __init__(self, repo, text="", user=None, date=None, extra=None,
1286 changes=None):
1218 changes=None):
1287 self._repo = repo
1219 self._repo = repo
1288 self._rev = None
1220 self._rev = None
1289 self._node = None
1221 self._node = None
1290 self._text = text
1222 self._text = text
1291 if date:
1223 if date:
1292 self._date = dateutil.parsedate(date)
1224 self._date = dateutil.parsedate(date)
1293 if user:
1225 if user:
1294 self._user = user
1226 self._user = user
1295 if changes:
1227 if changes:
1296 self._status = changes
1228 self._status = changes
1297
1229
1298 self._extra = {}
1230 self._extra = {}
1299 if extra:
1231 if extra:
1300 self._extra = extra.copy()
1232 self._extra = extra.copy()
1301 if 'branch' not in self._extra:
1233 if 'branch' not in self._extra:
1302 try:
1234 try:
1303 branch = encoding.fromlocal(self._repo.dirstate.branch())
1235 branch = encoding.fromlocal(self._repo.dirstate.branch())
1304 except UnicodeDecodeError:
1236 except UnicodeDecodeError:
1305 raise error.Abort(_('branch name not in UTF-8!'))
1237 raise error.Abort(_('branch name not in UTF-8!'))
1306 self._extra['branch'] = branch
1238 self._extra['branch'] = branch
1307 if self._extra['branch'] == '':
1239 if self._extra['branch'] == '':
1308 self._extra['branch'] = 'default'
1240 self._extra['branch'] = 'default'
1309
1241
1310 def __bytes__(self):
1242 def __bytes__(self):
1311 return bytes(self._parents[0]) + "+"
1243 return bytes(self._parents[0]) + "+"
1312
1244
1313 __str__ = encoding.strmethod(__bytes__)
1245 __str__ = encoding.strmethod(__bytes__)
1314
1246
1315 def __nonzero__(self):
1247 def __nonzero__(self):
1316 return True
1248 return True
1317
1249
1318 __bool__ = __nonzero__
1250 __bool__ = __nonzero__
1319
1251
1320 def _buildflagfunc(self):
1252 def _buildflagfunc(self):
1321 # Create a fallback function for getting file flags when the
1253 # Create a fallback function for getting file flags when the
1322 # filesystem doesn't support them
1254 # filesystem doesn't support them
1323
1255
1324 copiesget = self._repo.dirstate.copies().get
1256 copiesget = self._repo.dirstate.copies().get
1325 parents = self.parents()
1257 parents = self.parents()
1326 if len(parents) < 2:
1258 if len(parents) < 2:
1327 # when we have one parent, it's easy: copy from parent
1259 # when we have one parent, it's easy: copy from parent
1328 man = parents[0].manifest()
1260 man = parents[0].manifest()
1329 def func(f):
1261 def func(f):
1330 f = copiesget(f, f)
1262 f = copiesget(f, f)
1331 return man.flags(f)
1263 return man.flags(f)
1332 else:
1264 else:
1333 # merges are tricky: we try to reconstruct the unstored
1265 # merges are tricky: we try to reconstruct the unstored
1334 # result from the merge (issue1802)
1266 # result from the merge (issue1802)
1335 p1, p2 = parents
1267 p1, p2 = parents
1336 pa = p1.ancestor(p2)
1268 pa = p1.ancestor(p2)
1337 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1269 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1338
1270
1339 def func(f):
1271 def func(f):
1340 f = copiesget(f, f) # may be wrong for merges with copies
1272 f = copiesget(f, f) # may be wrong for merges with copies
1341 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1273 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1342 if fl1 == fl2:
1274 if fl1 == fl2:
1343 return fl1
1275 return fl1
1344 if fl1 == fla:
1276 if fl1 == fla:
1345 return fl2
1277 return fl2
1346 if fl2 == fla:
1278 if fl2 == fla:
1347 return fl1
1279 return fl1
1348 return '' # punt for conflicts
1280 return '' # punt for conflicts
1349
1281
1350 return func
1282 return func
1351
1283
1352 @propertycache
1284 @propertycache
1353 def _flagfunc(self):
1285 def _flagfunc(self):
1354 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1286 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1355
1287
1356 @propertycache
1288 @propertycache
1357 def _status(self):
1289 def _status(self):
1358 return self._repo.status()
1290 return self._repo.status()
1359
1291
1360 @propertycache
1292 @propertycache
1361 def _user(self):
1293 def _user(self):
1362 return self._repo.ui.username()
1294 return self._repo.ui.username()
1363
1295
1364 @propertycache
1296 @propertycache
1365 def _date(self):
1297 def _date(self):
1366 ui = self._repo.ui
1298 ui = self._repo.ui
1367 date = ui.configdate('devel', 'default-date')
1299 date = ui.configdate('devel', 'default-date')
1368 if date is None:
1300 if date is None:
1369 date = dateutil.makedate()
1301 date = dateutil.makedate()
1370 return date
1302 return date
1371
1303
1372 def subrev(self, subpath):
1304 def subrev(self, subpath):
1373 return None
1305 return None
1374
1306
1375 def manifestnode(self):
1307 def manifestnode(self):
1376 return None
1308 return None
1377 def user(self):
1309 def user(self):
1378 return self._user or self._repo.ui.username()
1310 return self._user or self._repo.ui.username()
1379 def date(self):
1311 def date(self):
1380 return self._date
1312 return self._date
1381 def description(self):
1313 def description(self):
1382 return self._text
1314 return self._text
1383 def files(self):
1315 def files(self):
1384 return sorted(self._status.modified + self._status.added +
1316 return sorted(self._status.modified + self._status.added +
1385 self._status.removed)
1317 self._status.removed)
1386
1318
1387 def modified(self):
1319 def modified(self):
1388 return self._status.modified
1320 return self._status.modified
1389 def added(self):
1321 def added(self):
1390 return self._status.added
1322 return self._status.added
1391 def removed(self):
1323 def removed(self):
1392 return self._status.removed
1324 return self._status.removed
1393 def deleted(self):
1325 def deleted(self):
1394 return self._status.deleted
1326 return self._status.deleted
1395 def branch(self):
1327 def branch(self):
1396 return encoding.tolocal(self._extra['branch'])
1328 return encoding.tolocal(self._extra['branch'])
1397 def closesbranch(self):
1329 def closesbranch(self):
1398 return 'close' in self._extra
1330 return 'close' in self._extra
1399 def extra(self):
1331 def extra(self):
1400 return self._extra
1332 return self._extra
1401
1333
1402 def isinmemory(self):
1334 def isinmemory(self):
1403 return False
1335 return False
1404
1336
1405 def tags(self):
1337 def tags(self):
1406 return []
1338 return []
1407
1339
1408 def bookmarks(self):
1340 def bookmarks(self):
1409 b = []
1341 b = []
1410 for p in self.parents():
1342 for p in self.parents():
1411 b.extend(p.bookmarks())
1343 b.extend(p.bookmarks())
1412 return b
1344 return b
1413
1345
1414 def phase(self):
1346 def phase(self):
1415 phase = phases.draft # default phase to draft
1347 phase = phases.draft # default phase to draft
1416 for p in self.parents():
1348 for p in self.parents():
1417 phase = max(phase, p.phase())
1349 phase = max(phase, p.phase())
1418 return phase
1350 return phase
1419
1351
1420 def hidden(self):
1352 def hidden(self):
1421 return False
1353 return False
1422
1354
1423 def children(self):
1355 def children(self):
1424 return []
1356 return []
1425
1357
1426 def flags(self, path):
1358 def flags(self, path):
1427 if r'_manifest' in self.__dict__:
1359 if r'_manifest' in self.__dict__:
1428 try:
1360 try:
1429 return self._manifest.flags(path)
1361 return self._manifest.flags(path)
1430 except KeyError:
1362 except KeyError:
1431 return ''
1363 return ''
1432
1364
1433 try:
1365 try:
1434 return self._flagfunc(path)
1366 return self._flagfunc(path)
1435 except OSError:
1367 except OSError:
1436 return ''
1368 return ''
1437
1369
1438 def ancestor(self, c2):
1370 def ancestor(self, c2):
1439 """return the "best" ancestor context of self and c2"""
1371 """return the "best" ancestor context of self and c2"""
1440 return self._parents[0].ancestor(c2) # punt on two parents for now
1372 return self._parents[0].ancestor(c2) # punt on two parents for now
1441
1373
1442 def walk(self, match):
1374 def walk(self, match):
1443 '''Generates matching file names.'''
1375 '''Generates matching file names.'''
1444 return sorted(self._repo.dirstate.walk(match,
1376 return sorted(self._repo.dirstate.walk(match,
1445 subrepos=sorted(self.substate),
1377 subrepos=sorted(self.substate),
1446 unknown=True, ignored=False))
1378 unknown=True, ignored=False))
1447
1379
1448 def matches(self, match):
1380 def matches(self, match):
1449 return sorted(self._repo.dirstate.matches(match))
1381 return sorted(self._repo.dirstate.matches(match))
1450
1382
1451 def ancestors(self):
1383 def ancestors(self):
1452 for p in self._parents:
1384 for p in self._parents:
1453 yield p
1385 yield p
1454 for a in self._repo.changelog.ancestors(
1386 for a in self._repo.changelog.ancestors(
1455 [p.rev() for p in self._parents]):
1387 [p.rev() for p in self._parents]):
1456 yield changectx(self._repo, a)
1388 yield changectx(self._repo, a)
1457
1389
1458 def markcommitted(self, node):
1390 def markcommitted(self, node):
1459 """Perform post-commit cleanup necessary after committing this ctx
1391 """Perform post-commit cleanup necessary after committing this ctx
1460
1392
1461 Specifically, this updates backing stores this working context
1393 Specifically, this updates backing stores this working context
1462 wraps to reflect the fact that the changes reflected by this
1394 wraps to reflect the fact that the changes reflected by this
1463 workingctx have been committed. For example, it marks
1395 workingctx have been committed. For example, it marks
1464 modified and added files as normal in the dirstate.
1396 modified and added files as normal in the dirstate.
1465
1397
1466 """
1398 """
1467
1399
1468 with self._repo.dirstate.parentchange():
1400 with self._repo.dirstate.parentchange():
1469 for f in self.modified() + self.added():
1401 for f in self.modified() + self.added():
1470 self._repo.dirstate.normal(f)
1402 self._repo.dirstate.normal(f)
1471 for f in self.removed():
1403 for f in self.removed():
1472 self._repo.dirstate.drop(f)
1404 self._repo.dirstate.drop(f)
1473 self._repo.dirstate.setparents(node)
1405 self._repo.dirstate.setparents(node)
1474
1406
1475 # write changes out explicitly, because nesting wlock at
1407 # write changes out explicitly, because nesting wlock at
1476 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1408 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1477 # from immediately doing so for subsequent changing files
1409 # from immediately doing so for subsequent changing files
1478 self._repo.dirstate.write(self._repo.currenttransaction())
1410 self._repo.dirstate.write(self._repo.currenttransaction())
1479
1411
1480 def dirty(self, missing=False, merge=True, branch=True):
1412 def dirty(self, missing=False, merge=True, branch=True):
1481 return False
1413 return False
1482
1414
1483 class workingctx(committablectx):
1415 class workingctx(committablectx):
1484 """A workingctx object makes access to data related to
1416 """A workingctx object makes access to data related to
1485 the current working directory convenient.
1417 the current working directory convenient.
1486 date - any valid date string or (unixtime, offset), or None.
1418 date - any valid date string or (unixtime, offset), or None.
1487 user - username string, or None.
1419 user - username string, or None.
1488 extra - a dictionary of extra values, or None.
1420 extra - a dictionary of extra values, or None.
1489 changes - a list of file lists as returned by localrepo.status()
1421 changes - a list of file lists as returned by localrepo.status()
1490 or None to use the repository status.
1422 or None to use the repository status.
1491 """
1423 """
1492 def __init__(self, repo, text="", user=None, date=None, extra=None,
1424 def __init__(self, repo, text="", user=None, date=None, extra=None,
1493 changes=None):
1425 changes=None):
1494 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1426 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1495
1427
1496 def __iter__(self):
1428 def __iter__(self):
1497 d = self._repo.dirstate
1429 d = self._repo.dirstate
1498 for f in d:
1430 for f in d:
1499 if d[f] != 'r':
1431 if d[f] != 'r':
1500 yield f
1432 yield f
1501
1433
1502 def __contains__(self, key):
1434 def __contains__(self, key):
1503 return self._repo.dirstate[key] not in "?r"
1435 return self._repo.dirstate[key] not in "?r"
1504
1436
1505 def hex(self):
1437 def hex(self):
1506 return hex(wdirid)
1438 return hex(wdirid)
1507
1439
1508 @propertycache
1440 @propertycache
1509 def _parents(self):
1441 def _parents(self):
1510 p = self._repo.dirstate.parents()
1442 p = self._repo.dirstate.parents()
1511 if p[1] == nullid:
1443 if p[1] == nullid:
1512 p = p[:-1]
1444 p = p[:-1]
1513 return [changectx(self._repo, x) for x in p]
1445 return [changectx(self._repo, x) for x in p]
1514
1446
1515 def filectx(self, path, filelog=None):
1447 def filectx(self, path, filelog=None):
1516 """get a file context from the working directory"""
1448 """get a file context from the working directory"""
1517 return workingfilectx(self._repo, path, workingctx=self,
1449 return workingfilectx(self._repo, path, workingctx=self,
1518 filelog=filelog)
1450 filelog=filelog)
1519
1451
1520 def dirty(self, missing=False, merge=True, branch=True):
1452 def dirty(self, missing=False, merge=True, branch=True):
1521 "check whether a working directory is modified"
1453 "check whether a working directory is modified"
1522 # check subrepos first
1454 # check subrepos first
1523 for s in sorted(self.substate):
1455 for s in sorted(self.substate):
1524 if self.sub(s).dirty(missing=missing):
1456 if self.sub(s).dirty(missing=missing):
1525 return True
1457 return True
1526 # check current working dir
1458 # check current working dir
1527 return ((merge and self.p2()) or
1459 return ((merge and self.p2()) or
1528 (branch and self.branch() != self.p1().branch()) or
1460 (branch and self.branch() != self.p1().branch()) or
1529 self.modified() or self.added() or self.removed() or
1461 self.modified() or self.added() or self.removed() or
1530 (missing and self.deleted()))
1462 (missing and self.deleted()))
1531
1463
1532 def add(self, list, prefix=""):
1464 def add(self, list, prefix=""):
1533 with self._repo.wlock():
1465 with self._repo.wlock():
1534 ui, ds = self._repo.ui, self._repo.dirstate
1466 ui, ds = self._repo.ui, self._repo.dirstate
1535 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1467 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1536 rejected = []
1468 rejected = []
1537 lstat = self._repo.wvfs.lstat
1469 lstat = self._repo.wvfs.lstat
1538 for f in list:
1470 for f in list:
1539 # ds.pathto() returns an absolute file when this is invoked from
1471 # ds.pathto() returns an absolute file when this is invoked from
1540 # the keyword extension. That gets flagged as non-portable on
1472 # the keyword extension. That gets flagged as non-portable on
1541 # Windows, since it contains the drive letter and colon.
1473 # Windows, since it contains the drive letter and colon.
1542 scmutil.checkportable(ui, os.path.join(prefix, f))
1474 scmutil.checkportable(ui, os.path.join(prefix, f))
1543 try:
1475 try:
1544 st = lstat(f)
1476 st = lstat(f)
1545 except OSError:
1477 except OSError:
1546 ui.warn(_("%s does not exist!\n") % uipath(f))
1478 ui.warn(_("%s does not exist!\n") % uipath(f))
1547 rejected.append(f)
1479 rejected.append(f)
1548 continue
1480 continue
1549 if st.st_size > 10000000:
1481 if st.st_size > 10000000:
1550 ui.warn(_("%s: up to %d MB of RAM may be required "
1482 ui.warn(_("%s: up to %d MB of RAM may be required "
1551 "to manage this file\n"
1483 "to manage this file\n"
1552 "(use 'hg revert %s' to cancel the "
1484 "(use 'hg revert %s' to cancel the "
1553 "pending addition)\n")
1485 "pending addition)\n")
1554 % (f, 3 * st.st_size // 1000000, uipath(f)))
1486 % (f, 3 * st.st_size // 1000000, uipath(f)))
1555 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1487 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1556 ui.warn(_("%s not added: only files and symlinks "
1488 ui.warn(_("%s not added: only files and symlinks "
1557 "supported currently\n") % uipath(f))
1489 "supported currently\n") % uipath(f))
1558 rejected.append(f)
1490 rejected.append(f)
1559 elif ds[f] in 'amn':
1491 elif ds[f] in 'amn':
1560 ui.warn(_("%s already tracked!\n") % uipath(f))
1492 ui.warn(_("%s already tracked!\n") % uipath(f))
1561 elif ds[f] == 'r':
1493 elif ds[f] == 'r':
1562 ds.normallookup(f)
1494 ds.normallookup(f)
1563 else:
1495 else:
1564 ds.add(f)
1496 ds.add(f)
1565 return rejected
1497 return rejected
1566
1498
1567 def forget(self, files, prefix=""):
1499 def forget(self, files, prefix=""):
1568 with self._repo.wlock():
1500 with self._repo.wlock():
1569 ds = self._repo.dirstate
1501 ds = self._repo.dirstate
1570 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1502 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1571 rejected = []
1503 rejected = []
1572 for f in files:
1504 for f in files:
1573 if f not in self._repo.dirstate:
1505 if f not in self._repo.dirstate:
1574 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1506 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1575 rejected.append(f)
1507 rejected.append(f)
1576 elif self._repo.dirstate[f] != 'a':
1508 elif self._repo.dirstate[f] != 'a':
1577 self._repo.dirstate.remove(f)
1509 self._repo.dirstate.remove(f)
1578 else:
1510 else:
1579 self._repo.dirstate.drop(f)
1511 self._repo.dirstate.drop(f)
1580 return rejected
1512 return rejected
1581
1513
1582 def undelete(self, list):
1514 def undelete(self, list):
1583 pctxs = self.parents()
1515 pctxs = self.parents()
1584 with self._repo.wlock():
1516 with self._repo.wlock():
1585 ds = self._repo.dirstate
1517 ds = self._repo.dirstate
1586 for f in list:
1518 for f in list:
1587 if self._repo.dirstate[f] != 'r':
1519 if self._repo.dirstate[f] != 'r':
1588 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1520 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1589 else:
1521 else:
1590 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1522 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1591 t = fctx.data()
1523 t = fctx.data()
1592 self._repo.wwrite(f, t, fctx.flags())
1524 self._repo.wwrite(f, t, fctx.flags())
1593 self._repo.dirstate.normal(f)
1525 self._repo.dirstate.normal(f)
1594
1526
    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` is a copy of ``source``.

        ``dest`` must already exist in the working directory as a regular
        file or a symlink; otherwise a warning is emitted and nothing is
        recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # destination missing: warn instead of recording a bogus copy
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                # make sure the destination is tracked before recording
                # the copy source
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1615
1547
1616 def match(self, pats=None, include=None, exclude=None, default='glob',
1548 def match(self, pats=None, include=None, exclude=None, default='glob',
1617 listsubrepos=False, badfn=None):
1549 listsubrepos=False, badfn=None):
1618 r = self._repo
1550 r = self._repo
1619
1551
1620 # Only a case insensitive filesystem needs magic to translate user input
1552 # Only a case insensitive filesystem needs magic to translate user input
1621 # to actual case in the filesystem.
1553 # to actual case in the filesystem.
1622 icasefs = not util.fscasesensitive(r.root)
1554 icasefs = not util.fscasesensitive(r.root)
1623 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1555 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1624 default, auditor=r.auditor, ctx=self,
1556 default, auditor=r.auditor, ctx=self,
1625 listsubrepos=listsubrepos, badfn=badfn,
1557 listsubrepos=listsubrepos, badfn=badfn,
1626 icasefs=icasefs)
1558 icasefs=icasefs)
1627
1559
1628 def _filtersuspectsymlink(self, files):
1560 def _filtersuspectsymlink(self, files):
1629 if not files or self._repo.dirstate._checklink:
1561 if not files or self._repo.dirstate._checklink:
1630 return files
1562 return files
1631
1563
1632 # Symlink placeholders may get non-symlink-like contents
1564 # Symlink placeholders may get non-symlink-like contents
1633 # via user error or dereferencing by NFS or Samba servers,
1565 # via user error or dereferencing by NFS or Samba servers,
1634 # so we filter out any placeholders that don't look like a
1566 # so we filter out any placeholders that don't look like a
1635 # symlink
1567 # symlink
1636 sane = []
1568 sane = []
1637 for f in files:
1569 for f in files:
1638 if self.flags(f) == 'l':
1570 if self.flags(f) == 'l':
1639 d = self[f].data()
1571 d = self[f].data()
1640 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1572 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1641 self._repo.ui.debug('ignoring suspect symlink placeholder'
1573 self._repo.ui.debug('ignoring suspect symlink placeholder'
1642 ' "%s"\n' % f)
1574 ' "%s"\n' % f)
1643 continue
1575 continue
1644 sane.append(f)
1576 sane.append(f)
1645 return sane
1577 return sane
1646
1578
    def _checklookup(self, files):
        """Recheck files whose dirstate entry was ambiguous.

        Returns a 3-tuple ``(modified, deleted, fixup)``: files proven
        modified, files that vanished while checking, and files that
        turned out clean (whose dirstate entries can be refreshed).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1676
1608
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status hooks with the
        computed ``status``.  Both steps are best-effort: they are skipped
        if the wlock cannot be taken or if the dirstate changed underneath
        us (detected via its identity token).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # snapshot the identity so we can detect a concurrent
                # rewrite of .hg/dirstate before we take the lock
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # best-effort: couldn't take the wlock, skip the update
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1716
1648
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        ``ignored``/``clean``/``unknown`` select which extra categories to
        compute.  Files the dirstate could not classify by metadata alone
        are re-checked by content via _checklookup(), and clean entries are
        written back through _poststatusfixup().
        '''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1747
1679
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1758
1690
1759 def _buildstatusmanifest(self, status):
1691 def _buildstatusmanifest(self, status):
1760 """Builds a manifest that includes the given status results."""
1692 """Builds a manifest that includes the given status results."""
1761 parents = self.parents()
1693 parents = self.parents()
1762
1694
1763 man = parents[0].manifest().copy()
1695 man = parents[0].manifest().copy()
1764
1696
1765 ff = self._flagfunc
1697 ff = self._flagfunc
1766 for i, l in ((addednodeid, status.added),
1698 for i, l in ((addednodeid, status.added),
1767 (modifiednodeid, status.modified)):
1699 (modifiednodeid, status.modified)):
1768 for f in l:
1700 for f in l:
1769 man[f] = i
1701 man[f] = i
1770 try:
1702 try:
1771 man.setflag(f, ff(f))
1703 man.setflag(f, ff(f))
1772 except OSError:
1704 except OSError:
1773 pass
1705 pass
1774
1706
1775 for f in status.deleted + status.removed:
1707 for f in status.deleted + status.removed:
1776 if f in man:
1708 if f in man:
1777 del man[f]
1709 del man[f]
1778
1710
1779 return man
1711 return man
1780
1712
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # comparing against an arbitrary revision: fall back to the
            # generic manifest-based computation in the base class
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1800
1732
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1820
1752
    def markcommitted(self, node):
        """Perform post-commit bookkeeping once ``node`` has been created.

        Delegates to the base class, then lets the sparse machinery react
        to the new commit.
        """
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1825
1757
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not committed yet, so no changeset/filelog revision is known
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "path absent from this manifest"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # first parent is the copy source (its filelog unknown: None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # uncommitted file contexts cannot have children yet
        return []
1872
1804
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        """Return the current on-disk content of the file."""
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source, filenode-in-p1) if this file is a recorded copy,
        else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        """Return the on-disk size in bytes (without following symlinks)."""
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx date if the
        file is missing from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        # follows symlinks
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # path sanity check against the working vfs
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for files that are tracked (normal/merged/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a directory squats on the file's path: remove it wholesale
            wvfs.rmtree(f, forcibly=True)
        for p in reversed(list(util.finddirs(f))):
            # remove the first ancestor path occupied by a file/symlink;
            # at most one can exist, so stop after removing it
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        """Set the symlink (``l``) and executable (``x``) flags on disk."""
        self._repo.wvfs.setflags(self._path, l, x)
1951
1883
1952 class overlayworkingctx(committablectx):
1884 class overlayworkingctx(committablectx):
1953 """Wraps another mutable context with a write-back cache that can be
1885 """Wraps another mutable context with a write-back cache that can be
1954 converted into a commit context.
1886 converted into a commit context.
1955
1887
1956 self._cache[path] maps to a dict with keys: {
1888 self._cache[path] maps to a dict with keys: {
1957 'exists': bool?
1889 'exists': bool?
1958 'date': date?
1890 'date': date?
1959 'data': str?
1891 'data': str?
1960 'flags': str?
1892 'flags': str?
1961 'copied': str? (path or None)
1893 'copied': str? (path or None)
1962 }
1894 }
1963 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1895 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1964 is `False`, the file was deleted.
1896 is `False`, the file was deleted.
1965 """
1897 """
1966
1898
1967 def __init__(self, repo):
1899 def __init__(self, repo):
1968 super(overlayworkingctx, self).__init__(repo)
1900 super(overlayworkingctx, self).__init__(repo)
1969 self._repo = repo
1901 self._repo = repo
1970 self.clean()
1902 self.clean()
1971
1903
1972 def setbase(self, wrappedctx):
1904 def setbase(self, wrappedctx):
1973 self._wrappedctx = wrappedctx
1905 self._wrappedctx = wrappedctx
1974 self._parents = [wrappedctx]
1906 self._parents = [wrappedctx]
1975 # Drop old manifest cache as it is now out of date.
1907 # Drop old manifest cache as it is now out of date.
1976 # This is necessary when, e.g., rebasing several nodes with one
1908 # This is necessary when, e.g., rebasing several nodes with one
1977 # ``overlayworkingctx`` (e.g. with --collapse).
1909 # ``overlayworkingctx`` (e.g. with --collapse).
1978 util.clearcachedproperty(self, '_manifest')
1910 util.clearcachedproperty(self, '_manifest')
1979
1911
1980 def data(self, path):
1912 def data(self, path):
1981 if self.isdirty(path):
1913 if self.isdirty(path):
1982 if self._cache[path]['exists']:
1914 if self._cache[path]['exists']:
1983 if self._cache[path]['data']:
1915 if self._cache[path]['data']:
1984 return self._cache[path]['data']
1916 return self._cache[path]['data']
1985 else:
1917 else:
1986 # Must fallback here, too, because we only set flags.
1918 # Must fallback here, too, because we only set flags.
1987 return self._wrappedctx[path].data()
1919 return self._wrappedctx[path].data()
1988 else:
1920 else:
1989 raise error.ProgrammingError("No such file or directory: %s" %
1921 raise error.ProgrammingError("No such file or directory: %s" %
1990 path)
1922 path)
1991 else:
1923 else:
1992 return self._wrappedctx[path].data()
1924 return self._wrappedctx[path].data()
1993
1925
1994 @propertycache
1926 @propertycache
1995 def _manifest(self):
1927 def _manifest(self):
1996 parents = self.parents()
1928 parents = self.parents()
1997 man = parents[0].manifest().copy()
1929 man = parents[0].manifest().copy()
1998
1930
1999 flag = self._flagfunc
1931 flag = self._flagfunc
2000 for path in self.added():
1932 for path in self.added():
2001 man[path] = addednodeid
1933 man[path] = addednodeid
2002 man.setflag(path, flag(path))
1934 man.setflag(path, flag(path))
2003 for path in self.modified():
1935 for path in self.modified():
2004 man[path] = modifiednodeid
1936 man[path] = modifiednodeid
2005 man.setflag(path, flag(path))
1937 man.setflag(path, flag(path))
2006 for path in self.removed():
1938 for path in self.removed():
2007 del man[path]
1939 del man[path]
2008 return man
1940 return man
2009
1941
2010 @propertycache
1942 @propertycache
2011 def _flagfunc(self):
1943 def _flagfunc(self):
2012 def f(path):
1944 def f(path):
2013 return self._cache[path]['flags']
1945 return self._cache[path]['flags']
2014 return f
1946 return f
2015
1947
2016 def files(self):
1948 def files(self):
2017 return sorted(self.added() + self.modified() + self.removed())
1949 return sorted(self.added() + self.modified() + self.removed())
2018
1950
2019 def modified(self):
1951 def modified(self):
2020 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1952 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
2021 self._existsinparent(f)]
1953 self._existsinparent(f)]
2022
1954
2023 def added(self):
1955 def added(self):
2024 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1956 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
2025 not self._existsinparent(f)]
1957 not self._existsinparent(f)]
2026
1958
2027 def removed(self):
1959 def removed(self):
2028 return [f for f in self._cache.keys() if
1960 return [f for f in self._cache.keys() if
2029 not self._cache[f]['exists'] and self._existsinparent(f)]
1961 not self._cache[f]['exists'] and self._existsinparent(f)]
2030
1962
2031 def isinmemory(self):
1963 def isinmemory(self):
2032 return True
1964 return True
2033
1965
2034 def filedate(self, path):
1966 def filedate(self, path):
2035 if self.isdirty(path):
1967 if self.isdirty(path):
2036 return self._cache[path]['date']
1968 return self._cache[path]['date']
2037 else:
1969 else:
2038 return self._wrappedctx[path].date()
1970 return self._wrappedctx[path].date()
2039
1971
2040 def markcopied(self, path, origin):
1972 def markcopied(self, path, origin):
2041 if self.isdirty(path):
1973 if self.isdirty(path):
2042 self._cache[path]['copied'] = origin
1974 self._cache[path]['copied'] = origin
2043 else:
1975 else:
2044 raise error.ProgrammingError('markcopied() called on clean context')
1976 raise error.ProgrammingError('markcopied() called on clean context')
2045
1977
2046 def copydata(self, path):
1978 def copydata(self, path):
2047 if self.isdirty(path):
1979 if self.isdirty(path):
2048 return self._cache[path]['copied']
1980 return self._cache[path]['copied']
2049 else:
1981 else:
2050 raise error.ProgrammingError('copydata() called on clean context')
1982 raise error.ProgrammingError('copydata() called on clean context')
2051
1983
2052 def flags(self, path):
1984 def flags(self, path):
2053 if self.isdirty(path):
1985 if self.isdirty(path):
2054 if self._cache[path]['exists']:
1986 if self._cache[path]['exists']:
2055 return self._cache[path]['flags']
1987 return self._cache[path]['flags']
2056 else:
1988 else:
2057 raise error.ProgrammingError("No such file or directory: %s" %
1989 raise error.ProgrammingError("No such file or directory: %s" %
2058 self._path)
1990 self._path)
2059 else:
1991 else:
2060 return self._wrappedctx[path].flags()
1992 return self._wrappedctx[path].flags()
2061
1993
2062 def _existsinparent(self, path):
1994 def _existsinparent(self, path):
2063 try:
1995 try:
2064 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1996 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2065 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1997 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2066 # with an ``exists()`` function.
1998 # with an ``exists()`` function.
2067 self._wrappedctx[path]
1999 self._wrappedctx[path]
2068 return True
2000 return True
2069 except error.ManifestLookupError:
2001 except error.ManifestLookupError:
2070 return False
2002 return False
2071
2003
2072 def _auditconflicts(self, path):
2004 def _auditconflicts(self, path):
2073 """Replicates conflict checks done by wvfs.write().
2005 """Replicates conflict checks done by wvfs.write().
2074
2006
2075 Since we never write to the filesystem and never call `applyupdates` in
2007 Since we never write to the filesystem and never call `applyupdates` in
2076 IMM, we'll never check that a path is actually writable -- e.g., because
2008 IMM, we'll never check that a path is actually writable -- e.g., because
2077 it adds `a/foo`, but `a` is actually a file in the other commit.
2009 it adds `a/foo`, but `a` is actually a file in the other commit.
2078 """
2010 """
2079 def fail(path, component):
2011 def fail(path, component):
2080 # p1() is the base and we're receiving "writes" for p2()'s
2012 # p1() is the base and we're receiving "writes" for p2()'s
2081 # files.
2013 # files.
2082 if 'l' in self.p1()[component].flags():
2014 if 'l' in self.p1()[component].flags():
2083 raise error.Abort("error: %s conflicts with symlink %s "
2015 raise error.Abort("error: %s conflicts with symlink %s "
2084 "in %s." % (path, component,
2016 "in %s." % (path, component,
2085 self.p1().rev()))
2017 self.p1().rev()))
2086 else:
2018 else:
2087 raise error.Abort("error: '%s' conflicts with file '%s' in "
2019 raise error.Abort("error: '%s' conflicts with file '%s' in "
2088 "%s." % (path, component,
2020 "%s." % (path, component,
2089 self.p1().rev()))
2021 self.p1().rev()))
2090
2022
2091 # Test that each new directory to be created to write this path from p2
2023 # Test that each new directory to be created to write this path from p2
2092 # is not a file in p1.
2024 # is not a file in p1.
2093 components = path.split('/')
2025 components = path.split('/')
2094 for i in xrange(len(components)):
2026 for i in xrange(len(components)):
2095 component = "/".join(components[0:i])
2027 component = "/".join(components[0:i])
2096 if component in self.p1():
2028 if component in self.p1():
2097 fail(path, component)
2029 fail(path, component)
2098
2030
2099 # Test the other direction -- that this path from p2 isn't a directory
2031 # Test the other direction -- that this path from p2 isn't a directory
2100 # in p1 (test that p1 doesn't any paths matching `path/*`).
2032 # in p1 (test that p1 doesn't any paths matching `path/*`).
2101 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
2033 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
2102 matches = self.p1().manifest().matches(match)
2034 matches = self.p1().manifest().matches(match)
2103 if len(matches) > 0:
2035 if len(matches) > 0:
2104 if len(matches) == 1 and matches.keys()[0] == path:
2036 if len(matches) == 1 and matches.keys()[0] == path:
2105 return
2037 return
2106 raise error.Abort("error: file '%s' cannot be written because "
2038 raise error.Abort("error: file '%s' cannot be written because "
2107 " '%s/' is a folder in %s (containing %d "
2039 " '%s/' is a folder in %s (containing %d "
2108 "entries: %s)"
2040 "entries: %s)"
2109 % (path, path, self.p1(), len(matches),
2041 % (path, path, self.p1(), len(matches),
2110 ', '.join(matches.keys())))
2042 ', '.join(matches.keys())))
2111
2043
    def write(self, path, data, flags='', **kwargs):
        """Cache new ``data`` (and ``flags``) for ``path`` in the overlay.

        Raises ProgrammingError when ``data`` is None; aborts when ``path``
        conflicts with a file/symlink/directory in p1 (see
        ``_auditconflicts``).
        """
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)
2118
2050
    def setflags(self, path, l, x):
        """Mark ``path`` dirty with symlink (``l``) / exec (``x``) flags."""
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))
2122
2054
    def remove(self, path):
        """Record ``path`` as deleted in the overlay cache."""
        self._markdirty(path, exists=False)
2125
2057
2126 def exists(self, path):
2058 def exists(self, path):
2127 """exists behaves like `lexists`, but needs to follow symlinks and
2059 """exists behaves like `lexists`, but needs to follow symlinks and
2128 return False if they are broken.
2060 return False if they are broken.
2129 """
2061 """
2130 if self.isdirty(path):
2062 if self.isdirty(path):
2131 # If this path exists and is a symlink, "follow" it by calling
2063 # If this path exists and is a symlink, "follow" it by calling
2132 # exists on the destination path.
2064 # exists on the destination path.
2133 if (self._cache[path]['exists'] and
2065 if (self._cache[path]['exists'] and
2134 'l' in self._cache[path]['flags']):
2066 'l' in self._cache[path]['flags']):
2135 return self.exists(self._cache[path]['data'].strip())
2067 return self.exists(self._cache[path]['data'].strip())
2136 else:
2068 else:
2137 return self._cache[path]['exists']
2069 return self._cache[path]['exists']
2138
2070
2139 return self._existsinparent(path)
2071 return self._existsinparent(path)
2140
2072
    def lexists(self, path):
        """lexists returns True if the path exists"""
        # A dirty entry knows its own existence; otherwise defer to the
        # wrapped (parent) context.
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)
2147
2079
2148 def size(self, path):
2080 def size(self, path):
2149 if self.isdirty(path):
2081 if self.isdirty(path):
2150 if self._cache[path]['exists']:
2082 if self._cache[path]['exists']:
2151 return len(self._cache[path]['data'])
2083 return len(self._cache[path]['data'])
2152 else:
2084 else:
2153 raise error.ProgrammingError("No such file or directory: %s" %
2085 raise error.ProgrammingError("No such file or directory: %s" %
2154 self._path)
2086 self._path)
2155 return self._wrappedctx[path].size()
2087 return self._wrappedctx[path].size()
2156
2088
    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        # Every cached path (written or deleted) must be listed in ``files``.
        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
2191
2123
    def isdirty(self, path):
        """True if ``path`` has been written or removed in this overlay."""
        return path in self._cache
2194
2126
2195 def isempty(self):
2127 def isempty(self):
2196 # We need to discard any keys that are actually clean before the empty
2128 # We need to discard any keys that are actually clean before the empty
2197 # commit check.
2129 # commit check.
2198 self._compact()
2130 self._compact()
2199 return len(self._cache) == 0
2131 return len(self._cache) == 0
2200
2132
    def clean(self):
        """Discard every cached change, reverting to the wrapped context."""
        self._cache = {}
2203
2135
2204 def _compact(self):
2136 def _compact(self):
2205 """Removes keys from the cache that are actually clean, by comparing
2137 """Removes keys from the cache that are actually clean, by comparing
2206 them with the underlying context.
2138 them with the underlying context.
2207
2139
2208 This can occur during the merge process, e.g. by passing --tool :local
2140 This can occur during the merge process, e.g. by passing --tool :local
2209 to resolve a conflict.
2141 to resolve a conflict.
2210 """
2142 """
2211 keys = []
2143 keys = []
2212 for path in self._cache.keys():
2144 for path in self._cache.keys():
2213 cache = self._cache[path]
2145 cache = self._cache[path]
2214 try:
2146 try:
2215 underlying = self._wrappedctx[path]
2147 underlying = self._wrappedctx[path]
2216 if (underlying.data() == cache['data'] and
2148 if (underlying.data() == cache['data'] and
2217 underlying.flags() == cache['flags']):
2149 underlying.flags() == cache['flags']):
2218 keys.append(path)
2150 keys.append(path)
2219 except error.ManifestLookupError:
2151 except error.ManifestLookupError:
2220 # Path not in the underlying manifest (created).
2152 # Path not in the underlying manifest (created).
2221 continue
2153 continue
2222
2154
2223 for path in keys:
2155 for path in keys:
2224 del self._cache[path]
2156 del self._cache[path]
2225 return keys
2157 return keys
2226
2158
2227 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2159 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2228 self._cache[path] = {
2160 self._cache[path] = {
2229 'exists': exists,
2161 'exists': exists,
2230 'data': data,
2162 'data': data,
2231 'date': date,
2163 'date': date,
2232 'flags': flags,
2164 'flags': flags,
2233 'copied': None,
2165 'copied': None,
2234 }
2166 }
2235
2167
    def filectx(self, path, filelog=None):
        """Return a file context that reads and writes through this overlay."""
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2239
2171
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; all file state lives
        # there, keyed by path -- this object is a thin per-path view.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, source filenode) or None if not copied."""
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem involved, so there is nothing to audit.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass
2298
2230
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: deliberately starts the super() chain at workingctx, i.e.
        # skips workingctx.__init__ and invokes the next class in the MRO,
        # passing the precomputed ``changes`` status straight through.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything tracked that is not part of this commit's changes.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2334
2266
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # Key only on ``path``: repo/memctx are the same for every call and
        # keeping them in the key would create reference cycles.
        if path in cache:
            return cache[path]
        result = func(repo, memctx, path)
        cache[path] = result
        return result

    return getfilectx
2350
2282
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2369
2301
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # Deleted file: memctx registers a removal from a None return.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2384
2316
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Missing parents are normalized to the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            # a non-None filectx (see ``filectx`` above) means the file is
            # present in this commit; None means it was removed.
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2507
2439
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2540
2472
2541 class overlayfilectx(committablefilectx):
2473 class overlayfilectx(committablefilectx):
2542 """Like memfilectx but take an original filectx and optional parameters to
2474 """Like memfilectx but take an original filectx and optional parameters to
2543 override parts of it. This is useful when fctx.data() is expensive (i.e.
2475 override parts of it. This is useful when fctx.data() is expensive (i.e.
2544 flag processor is expensive) and raw data, flags, and filenode could be
2476 flag processor is expensive) and raw data, flags, and filenode could be
2545 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2477 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2546 """
2478 """
2547
2479
2548 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2480 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2549 copied=None, ctx=None):
2481 copied=None, ctx=None):
2550 """originalfctx: filecontext to duplicate
2482 """originalfctx: filecontext to duplicate
2551
2483
2552 datafunc: None or a function to override data (file content). It is a
2484 datafunc: None or a function to override data (file content). It is a
2553 function to be lazy. path, flags, copied, ctx: None or overridden value
2485 function to be lazy. path, flags, copied, ctx: None or overridden value
2554
2486
2555 copied could be (path, rev), or False. copied could also be just path,
2487 copied could be (path, rev), or False. copied could also be just path,
2556 and will be converted to (path, nullid). This simplifies some callers.
2488 and will be converted to (path, nullid). This simplifies some callers.
2557 """
2489 """
2558
2490
2559 if path is None:
2491 if path is None:
2560 path = originalfctx.path()
2492 path = originalfctx.path()
2561 if ctx is None:
2493 if ctx is None:
2562 ctx = originalfctx.changectx()
2494 ctx = originalfctx.changectx()
2563 ctxmatch = lambda: True
2495 ctxmatch = lambda: True
2564 else:
2496 else:
2565 ctxmatch = lambda: ctx == originalfctx.changectx()
2497 ctxmatch = lambda: ctx == originalfctx.changectx()
2566
2498
2567 repo = originalfctx.repo()
2499 repo = originalfctx.repo()
2568 flog = originalfctx.filelog()
2500 flog = originalfctx.filelog()
2569 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2501 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2570
2502
2571 if copied is None:
2503 if copied is None:
2572 copied = originalfctx.renamed()
2504 copied = originalfctx.renamed()
2573 copiedmatch = lambda: True
2505 copiedmatch = lambda: True
2574 else:
2506 else:
2575 if copied and not isinstance(copied, tuple):
2507 if copied and not isinstance(copied, tuple):
2576 # repo._filecommit will recalculate copyrev so nullid is okay
2508 # repo._filecommit will recalculate copyrev so nullid is okay
2577 copied = (copied, nullid)
2509 copied = (copied, nullid)
2578 copiedmatch = lambda: copied == originalfctx.renamed()
2510 copiedmatch = lambda: copied == originalfctx.renamed()
2579
2511
2580 # When data, copied (could affect data), ctx (could affect filelog
2512 # When data, copied (could affect data), ctx (could affect filelog
2581 # parents) are not overridden, rawdata, rawflags, and filenode may be
2513 # parents) are not overridden, rawdata, rawflags, and filenode may be
2582 # reused (repo._filecommit should double check filelog parents).
2514 # reused (repo._filecommit should double check filelog parents).
2583 #
2515 #
2584 # path, flags are not hashed in filelog (but in manifestlog) so they do
2516 # path, flags are not hashed in filelog (but in manifestlog) so they do
2585 # not affect reusable here.
2517 # not affect reusable here.
2586 #
2518 #
2587 # If ctx or copied is overridden to a same value with originalfctx,
2519 # If ctx or copied is overridden to a same value with originalfctx,
2588 # still consider it's reusable. originalfctx.renamed() may be a bit
2520 # still consider it's reusable. originalfctx.renamed() may be a bit
2589 # expensive so it's not called unless necessary. Assuming datafunc is
2521 # expensive so it's not called unless necessary. Assuming datafunc is
2590 # always expensive, do not call it for this "reusable" test.
2522 # always expensive, do not call it for this "reusable" test.
2591 reusable = datafunc is None and ctxmatch() and copiedmatch()
2523 reusable = datafunc is None and ctxmatch() and copiedmatch()
2592
2524
2593 if datafunc is None:
2525 if datafunc is None:
2594 datafunc = originalfctx.data
2526 datafunc = originalfctx.data
2595 if flags is None:
2527 if flags is None:
2596 flags = originalfctx.flags()
2528 flags = originalfctx.flags()
2597
2529
2598 self._datafunc = datafunc
2530 self._datafunc = datafunc
2599 self._flags = flags
2531 self._flags = flags
2600 self._copied = copied
2532 self._copied = copied
2601
2533
2602 if reusable:
2534 if reusable:
2603 # copy extra fields from originalfctx
2535 # copy extra fields from originalfctx
2604 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2536 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2605 for attr_ in attrs:
2537 for attr_ in attrs:
2606 if util.safehasattr(originalfctx, attr_):
2538 if util.safehasattr(originalfctx, attr_):
2607 setattr(self, attr_, getattr(originalfctx, attr_))
2539 setattr(self, attr_, getattr(originalfctx, attr_))
2608
2540
2609 def data(self):
2541 def data(self):
2610 return self._datafunc()
2542 return self._datafunc()
2611
2543
2612 class metadataonlyctx(committablectx):
2544 class metadataonlyctx(committablectx):
2613 """Like memctx but it's reusing the manifest of different commit.
2545 """Like memctx but it's reusing the manifest of different commit.
2614 Intended to be used by lightweight operations that are creating
2546 Intended to be used by lightweight operations that are creating
2615 metadata-only changes.
2547 metadata-only changes.
2616
2548
2617 Revision information is supplied at initialization time. 'repo' is the
2549 Revision information is supplied at initialization time. 'repo' is the
2618 current localrepo, 'ctx' is original revision which manifest we're reuisng
2550 current localrepo, 'ctx' is original revision which manifest we're reuisng
2619 'parents' is a sequence of two parent revisions identifiers (pass None for
2551 'parents' is a sequence of two parent revisions identifiers (pass None for
2620 every missing parent), 'text' is the commit.
2552 every missing parent), 'text' is the commit.
2621
2553
2622 user receives the committer name and defaults to current repository
2554 user receives the committer name and defaults to current repository
2623 username, date is the commit date in any format supported by
2555 username, date is the commit date in any format supported by
2624 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2556 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2625 metadata or is left empty.
2557 metadata or is left empty.
2626 """
2558 """
2627 def __new__(cls, repo, originalctx, *args, **kwargs):
2559 def __new__(cls, repo, originalctx, *args, **kwargs):
2628 return super(metadataonlyctx, cls).__new__(cls, repo)
2560 return super(metadataonlyctx, cls).__new__(cls, repo)
2629
2561
2630 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2562 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2631 date=None, extra=None, editor=False):
2563 date=None, extra=None, editor=False):
2632 if text is None:
2564 if text is None:
2633 text = originalctx.description()
2565 text = originalctx.description()
2634 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2566 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2635 self._rev = None
2567 self._rev = None
2636 self._node = None
2568 self._node = None
2637 self._originalctx = originalctx
2569 self._originalctx = originalctx
2638 self._manifestnode = originalctx.manifestnode()
2570 self._manifestnode = originalctx.manifestnode()
2639 if parents is None:
2571 if parents is None:
2640 parents = originalctx.parents()
2572 parents = originalctx.parents()
2641 else:
2573 else:
2642 parents = [repo[p] for p in parents if p is not None]
2574 parents = [repo[p] for p in parents if p is not None]
2643 parents = parents[:]
2575 parents = parents[:]
2644 while len(parents) < 2:
2576 while len(parents) < 2:
2645 parents.append(repo[nullid])
2577 parents.append(repo[nullid])
2646 p1, p2 = self._parents = parents
2578 p1, p2 = self._parents = parents
2647
2579
2648 # sanity check to ensure that the reused manifest parents are
2580 # sanity check to ensure that the reused manifest parents are
2649 # manifests of our commit parents
2581 # manifests of our commit parents
2650 mp1, mp2 = self.manifestctx().parents
2582 mp1, mp2 = self.manifestctx().parents
2651 if p1 != nullid and p1.manifestnode() != mp1:
2583 if p1 != nullid and p1.manifestnode() != mp1:
2652 raise RuntimeError('can\'t reuse the manifest: '
2584 raise RuntimeError('can\'t reuse the manifest: '
2653 'its p1 doesn\'t match the new ctx p1')
2585 'its p1 doesn\'t match the new ctx p1')
2654 if p2 != nullid and p2.manifestnode() != mp2:
2586 if p2 != nullid and p2.manifestnode() != mp2:
2655 raise RuntimeError('can\'t reuse the manifest: '
2587 raise RuntimeError('can\'t reuse the manifest: '
2656 'its p2 doesn\'t match the new ctx p2')
2588 'its p2 doesn\'t match the new ctx p2')
2657
2589
2658 self._files = originalctx.files()
2590 self._files = originalctx.files()
2659 self.substate = {}
2591 self.substate = {}
2660
2592
2661 if editor:
2593 if editor:
2662 self._text = editor(self._repo, self, [])
2594 self._text = editor(self._repo, self, [])
2663 self._repo.savecommitmessage(self._text)
2595 self._repo.savecommitmessage(self._text)
2664
2596
2665 def manifestnode(self):
2597 def manifestnode(self):
2666 return self._manifestnode
2598 return self._manifestnode
2667
2599
2668 @property
2600 @property
2669 def _manifestctx(self):
2601 def _manifestctx(self):
2670 return self._repo.manifestlog[self._manifestnode]
2602 return self._repo.manifestlog[self._manifestnode]
2671
2603
2672 def filectx(self, path, filelog=None):
2604 def filectx(self, path, filelog=None):
2673 return self._originalctx.filectx(path, filelog=filelog)
2605 return self._originalctx.filectx(path, filelog=filelog)
2674
2606
2675 def commit(self):
2607 def commit(self):
2676 """commit context to the repo"""
2608 """commit context to the repo"""
2677 return self._repo.commitctx(self)
2609 return self._repo.commitctx(self)
2678
2610
2679 @property
2611 @property
2680 def _manifest(self):
2612 def _manifest(self):
2681 return self._originalctx.manifest()
2613 return self._originalctx.manifest()
2682
2614
2683 @propertycache
2615 @propertycache
2684 def _status(self):
2616 def _status(self):
2685 """Calculate exact status from ``files`` specified in the ``origctx``
2617 """Calculate exact status from ``files`` specified in the ``origctx``
2686 and parents manifests.
2618 and parents manifests.
2687 """
2619 """
2688 man1 = self.p1().manifest()
2620 man1 = self.p1().manifest()
2689 p2 = self._parents[1]
2621 p2 = self._parents[1]
2690 # "1 < len(self._parents)" can't be used for checking
2622 # "1 < len(self._parents)" can't be used for checking
2691 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2623 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2692 # explicitly initialized by the list, of which length is 2.
2624 # explicitly initialized by the list, of which length is 2.
2693 if p2.node() != nullid:
2625 if p2.node() != nullid:
2694 man2 = p2.manifest()
2626 man2 = p2.manifest()
2695 managing = lambda f: f in man1 or f in man2
2627 managing = lambda f: f in man1 or f in man2
2696 else:
2628 else:
2697 managing = lambda f: f in man1
2629 managing = lambda f: f in man1
2698
2630
2699 modified, added, removed = [], [], []
2631 modified, added, removed = [], [], []
2700 for f in self._files:
2632 for f in self._files:
2701 if not managing(f):
2633 if not managing(f):
2702 added.append(f)
2634 added.append(f)
2703 elif f in self:
2635 elif f in self:
2704 modified.append(f)
2636 modified.append(f)
2705 else:
2637 else:
2706 removed.append(f)
2638 removed.append(f)
2707
2639
2708 return scmutil.status(modified, added, removed, [], [], [], [])
2640 return scmutil.status(modified, added, removed, [], [], [], [])
2709
2641
2710 class arbitraryfilectx(object):
2642 class arbitraryfilectx(object):
2711 """Allows you to use filectx-like functions on a file in an arbitrary
2643 """Allows you to use filectx-like functions on a file in an arbitrary
2712 location on disk, possibly not in the working directory.
2644 location on disk, possibly not in the working directory.
2713 """
2645 """
2714 def __init__(self, path, repo=None):
2646 def __init__(self, path, repo=None):
2715 # Repo is optional because contrib/simplemerge uses this class.
2647 # Repo is optional because contrib/simplemerge uses this class.
2716 self._repo = repo
2648 self._repo = repo
2717 self._path = path
2649 self._path = path
2718
2650
2719 def cmp(self, fctx):
2651 def cmp(self, fctx):
2720 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2652 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2721 # path if either side is a symlink.
2653 # path if either side is a symlink.
2722 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2654 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2723 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2655 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2724 # Add a fast-path for merge if both sides are disk-backed.
2656 # Add a fast-path for merge if both sides are disk-backed.
2725 # Note that filecmp uses the opposite return values (True if same)
2657 # Note that filecmp uses the opposite return values (True if same)
2726 # from our cmp functions (True if different).
2658 # from our cmp functions (True if different).
2727 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2659 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2728 return self.data() != fctx.data()
2660 return self.data() != fctx.data()
2729
2661
2730 def path(self):
2662 def path(self):
2731 return self._path
2663 return self._path
2732
2664
2733 def flags(self):
2665 def flags(self):
2734 return ''
2666 return ''
2735
2667
2736 def data(self):
2668 def data(self):
2737 return util.readfile(self._path)
2669 return util.readfile(self._path)
2738
2670
2739 def decodeddata(self):
2671 def decodeddata(self):
2740 with open(self._path, "rb") as f:
2672 with open(self._path, "rb") as f:
2741 return f.read()
2673 return f.read()
2742
2674
2743 def remove(self):
2675 def remove(self):
2744 util.unlink(self._path)
2676 util.unlink(self._path)
2745
2677
2746 def write(self, data, flags, **kwargs):
2678 def write(self, data, flags, **kwargs):
2747 assert not flags
2679 assert not flags
2748 with open(self._path, "w") as f:
2680 with open(self._path, "w") as f:
2749 f.write(data)
2681 f.write(data)
@@ -1,557 +1,628 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from .thirdparty import (
13 attr,
14 )
12 from . import (
15 from . import (
13 error,
16 error,
14 mdiff,
17 mdiff,
15 node,
18 node,
16 patch,
19 patch,
17 smartset,
20 smartset,
18 )
21 )
19
22
20 baseset = smartset.baseset
23 baseset = smartset.baseset
21 generatorset = smartset.generatorset
24 generatorset = smartset.generatorset
22
25
23 # possible maximum depth between null and wdir()
26 # possible maximum depth between null and wdir()
24 _maxlogdepth = 0x80000000
27 _maxlogdepth = 0x80000000
25
28
26 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
29 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
27 """Walk DAG using 'pfunc' from the given 'revs' nodes
30 """Walk DAG using 'pfunc' from the given 'revs' nodes
28
31
29 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
32 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
30 if 'reverse' is True/False respectively.
33 if 'reverse' is True/False respectively.
31
34
32 Scan ends at the stopdepth (exlusive) if specified. Revisions found
35 Scan ends at the stopdepth (exlusive) if specified. Revisions found
33 earlier than the startdepth are omitted.
36 earlier than the startdepth are omitted.
34 """
37 """
35 if startdepth is None:
38 if startdepth is None:
36 startdepth = 0
39 startdepth = 0
37 if stopdepth is None:
40 if stopdepth is None:
38 stopdepth = _maxlogdepth
41 stopdepth = _maxlogdepth
39 if stopdepth == 0:
42 if stopdepth == 0:
40 return
43 return
41 if stopdepth < 0:
44 if stopdepth < 0:
42 raise error.ProgrammingError('negative stopdepth')
45 raise error.ProgrammingError('negative stopdepth')
43 if reverse:
46 if reverse:
44 heapsign = -1 # max heap
47 heapsign = -1 # max heap
45 else:
48 else:
46 heapsign = +1 # min heap
49 heapsign = +1 # min heap
47
50
48 # load input revs lazily to heap so earlier revisions can be yielded
51 # load input revs lazily to heap so earlier revisions can be yielded
49 # without fully computing the input revs
52 # without fully computing the input revs
50 revs.sort(reverse)
53 revs.sort(reverse)
51 irevs = iter(revs)
54 irevs = iter(revs)
52 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
55 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
53
56
54 inputrev = next(irevs, None)
57 inputrev = next(irevs, None)
55 if inputrev is not None:
58 if inputrev is not None:
56 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
59 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
57
60
58 lastrev = None
61 lastrev = None
59 while pendingheap:
62 while pendingheap:
60 currev, curdepth = heapq.heappop(pendingheap)
63 currev, curdepth = heapq.heappop(pendingheap)
61 currev = heapsign * currev
64 currev = heapsign * currev
62 if currev == inputrev:
65 if currev == inputrev:
63 inputrev = next(irevs, None)
66 inputrev = next(irevs, None)
64 if inputrev is not None:
67 if inputrev is not None:
65 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
68 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
66 # rescan parents until curdepth >= startdepth because queued entries
69 # rescan parents until curdepth >= startdepth because queued entries
67 # of the same revision are iterated from the lowest depth
70 # of the same revision are iterated from the lowest depth
68 foundnew = (currev != lastrev)
71 foundnew = (currev != lastrev)
69 if foundnew and curdepth >= startdepth:
72 if foundnew and curdepth >= startdepth:
70 lastrev = currev
73 lastrev = currev
71 yield currev
74 yield currev
72 pdepth = curdepth + 1
75 pdepth = curdepth + 1
73 if foundnew and pdepth < stopdepth:
76 if foundnew and pdepth < stopdepth:
74 for prev in pfunc(currev):
77 for prev in pfunc(currev):
75 if prev != node.nullrev:
78 if prev != node.nullrev:
76 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
79 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
77
80
78 def filectxancestors(fctxs, followfirst=False):
81 def filectxancestors(fctxs, followfirst=False):
79 """Like filectx.ancestors(), but can walk from multiple files/revisions,
82 """Like filectx.ancestors(), but can walk from multiple files/revisions,
80 and includes the given fctxs themselves
83 and includes the given fctxs themselves
81
84
82 Yields (rev, {fctx, ...}) pairs in descending order.
85 Yields (rev, {fctx, ...}) pairs in descending order.
83 """
86 """
84 visit = {}
87 visit = {}
85 visitheap = []
88 visitheap = []
86 def addvisit(fctx):
89 def addvisit(fctx):
87 rev = fctx.rev()
90 rev = fctx.rev()
88 if rev not in visit:
91 if rev not in visit:
89 visit[rev] = set()
92 visit[rev] = set()
90 heapq.heappush(visitheap, -rev) # max heap
93 heapq.heappush(visitheap, -rev) # max heap
91 visit[rev].add(fctx)
94 visit[rev].add(fctx)
92
95
93 if followfirst:
96 if followfirst:
94 cut = 1
97 cut = 1
95 else:
98 else:
96 cut = None
99 cut = None
97
100
98 for c in fctxs:
101 for c in fctxs:
99 addvisit(c)
102 addvisit(c)
100 while visit:
103 while visit:
101 currev = -heapq.heappop(visitheap)
104 currev = -heapq.heappop(visitheap)
102 curfctxs = visit.pop(currev)
105 curfctxs = visit.pop(currev)
103 yield currev, curfctxs
106 yield currev, curfctxs
104 for c in curfctxs:
107 for c in curfctxs:
105 for parent in c.parents()[:cut]:
108 for parent in c.parents()[:cut]:
106 addvisit(parent)
109 addvisit(parent)
107 assert not visitheap
110 assert not visitheap
108
111
109 def filerevancestors(fctxs, followfirst=False):
112 def filerevancestors(fctxs, followfirst=False):
110 """Like filectx.ancestors(), but can walk from multiple files/revisions,
113 """Like filectx.ancestors(), but can walk from multiple files/revisions,
111 and includes the given fctxs themselves
114 and includes the given fctxs themselves
112
115
113 Returns a smartset.
116 Returns a smartset.
114 """
117 """
115 gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
118 gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
116 return generatorset(gen, iterasc=False)
119 return generatorset(gen, iterasc=False)
117
120
118 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
121 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
119 if followfirst:
122 if followfirst:
120 cut = 1
123 cut = 1
121 else:
124 else:
122 cut = None
125 cut = None
123 cl = repo.changelog
126 cl = repo.changelog
124 def plainpfunc(rev):
127 def plainpfunc(rev):
125 try:
128 try:
126 return cl.parentrevs(rev)[:cut]
129 return cl.parentrevs(rev)[:cut]
127 except error.WdirUnsupported:
130 except error.WdirUnsupported:
128 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
131 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
129 if cutfunc is None:
132 if cutfunc is None:
130 pfunc = plainpfunc
133 pfunc = plainpfunc
131 else:
134 else:
132 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
135 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
133 revs = revs.filter(lambda rev: not cutfunc(rev))
136 revs = revs.filter(lambda rev: not cutfunc(rev))
134 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
137 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
135
138
136 def revancestors(repo, revs, followfirst=False, startdepth=None,
139 def revancestors(repo, revs, followfirst=False, startdepth=None,
137 stopdepth=None, cutfunc=None):
140 stopdepth=None, cutfunc=None):
138 """Like revlog.ancestors(), but supports additional options, includes
141 """Like revlog.ancestors(), but supports additional options, includes
139 the given revs themselves, and returns a smartset
142 the given revs themselves, and returns a smartset
140
143
141 Scan ends at the stopdepth (exlusive) if specified. Revisions found
144 Scan ends at the stopdepth (exlusive) if specified. Revisions found
142 earlier than the startdepth are omitted.
145 earlier than the startdepth are omitted.
143
146
144 If cutfunc is provided, it will be used to cut the traversal of the DAG.
147 If cutfunc is provided, it will be used to cut the traversal of the DAG.
145 When cutfunc(X) returns True, the DAG traversal stops - revision X and
148 When cutfunc(X) returns True, the DAG traversal stops - revision X and
146 X's ancestors in the traversal path will be skipped. This could be an
149 X's ancestors in the traversal path will be skipped. This could be an
147 optimization sometimes.
150 optimization sometimes.
148
151
149 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
152 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
150 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
153 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
151 return True in this case. For example,
154 return True in this case. For example,
152
155
153 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
156 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
154 |\ # will include "A", because the path D -> C -> A was not cut.
157 |\ # will include "A", because the path D -> C -> A was not cut.
155 B C # If "B" gets cut, "A" might want to be cut too.
158 B C # If "B" gets cut, "A" might want to be cut too.
156 |/
159 |/
157 A
160 A
158 """
161 """
159 gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
162 gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
160 cutfunc)
163 cutfunc)
161 return generatorset(gen, iterasc=False)
164 return generatorset(gen, iterasc=False)
162
165
163 def _genrevdescendants(repo, revs, followfirst):
166 def _genrevdescendants(repo, revs, followfirst):
164 if followfirst:
167 if followfirst:
165 cut = 1
168 cut = 1
166 else:
169 else:
167 cut = None
170 cut = None
168
171
169 cl = repo.changelog
172 cl = repo.changelog
170 first = revs.min()
173 first = revs.min()
171 nullrev = node.nullrev
174 nullrev = node.nullrev
172 if first == nullrev:
175 if first == nullrev:
173 # Are there nodes with a null first parent and a non-null
176 # Are there nodes with a null first parent and a non-null
174 # second one? Maybe. Do we care? Probably not.
177 # second one? Maybe. Do we care? Probably not.
175 yield first
178 yield first
176 for i in cl:
179 for i in cl:
177 yield i
180 yield i
178 else:
181 else:
179 seen = set(revs)
182 seen = set(revs)
180 for i in cl.revs(first):
183 for i in cl.revs(first):
181 if i in seen:
184 if i in seen:
182 yield i
185 yield i
183 continue
186 continue
184 for x in cl.parentrevs(i)[:cut]:
187 for x in cl.parentrevs(i)[:cut]:
185 if x != nullrev and x in seen:
188 if x != nullrev and x in seen:
186 seen.add(i)
189 seen.add(i)
187 yield i
190 yield i
188 break
191 break
189
192
190 def _builddescendantsmap(repo, startrev, followfirst):
193 def _builddescendantsmap(repo, startrev, followfirst):
191 """Build map of 'rev -> child revs', offset from startrev"""
194 """Build map of 'rev -> child revs', offset from startrev"""
192 cl = repo.changelog
195 cl = repo.changelog
193 nullrev = node.nullrev
196 nullrev = node.nullrev
194 descmap = [[] for _rev in xrange(startrev, len(cl))]
197 descmap = [[] for _rev in xrange(startrev, len(cl))]
195 for currev in cl.revs(startrev + 1):
198 for currev in cl.revs(startrev + 1):
196 p1rev, p2rev = cl.parentrevs(currev)
199 p1rev, p2rev = cl.parentrevs(currev)
197 if p1rev >= startrev:
200 if p1rev >= startrev:
198 descmap[p1rev - startrev].append(currev)
201 descmap[p1rev - startrev].append(currev)
199 if not followfirst and p2rev != nullrev and p2rev >= startrev:
202 if not followfirst and p2rev != nullrev and p2rev >= startrev:
200 descmap[p2rev - startrev].append(currev)
203 descmap[p2rev - startrev].append(currev)
201 return descmap
204 return descmap
202
205
203 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
206 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
204 startrev = revs.min()
207 startrev = revs.min()
205 descmap = _builddescendantsmap(repo, startrev, followfirst)
208 descmap = _builddescendantsmap(repo, startrev, followfirst)
206 def pfunc(rev):
209 def pfunc(rev):
207 return descmap[rev - startrev]
210 return descmap[rev - startrev]
208 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
211 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
209
212
210 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
213 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
211 """Like revlog.descendants() but supports additional options, includes
214 """Like revlog.descendants() but supports additional options, includes
212 the given revs themselves, and returns a smartset
215 the given revs themselves, and returns a smartset
213
216
214 Scan ends at the stopdepth (exlusive) if specified. Revisions found
217 Scan ends at the stopdepth (exlusive) if specified. Revisions found
215 earlier than the startdepth are omitted.
218 earlier than the startdepth are omitted.
216 """
219 """
217 if startdepth is None and stopdepth is None:
220 if startdepth is None and stopdepth is None:
218 gen = _genrevdescendants(repo, revs, followfirst)
221 gen = _genrevdescendants(repo, revs, followfirst)
219 else:
222 else:
220 gen = _genrevdescendantsofdepth(repo, revs, followfirst,
223 gen = _genrevdescendantsofdepth(repo, revs, followfirst,
221 startdepth, stopdepth)
224 startdepth, stopdepth)
222 return generatorset(gen, iterasc=True)
225 return generatorset(gen, iterasc=True)
223
226
224 def _reachablerootspure(repo, minroot, roots, heads, includepath):
227 def _reachablerootspure(repo, minroot, roots, heads, includepath):
225 """return (heads(::<roots> and ::<heads>))
228 """return (heads(::<roots> and ::<heads>))
226
229
227 If includepath is True, return (<roots>::<heads>)."""
230 If includepath is True, return (<roots>::<heads>)."""
228 if not roots:
231 if not roots:
229 return []
232 return []
230 parentrevs = repo.changelog.parentrevs
233 parentrevs = repo.changelog.parentrevs
231 roots = set(roots)
234 roots = set(roots)
232 visit = list(heads)
235 visit = list(heads)
233 reachable = set()
236 reachable = set()
234 seen = {}
237 seen = {}
235 # prefetch all the things! (because python is slow)
238 # prefetch all the things! (because python is slow)
236 reached = reachable.add
239 reached = reachable.add
237 dovisit = visit.append
240 dovisit = visit.append
238 nextvisit = visit.pop
241 nextvisit = visit.pop
239 # open-code the post-order traversal due to the tiny size of
242 # open-code the post-order traversal due to the tiny size of
240 # sys.getrecursionlimit()
243 # sys.getrecursionlimit()
241 while visit:
244 while visit:
242 rev = nextvisit()
245 rev = nextvisit()
243 if rev in roots:
246 if rev in roots:
244 reached(rev)
247 reached(rev)
245 if not includepath:
248 if not includepath:
246 continue
249 continue
247 parents = parentrevs(rev)
250 parents = parentrevs(rev)
248 seen[rev] = parents
251 seen[rev] = parents
249 for parent in parents:
252 for parent in parents:
250 if parent >= minroot and parent not in seen:
253 if parent >= minroot and parent not in seen:
251 dovisit(parent)
254 dovisit(parent)
252 if not reachable:
255 if not reachable:
253 return baseset()
256 return baseset()
254 if not includepath:
257 if not includepath:
255 return reachable
258 return reachable
256 for rev in sorted(seen):
259 for rev in sorted(seen):
257 for parent in seen[rev]:
260 for parent in seen[rev]:
258 if parent in reachable:
261 if parent in reachable:
259 reached(rev)
262 reached(rev)
260 return reachable
263 return reachable
261
264
262 def reachableroots(repo, roots, heads, includepath=False):
265 def reachableroots(repo, roots, heads, includepath=False):
263 """return (heads(::<roots> and ::<heads>))
266 """return (heads(::<roots> and ::<heads>))
264
267
265 If includepath is True, return (<roots>::<heads>)."""
268 If includepath is True, return (<roots>::<heads>)."""
266 if not roots:
269 if not roots:
267 return baseset()
270 return baseset()
268 minroot = roots.min()
271 minroot = roots.min()
269 roots = list(roots)
272 roots = list(roots)
270 heads = list(heads)
273 heads = list(heads)
271 try:
274 try:
272 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
275 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
273 except AttributeError:
276 except AttributeError:
274 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
277 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
275 revs = baseset(revs)
278 revs = baseset(revs)
276 revs.sort()
279 revs.sort()
277 return revs
280 return revs
278
281
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # A '!' block type marks an actual change within the requested range.
    changed = False
    for _block, blocktype in inrangeblocks:
        if blocktype == '!':
            changed = True
            break
    return changed, linerange1
288
291
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range, as (filectx, linerange) pairs.

    If `followfirst` is True, only the first parent of each revision is
    followed.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    fctx = fctx.introfilectx()
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        ctx, linerange2 = visit.pop(max(visit))
        parents = ctx.parents()
        if followfirst:
            parents = parents[:1]
        if not parents:
            # The block originates from the initial revision.
            yield ctx, linerange2
            continue
        inrange = False
        for p in parents:
            inrangep, linerange1 = _changesrange(p, ctx, linerange2, diffopts)
            if inrangep:
                inrange = True
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'ctx' (a known descendant) so that,
            # when _adjustlinkrev is called for 'p', it receives this
            # descendant (as srcrev) instead of the possibly topmost
            # introrev.
            p._descendantrev = ctx.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield ctx, linerange2
321
324
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range, as (filectx, linerange) pairs.
    """
    # First possibly yield 'fctx' itself, if it has changes in range with
    # respect to its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for prev in fl.parentrevs(i):
            try:
                p, linerange2 = seen[prev]
            except KeyError:
                # nullrev or a revision on another branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            if inrangep:
                inrange = True
            # If revision 'i' has been seen (it's a merge) and the line
            # range previously computed differs from the one we just got,
            # take the surrounding interval. This is conservative but
            # avoids losing information.
            if i in seen and seen[i][1] != linerange1:
                lows, highs = zip(linerange1, seen[i][1])
                linerange1 = min(lows), max(highs)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
360
363
@attr.s(slots=True, frozen=True)
class annotateline(object):
    # filectx the line is attributed to
    fctx = attr.ib()
    # 1-based line number within fctx (see test-annotate.py); default False
    # appears to act as a "not recorded" sentinel -- TODO confirm
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
370
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    `parents` and `child` are (annotateline list, text) pairs; the annotated
    `child` pair is returned (mutated in place).

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # 'range' (not 'xrange') so this also works on Python 3,
                    # where xrange no longer exists; iteration is identical.
                    for bk in range(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak],
                                                       skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in range(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
431
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to
    minimize the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function works by "retention": revisions are kept in memory until
    # we are ready to emit a whole branch that immediately "merges" into an
    # existing one, which reduces the number of parallel branches with
    # interleaved revisions.
    #
    # During iteration revisions are split into two groups:
    #   A) revisions already emitted
    #   B) revisions in "retention", stored as distinct subgroups.
    #
    # For each REV:
    #   1) if REV is a parent of (A), it is emitted; any retention subgroup
    #      from (B) that was blocked on REV becoming available is emitted
    #      first.
    #   2) else, if some subgroup in (B) awaits REV, REV joins it and the
    #      subgroup now awaits REV's parents.
    #   3) otherwise a new subgroup is created for REV.
    #
    # The algorithm is bootstrapped by emitting the tipmost revision, which
    # puts it in group (A).

    revs.sort(reverse=True)

    # Parents of revisions that have been emitted. The graph generator
    # already knows about them, so revisions referencing them need not be
    # delayed.
    #
    # Pre-seeding this set (via firstbranch) prioritizes one branch: every
    # other branch then waits until the seeded one is ready to be emitted.
    unblocked = set(firstbranch)

    # List of pending subgroups, each defined by:
    #
    #   (revs: revisions waiting to be displayed,
    #    blocked: revisions that cannot be displayed before those in 'revs')
    #
    # 'blocked' holds the parents of any revision in the subgroup that are
    # not themselves in the subgroup. The core idea is to delay emission as
    # long as possible: a subgroup's revisions are displayed only when we
    # are about to display one of those parents.
    #
    # This first implementation is smart until it encounters a merge: it
    # emits revisions as soon as any parent is about to be emitted and can
    # grow an arbitrary number of revisions in 'blocked'. In practice this
    # means new branches are properly retained, but no special ordering is
    # attempted for ancestors of merges. This could be improved.
    #
    # The first subgroup is special: it corresponds to everything already
    # emitted. Its revision list stays empty and its 'blocked' set contains
    # the parents of already emitted revisions.
    #
    # The <parents> set of groups[0] could be pre-seeded with specific
    # changesets to select the first emitted branch.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # The heap pops its smallest element; we want the highest, so store
        # negated revisions.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # Iterate on pending revisions until the current one has been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek subgroups blocked on (waiting for) the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # Gather together all subgroups blocked on the same
                # revision.
                #
                # Subgroups are merged when a common blocking ancestor is
                # observed. For example, given:
                #
                #   revs [5, 4] waiting for 1
                #   revs [3, 2] waiting for 1
                #
                # these two merge when we process 1. In theory they could
                # have been merged when 2 joined its subgroup (both were
                # already blocked on 1), but doing it here keeps the
                # algorithm simpler.
                #
                # The oldest subgroup is always kept first. Keeping the
                # longest first instead might let graph algorithms shorten
                # their parallel lines; this is currently not done.
                targetidx = matching.pop(0)
                mainrevs, mainblocked = groups[targetidx]
                for i in matching:
                    extrarevs, extrablocked = groups[i]
                    mainrevs.extend(extrarevs)
                    mainblocked |= extrablocked
                # Delete all merged subgroups except the one we kept,
                # starting from the last one for performance and sanity
                # reasons.
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. Create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            group = groups[targetidx]

            # Add the current revision to this subgroup. Done after the
            # subgroup merging because every element that relied on this
            # revision must precede it.
            #
            # The <parents> set is also updated with the parents of the new
            # revision.
            if rev == currentrev: # only display stuff in rev
                group[0].append(rev)
            group[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            group[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display.
            #
            # When 'unblocked' is empty (if clause), we were not waiting for
            # any revision during the first iteration (no priority given) or
            # we emitted a whole disconnected set of the graph (reached a
            # root); arbitrarily take the oldest known subgroup. The
            # heuristic could probably be better.
            #
            # Otherwise (elif clause), if the subgroup is blocked on a
            # revision we just emitted, it can safely be emitted as well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    group = groups[1]
            elif not group[1] & unblocked:
                group = None

            if group is not None:
                # Update the set of awaited revisions with the subgroup's.
                unblocked |= group[1]
                # Output all revisions in the subgroup.
                for r in group[0]:
                    yield r
                # Delete the subgroup just emitted, unless it is groups[0],
                # which is only emptied.
                if targetidx:
                    del groups[targetidx]
                else:
                    group[0][:] = []
    # Emit any subgroup still waiting for revisions we are not going to
    # iterate over.
    for g in groups:
        for r in g[0]:
            yield r
@@ -1,104 +1,104 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2 from __future__ import print_function
2 from __future__ import print_function
3
3
4 import unittest
4 import unittest
5
5
6 from mercurial import (
6 from mercurial import (
7 mdiff,
7 mdiff,
8 )
8 )
9 from mercurial.context import (
9 from mercurial.dagop import (
10 annotateline,
10 annotateline,
11 _annotatepair,
11 _annotatepair,
12 )
12 )
13
13
class AnnotateTests(unittest.TestCase):
    """Unit tests for annotate code."""

    def testannotatepair(self):
        self.maxDiff = None # camelcase-required

        oldfctx = b'old'
        p1fctx, p2fctx, childfctx = b'p1', b'p2', b'c'
        olddata = b'a\nb\n'
        p1data = b'a\nb\nc\n'
        p2data = b'a\nc\nd\n'
        childdata = b'a\nb2\nc\nc2\nd\n'
        diffopts = mdiff.diffopts()

        def decorate(text, rev):
            # Build (annotateline list, text) with 1-based line numbers.
            lines = [annotateline(fctx=rev, lineno=n)
                     for n in range(1, text.count(b'\n') + 1)]
            return (lines, text)

        # Basic usage

        oldann = decorate(olddata, oldfctx)
        p1ann = decorate(p1data, p1fctx)
        p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
        self.assertEqual(p1ann[0], [
            annotateline(b'old', 1),
            annotateline(b'old', 2),
            annotateline(b'p1', 3),
        ])

        p2ann = decorate(p2data, p2fctx)
        p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
        self.assertEqual(p2ann[0], [
            annotateline(b'old', 1),
            annotateline(b'p2', 2),
            annotateline(b'p2', 3),
        ])

        # Test with multiple parents (note the difference caused by ordering)

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'c', 2),
            annotateline(b'p2', 2),
            annotateline(b'c', 4),
            annotateline(b'p2', 3),
        ])

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'c', 2),
            annotateline(b'p1', 3),
            annotateline(b'c', 4),
            annotateline(b'p2', 3),
        ])

        # Test with skipchild (note the difference caused by ordering)

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'old', 2, True),
            # note that this line was carried over from earlier so it is *not*
            # marked skipped
            annotateline(b'p2', 2),
            annotateline(b'p2', 2, True),
            annotateline(b'p2', 3),
        ])

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'old', 2, True),
            annotateline(b'p1', 3),
            annotateline(b'p1', 3, True),
            annotateline(b'p2', 3),
        ])
101
101
if __name__ == '__main__':
    # Run through Mercurial's quiet test runner when invoked directly.
    import silenttestrunner
    silenttestrunner.main(__name__)
General Comments 0
You need to be logged in to leave comments. Login now