##// END OF EJS Templates
py3: use raw strings while accessing class.__dict__...
Pulkit Goyal -
r32148:2cfdf524 default
parent child Browse files
Show More
@@ -1,2174 +1,2174 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 )
26 )
27 from . import (
27 from . import (
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 mdiff,
32 mdiff,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 phases,
35 phases,
36 pycompat,
36 pycompat,
37 repoview,
37 repoview,
38 revlog,
38 revlog,
39 scmutil,
39 scmutil,
40 subrepo,
40 subrepo,
41 util,
41 util,
42 )
42 )
43
43
# Convenience aliases for heavily-used helpers.
propertycache = util.propertycache

# Predicate matching any byte outside the printable ASCII range 0x21-0x7f;
# used to decide whether a 20-byte string could be a binary nodeid.
nonascii = re.compile(r'[^\x21-\x7f]').search
47
47
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through unchanged lets callers write
        # repo[ctx] without allocating a new object.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            # short() returns bytes; native str on Python 3 needs a decode.
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # NOTE: the __dict__ keys must be raw (native str) strings: on
        # Python 3 the attributes cached by @propertycache are stored under
        # str keys, while mercurial source literals are bytes by default.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
391
391
392
392
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build a memctx whose file contents come from *store*.

    *store* must expose getfile(path) -> (data, (islink, isexec), copied);
    a None data value means the file was removed in this context.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            # File marked as removed in the store.
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx
409
409
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        # The default 'visible' filter hides obsolete changesets; give the
        # user the --hidden escape hatch.
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
422
422
423 class changectx(basectx):
423 class changectx(basectx):
424 """A changecontext object makes access to data related to a particular
424 """A changecontext object makes access to data related to a particular
425 changeset convenient. It represents a read-only context already present in
425 changeset convenient. It represents a read-only context already present in
426 the repo."""
426 the repo."""
427 def __init__(self, repo, changeid=''):
427 def __init__(self, repo, changeid=''):
428 """changeid is a revision number, node, or tag"""
428 """changeid is a revision number, node, or tag"""
429
429
430 # since basectx.__new__ already took care of copying the object, we
430 # since basectx.__new__ already took care of copying the object, we
431 # don't need to do anything in __init__, so we just exit here
431 # don't need to do anything in __init__, so we just exit here
432 if isinstance(changeid, basectx):
432 if isinstance(changeid, basectx):
433 return
433 return
434
434
435 if changeid == '':
435 if changeid == '':
436 changeid = '.'
436 changeid = '.'
437 self._repo = repo
437 self._repo = repo
438
438
439 try:
439 try:
440 if isinstance(changeid, int):
440 if isinstance(changeid, int):
441 self._node = repo.changelog.node(changeid)
441 self._node = repo.changelog.node(changeid)
442 self._rev = changeid
442 self._rev = changeid
443 return
443 return
444 if not pycompat.ispy3 and isinstance(changeid, long):
444 if not pycompat.ispy3 and isinstance(changeid, long):
445 changeid = str(changeid)
445 changeid = str(changeid)
446 if changeid == 'null':
446 if changeid == 'null':
447 self._node = nullid
447 self._node = nullid
448 self._rev = nullrev
448 self._rev = nullrev
449 return
449 return
450 if changeid == 'tip':
450 if changeid == 'tip':
451 self._node = repo.changelog.tip()
451 self._node = repo.changelog.tip()
452 self._rev = repo.changelog.rev(self._node)
452 self._rev = repo.changelog.rev(self._node)
453 return
453 return
454 if changeid == '.' or changeid == repo.dirstate.p1():
454 if changeid == '.' or changeid == repo.dirstate.p1():
455 # this is a hack to delay/avoid loading obsmarkers
455 # this is a hack to delay/avoid loading obsmarkers
456 # when we know that '.' won't be hidden
456 # when we know that '.' won't be hidden
457 self._node = repo.dirstate.p1()
457 self._node = repo.dirstate.p1()
458 self._rev = repo.unfiltered().changelog.rev(self._node)
458 self._rev = repo.unfiltered().changelog.rev(self._node)
459 return
459 return
460 if len(changeid) == 20:
460 if len(changeid) == 20:
461 try:
461 try:
462 self._node = changeid
462 self._node = changeid
463 self._rev = repo.changelog.rev(changeid)
463 self._rev = repo.changelog.rev(changeid)
464 return
464 return
465 except error.FilteredRepoLookupError:
465 except error.FilteredRepoLookupError:
466 raise
466 raise
467 except LookupError:
467 except LookupError:
468 pass
468 pass
469
469
470 try:
470 try:
471 r = int(changeid)
471 r = int(changeid)
472 if '%d' % r != changeid:
472 if '%d' % r != changeid:
473 raise ValueError
473 raise ValueError
474 l = len(repo.changelog)
474 l = len(repo.changelog)
475 if r < 0:
475 if r < 0:
476 r += l
476 r += l
477 if r < 0 or r >= l:
477 if r < 0 or r >= l:
478 raise ValueError
478 raise ValueError
479 self._rev = r
479 self._rev = r
480 self._node = repo.changelog.node(r)
480 self._node = repo.changelog.node(r)
481 return
481 return
482 except error.FilteredIndexError:
482 except error.FilteredIndexError:
483 raise
483 raise
484 except (ValueError, OverflowError, IndexError):
484 except (ValueError, OverflowError, IndexError):
485 pass
485 pass
486
486
487 if len(changeid) == 40:
487 if len(changeid) == 40:
488 try:
488 try:
489 self._node = bin(changeid)
489 self._node = bin(changeid)
490 self._rev = repo.changelog.rev(self._node)
490 self._rev = repo.changelog.rev(self._node)
491 return
491 return
492 except error.FilteredLookupError:
492 except error.FilteredLookupError:
493 raise
493 raise
494 except (TypeError, LookupError):
494 except (TypeError, LookupError):
495 pass
495 pass
496
496
497 # lookup bookmarks through the name interface
497 # lookup bookmarks through the name interface
498 try:
498 try:
499 self._node = repo.names.singlenode(repo, changeid)
499 self._node = repo.names.singlenode(repo, changeid)
500 self._rev = repo.changelog.rev(self._node)
500 self._rev = repo.changelog.rev(self._node)
501 return
501 return
502 except KeyError:
502 except KeyError:
503 pass
503 pass
504 except error.FilteredRepoLookupError:
504 except error.FilteredRepoLookupError:
505 raise
505 raise
506 except error.RepoLookupError:
506 except error.RepoLookupError:
507 pass
507 pass
508
508
509 self._node = repo.unfiltered().changelog._partialmatch(changeid)
509 self._node = repo.unfiltered().changelog._partialmatch(changeid)
510 if self._node is not None:
510 if self._node is not None:
511 self._rev = repo.changelog.rev(self._node)
511 self._rev = repo.changelog.rev(self._node)
512 return
512 return
513
513
514 # lookup failed
514 # lookup failed
515 # check if it might have come from damaged dirstate
515 # check if it might have come from damaged dirstate
516 #
516 #
517 # XXX we could avoid the unfiltered if we had a recognizable
517 # XXX we could avoid the unfiltered if we had a recognizable
518 # exception for filtered changeset access
518 # exception for filtered changeset access
519 if changeid in repo.unfiltered().dirstate.parents():
519 if changeid in repo.unfiltered().dirstate.parents():
520 msg = _("working directory has unknown parent '%s'!")
520 msg = _("working directory has unknown parent '%s'!")
521 raise error.Abort(msg % short(changeid))
521 raise error.Abort(msg % short(changeid))
522 try:
522 try:
523 if len(changeid) == 20 and nonascii(changeid):
523 if len(changeid) == 20 and nonascii(changeid):
524 changeid = hex(changeid)
524 changeid = hex(changeid)
525 except TypeError:
525 except TypeError:
526 pass
526 pass
527 except (error.FilteredIndexError, error.FilteredLookupError,
527 except (error.FilteredIndexError, error.FilteredLookupError,
528 error.FilteredRepoLookupError):
528 error.FilteredRepoLookupError):
529 raise _filterederror(repo, changeid)
529 raise _filterederror(repo, changeid)
530 except IndexError:
530 except IndexError:
531 pass
531 pass
532 raise error.RepoLookupError(
532 raise error.RepoLookupError(
533 _("unknown revision '%s'") % changeid)
533 _("unknown revision '%s'") % changeid)
534
534
535 def __hash__(self):
535 def __hash__(self):
536 try:
536 try:
537 return hash(self._rev)
537 return hash(self._rev)
538 except AttributeError:
538 except AttributeError:
539 return id(self)
539 return id(self)
540
540
541 def __nonzero__(self):
541 def __nonzero__(self):
542 return self._rev != nullrev
542 return self._rev != nullrev
543
543
544 __bool__ = __nonzero__
544 __bool__ = __nonzero__
545
545
546 @propertycache
546 @propertycache
547 def _changeset(self):
547 def _changeset(self):
548 return self._repo.changelog.changelogrevision(self.rev())
548 return self._repo.changelog.changelogrevision(self.rev())
549
549
550 @propertycache
550 @propertycache
551 def _manifest(self):
551 def _manifest(self):
552 return self._manifestctx.read()
552 return self._manifestctx.read()
553
553
554 @propertycache
554 @propertycache
555 def _manifestctx(self):
555 def _manifestctx(self):
556 return self._repo.manifestlog[self._changeset.manifest]
556 return self._repo.manifestlog[self._changeset.manifest]
557
557
558 @propertycache
558 @propertycache
559 def _manifestdelta(self):
559 def _manifestdelta(self):
560 return self._manifestctx.readdelta()
560 return self._manifestctx.readdelta()
561
561
562 @propertycache
562 @propertycache
563 def _parents(self):
563 def _parents(self):
564 repo = self._repo
564 repo = self._repo
565 p1, p2 = repo.changelog.parentrevs(self._rev)
565 p1, p2 = repo.changelog.parentrevs(self._rev)
566 if p2 == nullrev:
566 if p2 == nullrev:
567 return [changectx(repo, p1)]
567 return [changectx(repo, p1)]
568 return [changectx(repo, p1), changectx(repo, p2)]
568 return [changectx(repo, p1), changectx(repo, p2)]
569
569
570 def changeset(self):
570 def changeset(self):
571 c = self._changeset
571 c = self._changeset
572 return (
572 return (
573 c.manifest,
573 c.manifest,
574 c.user,
574 c.user,
575 c.date,
575 c.date,
576 c.files,
576 c.files,
577 c.description,
577 c.description,
578 c.extra,
578 c.extra,
579 )
579 )
580 def manifestnode(self):
580 def manifestnode(self):
581 return self._changeset.manifest
581 return self._changeset.manifest
582
582
583 def user(self):
583 def user(self):
584 return self._changeset.user
584 return self._changeset.user
585 def date(self):
585 def date(self):
586 return self._changeset.date
586 return self._changeset.date
587 def files(self):
587 def files(self):
588 return self._changeset.files
588 return self._changeset.files
589 def description(self):
589 def description(self):
590 return self._changeset.description
590 return self._changeset.description
591 def branch(self):
591 def branch(self):
592 return encoding.tolocal(self._changeset.extra.get("branch"))
592 return encoding.tolocal(self._changeset.extra.get("branch"))
593 def closesbranch(self):
593 def closesbranch(self):
594 return 'close' in self._changeset.extra
594 return 'close' in self._changeset.extra
595 def extra(self):
595 def extra(self):
596 return self._changeset.extra
596 return self._changeset.extra
597 def tags(self):
597 def tags(self):
598 return self._repo.nodetags(self._node)
598 return self._repo.nodetags(self._node)
599 def bookmarks(self):
599 def bookmarks(self):
600 return self._repo.nodebookmarks(self._node)
600 return self._repo.nodebookmarks(self._node)
601 def phase(self):
601 def phase(self):
602 return self._repo._phasecache.phase(self._repo, self._rev)
602 return self._repo._phasecache.phase(self._repo, self._rev)
603 def hidden(self):
603 def hidden(self):
604 return self._rev in repoview.filterrevs(self._repo, 'visible')
604 return self._rev in repoview.filterrevs(self._repo, 'visible')
605
605
606 def children(self):
606 def children(self):
607 """return contexts for each child changeset"""
607 """return contexts for each child changeset"""
608 c = self._repo.changelog.children(self._node)
608 c = self._repo.changelog.children(self._node)
609 return [changectx(self._repo, x) for x in c]
609 return [changectx(self._repo, x) for x in c]
610
610
611 def ancestors(self):
611 def ancestors(self):
612 for a in self._repo.changelog.ancestors([self._rev]):
612 for a in self._repo.changelog.ancestors([self._rev]):
613 yield changectx(self._repo, a)
613 yield changectx(self._repo, a)
614
614
615 def descendants(self):
615 def descendants(self):
616 for d in self._repo.changelog.descendants([self._rev]):
616 for d in self._repo.changelog.descendants([self._rev]):
617 yield changectx(self._repo, d)
617 yield changectx(self._repo, d)
618
618
619 def filectx(self, path, fileid=None, filelog=None):
619 def filectx(self, path, fileid=None, filelog=None):
620 """get a file context from this changeset"""
620 """get a file context from this changeset"""
621 if fileid is None:
621 if fileid is None:
622 fileid = self.filenode(path)
622 fileid = self.filenode(path)
623 return filectx(self._repo, path, fileid=fileid,
623 return filectx(self._repo, path, fileid=fileid,
624 changectx=self, filelog=filelog)
624 changectx=self, filelog=filelog)
625
625
626 def ancestor(self, c2, warn=False):
626 def ancestor(self, c2, warn=False):
627 """return the "best" ancestor context of self and c2
627 """return the "best" ancestor context of self and c2
628
628
629 If there are multiple candidates, it will show a message and check
629 If there are multiple candidates, it will show a message and check
630 merge.preferancestor configuration before falling back to the
630 merge.preferancestor configuration before falling back to the
631 revlog ancestor."""
631 revlog ancestor."""
632 # deal with workingctxs
632 # deal with workingctxs
633 n2 = c2._node
633 n2 = c2._node
634 if n2 is None:
634 if n2 is None:
635 n2 = c2._parents[0]._node
635 n2 = c2._parents[0]._node
636 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
636 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
637 if not cahs:
637 if not cahs:
638 anc = nullid
638 anc = nullid
639 elif len(cahs) == 1:
639 elif len(cahs) == 1:
640 anc = cahs[0]
640 anc = cahs[0]
641 else:
641 else:
642 # experimental config: merge.preferancestor
642 # experimental config: merge.preferancestor
643 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
643 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
644 try:
644 try:
645 ctx = changectx(self._repo, r)
645 ctx = changectx(self._repo, r)
646 except error.RepoLookupError:
646 except error.RepoLookupError:
647 continue
647 continue
648 anc = ctx.node()
648 anc = ctx.node()
649 if anc in cahs:
649 if anc in cahs:
650 break
650 break
651 else:
651 else:
652 anc = self._repo.changelog.ancestor(self._node, n2)
652 anc = self._repo.changelog.ancestor(self._node, n2)
653 if warn:
653 if warn:
654 self._repo.ui.status(
654 self._repo.ui.status(
655 (_("note: using %s as ancestor of %s and %s\n") %
655 (_("note: using %s as ancestor of %s and %s\n") %
656 (short(anc), short(self._node), short(n2))) +
656 (short(anc), short(self._node), short(n2))) +
657 ''.join(_(" alternatively, use --config "
657 ''.join(_(" alternatively, use --config "
658 "merge.preferancestor=%s\n") %
658 "merge.preferancestor=%s\n") %
659 short(n) for n in sorted(cahs) if n != anc))
659 short(n) for n in sorted(cahs) if n != anc))
660 return changectx(self._repo, anc)
660 return changectx(self._repo, anc)
661
661
662 def descendant(self, other):
662 def descendant(self, other):
663 """True if other is descendant of this changeset"""
663 """True if other is descendant of this changeset"""
664 return self._repo.changelog.descendant(self._rev, other._rev)
664 return self._repo.changelog.descendant(self._rev, other._rev)
665
665
666 def walk(self, match):
666 def walk(self, match):
667 '''Generates matching file names.'''
667 '''Generates matching file names.'''
668
668
669 # Wrap match.bad method to have message with nodeid
669 # Wrap match.bad method to have message with nodeid
670 def bad(fn, msg):
670 def bad(fn, msg):
671 # The manifest doesn't know about subrepos, so don't complain about
671 # The manifest doesn't know about subrepos, so don't complain about
672 # paths into valid subrepos.
672 # paths into valid subrepos.
673 if any(fn == s or fn.startswith(s + '/')
673 if any(fn == s or fn.startswith(s + '/')
674 for s in self.substate):
674 for s in self.substate):
675 return
675 return
676 match.bad(fn, _('no such file in rev %s') % self)
676 match.bad(fn, _('no such file in rev %s') % self)
677
677
678 m = matchmod.badmatch(match, bad)
678 m = matchmod.badmatch(match, bad)
679 return self._manifest.walk(m)
679 return self._manifest.walk(m)
680
680
681 def matches(self, match):
681 def matches(self, match):
682 return self.walk(match)
682 return self.walk(match)
683
683
684 class basefilectx(object):
684 class basefilectx(object):
685 """A filecontext object represents the common logic for its children:
685 """A filecontext object represents the common logic for its children:
686 filectx: read-only access to a filerevision that is already present
686 filectx: read-only access to a filerevision that is already present
687 in the repo,
687 in the repo,
688 workingfilectx: a filecontext that represents files from the working
688 workingfilectx: a filecontext that represents files from the working
689 directory,
689 directory,
690 memfilectx: a filecontext that represents files in-memory."""
690 memfilectx: a filecontext that represents files in-memory."""
691 def __new__(cls, repo, path, *args, **kwargs):
691 def __new__(cls, repo, path, *args, **kwargs):
692 return super(basefilectx, cls).__new__(cls)
692 return super(basefilectx, cls).__new__(cls)
693
693
694 @propertycache
694 @propertycache
695 def _filelog(self):
695 def _filelog(self):
696 return self._repo.file(self._path)
696 return self._repo.file(self._path)
697
697
698 @propertycache
698 @propertycache
699 def _changeid(self):
699 def _changeid(self):
700 if '_changeid' in self.__dict__:
700 if r'_changeid' in self.__dict__:
701 return self._changeid
701 return self._changeid
702 elif '_changectx' in self.__dict__:
702 elif r'_changectx' in self.__dict__:
703 return self._changectx.rev()
703 return self._changectx.rev()
704 elif '_descendantrev' in self.__dict__:
704 elif r'_descendantrev' in self.__dict__:
705 # this file context was created from a revision with a known
705 # this file context was created from a revision with a known
706 # descendant, we can (lazily) correct for linkrev aliases
706 # descendant, we can (lazily) correct for linkrev aliases
707 return self._adjustlinkrev(self._descendantrev)
707 return self._adjustlinkrev(self._descendantrev)
708 else:
708 else:
709 return self._filelog.linkrev(self._filerev)
709 return self._filelog.linkrev(self._filerev)
710
710
711 @propertycache
711 @propertycache
712 def _filenode(self):
712 def _filenode(self):
713 if '_fileid' in self.__dict__:
713 if r'_fileid' in self.__dict__:
714 return self._filelog.lookup(self._fileid)
714 return self._filelog.lookup(self._fileid)
715 else:
715 else:
716 return self._changectx.filenode(self._path)
716 return self._changectx.filenode(self._path)
717
717
718 @propertycache
718 @propertycache
719 def _filerev(self):
719 def _filerev(self):
720 return self._filelog.rev(self._filenode)
720 return self._filelog.rev(self._filenode)
721
721
722 @propertycache
722 @propertycache
723 def _repopath(self):
723 def _repopath(self):
724 return self._path
724 return self._path
725
725
726 def __nonzero__(self):
726 def __nonzero__(self):
727 try:
727 try:
728 self._filenode
728 self._filenode
729 return True
729 return True
730 except error.LookupError:
730 except error.LookupError:
731 # file is missing
731 # file is missing
732 return False
732 return False
733
733
734 __bool__ = __nonzero__
734 __bool__ = __nonzero__
735
735
736 def __str__(self):
736 def __str__(self):
737 try:
737 try:
738 return "%s@%s" % (self.path(), self._changectx)
738 return "%s@%s" % (self.path(), self._changectx)
739 except error.LookupError:
739 except error.LookupError:
740 return "%s@???" % self.path()
740 return "%s@???" % self.path()
741
741
742 def __repr__(self):
742 def __repr__(self):
743 return "<%s %s>" % (type(self).__name__, str(self))
743 return "<%s %s>" % (type(self).__name__, str(self))
744
744
745 def __hash__(self):
745 def __hash__(self):
746 try:
746 try:
747 return hash((self._path, self._filenode))
747 return hash((self._path, self._filenode))
748 except AttributeError:
748 except AttributeError:
749 return id(self)
749 return id(self)
750
750
751 def __eq__(self, other):
751 def __eq__(self, other):
752 try:
752 try:
753 return (type(self) == type(other) and self._path == other._path
753 return (type(self) == type(other) and self._path == other._path
754 and self._filenode == other._filenode)
754 and self._filenode == other._filenode)
755 except AttributeError:
755 except AttributeError:
756 return False
756 return False
757
757
758 def __ne__(self, other):
758 def __ne__(self, other):
759 return not (self == other)
759 return not (self == other)
760
760
761 def filerev(self):
761 def filerev(self):
762 return self._filerev
762 return self._filerev
763 def filenode(self):
763 def filenode(self):
764 return self._filenode
764 return self._filenode
765 def flags(self):
765 def flags(self):
766 return self._changectx.flags(self._path)
766 return self._changectx.flags(self._path)
767 def filelog(self):
767 def filelog(self):
768 return self._filelog
768 return self._filelog
769 def rev(self):
769 def rev(self):
770 return self._changeid
770 return self._changeid
771 def linkrev(self):
771 def linkrev(self):
772 return self._filelog.linkrev(self._filerev)
772 return self._filelog.linkrev(self._filerev)
773 def node(self):
773 def node(self):
774 return self._changectx.node()
774 return self._changectx.node()
775 def hex(self):
775 def hex(self):
776 return self._changectx.hex()
776 return self._changectx.hex()
777 def user(self):
777 def user(self):
778 return self._changectx.user()
778 return self._changectx.user()
779 def date(self):
779 def date(self):
780 return self._changectx.date()
780 return self._changectx.date()
781 def files(self):
781 def files(self):
782 return self._changectx.files()
782 return self._changectx.files()
783 def description(self):
783 def description(self):
784 return self._changectx.description()
784 return self._changectx.description()
785 def branch(self):
785 def branch(self):
786 return self._changectx.branch()
786 return self._changectx.branch()
787 def extra(self):
787 def extra(self):
788 return self._changectx.extra()
788 return self._changectx.extra()
789 def phase(self):
789 def phase(self):
790 return self._changectx.phase()
790 return self._changectx.phase()
791 def phasestr(self):
791 def phasestr(self):
792 return self._changectx.phasestr()
792 return self._changectx.phasestr()
793 def manifest(self):
793 def manifest(self):
794 return self._changectx.manifest()
794 return self._changectx.manifest()
795 def changectx(self):
795 def changectx(self):
796 return self._changectx
796 return self._changectx
797 def repo(self):
797 def repo(self):
798 return self._repo
798 return self._repo
799
799
800 def path(self):
800 def path(self):
801 return self._path
801 return self._path
802
802
803 def isbinary(self):
803 def isbinary(self):
804 try:
804 try:
805 return util.binary(self.data())
805 return util.binary(self.data())
806 except IOError:
806 except IOError:
807 return False
807 return False
808 def isexec(self):
808 def isexec(self):
809 return 'x' in self.flags()
809 return 'x' in self.flags()
810 def islink(self):
810 def islink(self):
811 return 'l' in self.flags()
811 return 'l' in self.flags()
812
812
813 def isabsent(self):
813 def isabsent(self):
814 """whether this filectx represents a file not in self._changectx
814 """whether this filectx represents a file not in self._changectx
815
815
816 This is mainly for merge code to detect change/delete conflicts. This is
816 This is mainly for merge code to detect change/delete conflicts. This is
817 expected to be True for all subclasses of basectx."""
817 expected to be True for all subclasses of basectx."""
818 return False
818 return False
819
819
820 _customcmp = False
820 _customcmp = False
821 def cmp(self, fctx):
821 def cmp(self, fctx):
822 """compare with other file context
822 """compare with other file context
823
823
824 returns True if different than fctx.
824 returns True if different than fctx.
825 """
825 """
826 if fctx._customcmp:
826 if fctx._customcmp:
827 return fctx.cmp(self)
827 return fctx.cmp(self)
828
828
829 if (fctx._filenode is None
829 if (fctx._filenode is None
830 and (self._repo._encodefilterpats
830 and (self._repo._encodefilterpats
831 # if file data starts with '\1\n', empty metadata block is
831 # if file data starts with '\1\n', empty metadata block is
832 # prepended, which adds 4 bytes to filelog.size().
832 # prepended, which adds 4 bytes to filelog.size().
833 or self.size() - 4 == fctx.size())
833 or self.size() - 4 == fctx.size())
834 or self.size() == fctx.size()):
834 or self.size() == fctx.size()):
835 return self._filelog.cmp(self._filenode, fctx.data())
835 return self._filelog.cmp(self._filenode, fctx.data())
836
836
837 return True
837 return True
838
838
839 def _adjustlinkrev(self, srcrev, inclusive=False):
839 def _adjustlinkrev(self, srcrev, inclusive=False):
840 """return the first ancestor of <srcrev> introducing <fnode>
840 """return the first ancestor of <srcrev> introducing <fnode>
841
841
842 If the linkrev of the file revision does not point to an ancestor of
842 If the linkrev of the file revision does not point to an ancestor of
843 srcrev, we'll walk down the ancestors until we find one introducing
843 srcrev, we'll walk down the ancestors until we find one introducing
844 this file revision.
844 this file revision.
845
845
846 :srcrev: the changeset revision we search ancestors from
846 :srcrev: the changeset revision we search ancestors from
847 :inclusive: if true, the src revision will also be checked
847 :inclusive: if true, the src revision will also be checked
848 """
848 """
849 repo = self._repo
849 repo = self._repo
850 cl = repo.unfiltered().changelog
850 cl = repo.unfiltered().changelog
851 mfl = repo.manifestlog
851 mfl = repo.manifestlog
852 # fetch the linkrev
852 # fetch the linkrev
853 lkr = self.linkrev()
853 lkr = self.linkrev()
854 # hack to reuse ancestor computation when searching for renames
854 # hack to reuse ancestor computation when searching for renames
855 memberanc = getattr(self, '_ancestrycontext', None)
855 memberanc = getattr(self, '_ancestrycontext', None)
856 iteranc = None
856 iteranc = None
857 if srcrev is None:
857 if srcrev is None:
858 # wctx case, used by workingfilectx during mergecopy
858 # wctx case, used by workingfilectx during mergecopy
859 revs = [p.rev() for p in self._repo[None].parents()]
859 revs = [p.rev() for p in self._repo[None].parents()]
860 inclusive = True # we skipped the real (revless) source
860 inclusive = True # we skipped the real (revless) source
861 else:
861 else:
862 revs = [srcrev]
862 revs = [srcrev]
863 if memberanc is None:
863 if memberanc is None:
864 memberanc = iteranc = cl.ancestors(revs, lkr,
864 memberanc = iteranc = cl.ancestors(revs, lkr,
865 inclusive=inclusive)
865 inclusive=inclusive)
866 # check if this linkrev is an ancestor of srcrev
866 # check if this linkrev is an ancestor of srcrev
867 if lkr not in memberanc:
867 if lkr not in memberanc:
868 if iteranc is None:
868 if iteranc is None:
869 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
869 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
870 fnode = self._filenode
870 fnode = self._filenode
871 path = self._path
871 path = self._path
872 for a in iteranc:
872 for a in iteranc:
873 ac = cl.read(a) # get changeset data (we avoid object creation)
873 ac = cl.read(a) # get changeset data (we avoid object creation)
874 if path in ac[3]: # checking the 'files' field.
874 if path in ac[3]: # checking the 'files' field.
875 # The file has been touched, check if the content is
875 # The file has been touched, check if the content is
876 # similar to the one we search for.
876 # similar to the one we search for.
877 if fnode == mfl[ac[0]].readfast().get(path):
877 if fnode == mfl[ac[0]].readfast().get(path):
878 return a
878 return a
879 # In theory, we should never get out of that loop without a result.
879 # In theory, we should never get out of that loop without a result.
880 # But if manifest uses a buggy file revision (not children of the
880 # But if manifest uses a buggy file revision (not children of the
881 # one it replaces) we could. Such a buggy situation will likely
881 # one it replaces) we could. Such a buggy situation will likely
882 # result is crash somewhere else at to some point.
882 # result is crash somewhere else at to some point.
883 return lkr
883 return lkr
884
884
885 def introrev(self):
885 def introrev(self):
886 """return the rev of the changeset which introduced this file revision
886 """return the rev of the changeset which introduced this file revision
887
887
888 This method is different from linkrev because it take into account the
888 This method is different from linkrev because it take into account the
889 changeset the filectx was created from. It ensures the returned
889 changeset the filectx was created from. It ensures the returned
890 revision is one of its ancestors. This prevents bugs from
890 revision is one of its ancestors. This prevents bugs from
891 'linkrev-shadowing' when a file revision is used by multiple
891 'linkrev-shadowing' when a file revision is used by multiple
892 changesets.
892 changesets.
893 """
893 """
894 lkr = self.linkrev()
894 lkr = self.linkrev()
895 attrs = vars(self)
895 attrs = vars(self)
896 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
896 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
897 if noctx or self.rev() == lkr:
897 if noctx or self.rev() == lkr:
898 return self.linkrev()
898 return self.linkrev()
899 return self._adjustlinkrev(self.rev(), inclusive=True)
899 return self._adjustlinkrev(self.rev(), inclusive=True)
900
900
901 def _parentfilectx(self, path, fileid, filelog):
901 def _parentfilectx(self, path, fileid, filelog):
902 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
902 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
903 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
903 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
904 if '_changeid' in vars(self) or '_changectx' in vars(self):
904 if '_changeid' in vars(self) or '_changectx' in vars(self):
905 # If self is associated with a changeset (probably explicitly
905 # If self is associated with a changeset (probably explicitly
906 # fed), ensure the created filectx is associated with a
906 # fed), ensure the created filectx is associated with a
907 # changeset that is an ancestor of self.changectx.
907 # changeset that is an ancestor of self.changectx.
908 # This lets us later use _adjustlinkrev to get a correct link.
908 # This lets us later use _adjustlinkrev to get a correct link.
909 fctx._descendantrev = self.rev()
909 fctx._descendantrev = self.rev()
910 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
910 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
911 elif '_descendantrev' in vars(self):
911 elif '_descendantrev' in vars(self):
912 # Otherwise propagate _descendantrev if we have one associated.
912 # Otherwise propagate _descendantrev if we have one associated.
913 fctx._descendantrev = self._descendantrev
913 fctx._descendantrev = self._descendantrev
914 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
914 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
915 return fctx
915 return fctx
916
916
917 def parents(self):
917 def parents(self):
918 _path = self._path
918 _path = self._path
919 fl = self._filelog
919 fl = self._filelog
920 parents = self._filelog.parents(self._filenode)
920 parents = self._filelog.parents(self._filenode)
921 pl = [(_path, node, fl) for node in parents if node != nullid]
921 pl = [(_path, node, fl) for node in parents if node != nullid]
922
922
923 r = fl.renamed(self._filenode)
923 r = fl.renamed(self._filenode)
924 if r:
924 if r:
925 # - In the simple rename case, both parent are nullid, pl is empty.
925 # - In the simple rename case, both parent are nullid, pl is empty.
926 # - In case of merge, only one of the parent is null id and should
926 # - In case of merge, only one of the parent is null id and should
927 # be replaced with the rename information. This parent is -always-
927 # be replaced with the rename information. This parent is -always-
928 # the first one.
928 # the first one.
929 #
929 #
930 # As null id have always been filtered out in the previous list
930 # As null id have always been filtered out in the previous list
931 # comprehension, inserting to 0 will always result in "replacing
931 # comprehension, inserting to 0 will always result in "replacing
932 # first nullid parent with rename information.
932 # first nullid parent with rename information.
933 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
933 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
934
934
935 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
935 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
936
936
937 def p1(self):
937 def p1(self):
938 return self.parents()[0]
938 return self.parents()[0]
939
939
940 def p2(self):
940 def p2(self):
941 p = self.parents()
941 p = self.parents()
942 if len(p) == 2:
942 if len(p) == 2:
943 return p[1]
943 return p[1]
944 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
944 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
945
945
946 def annotate(self, follow=False, linenumber=False, diffopts=None):
946 def annotate(self, follow=False, linenumber=False, diffopts=None):
947 '''returns a list of tuples of ((ctx, number), line) for each line
947 '''returns a list of tuples of ((ctx, number), line) for each line
948 in the file, where ctx is the filectx of the node where
948 in the file, where ctx is the filectx of the node where
949 that line was last changed; if linenumber parameter is true, number is
949 that line was last changed; if linenumber parameter is true, number is
950 the line number at the first appearance in the managed file, otherwise,
950 the line number at the first appearance in the managed file, otherwise,
951 number has a fixed value of False.
951 number has a fixed value of False.
952 '''
952 '''
953
953
954 def lines(text):
954 def lines(text):
955 if text.endswith("\n"):
955 if text.endswith("\n"):
956 return text.count("\n")
956 return text.count("\n")
957 return text.count("\n") + int(bool(text))
957 return text.count("\n") + int(bool(text))
958
958
959 if linenumber:
959 if linenumber:
960 def decorate(text, rev):
960 def decorate(text, rev):
961 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
961 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
962 else:
962 else:
963 def decorate(text, rev):
963 def decorate(text, rev):
964 return ([(rev, False)] * lines(text), text)
964 return ([(rev, False)] * lines(text), text)
965
965
966 def pair(parent, child):
966 def pair(parent, child):
967 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
967 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
968 for (a1, a2, b1, b2), t in blocks:
968 for (a1, a2, b1, b2), t in blocks:
969 # Changed blocks ('!') or blocks made only of blank lines ('~')
969 # Changed blocks ('!') or blocks made only of blank lines ('~')
970 # belong to the child.
970 # belong to the child.
971 if t == '=':
971 if t == '=':
972 child[0][b1:b2] = parent[0][a1:a2]
972 child[0][b1:b2] = parent[0][a1:a2]
973 return child
973 return child
974
974
975 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
975 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
976
976
977 def parents(f):
977 def parents(f):
978 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
978 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
979 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
979 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
980 # from the topmost introrev (= srcrev) down to p.linkrev() if it
980 # from the topmost introrev (= srcrev) down to p.linkrev() if it
981 # isn't an ancestor of the srcrev.
981 # isn't an ancestor of the srcrev.
982 f._changeid
982 f._changeid
983 pl = f.parents()
983 pl = f.parents()
984
984
985 # Don't return renamed parents if we aren't following.
985 # Don't return renamed parents if we aren't following.
986 if not follow:
986 if not follow:
987 pl = [p for p in pl if p.path() == f.path()]
987 pl = [p for p in pl if p.path() == f.path()]
988
988
989 # renamed filectx won't have a filelog yet, so set it
989 # renamed filectx won't have a filelog yet, so set it
990 # from the cache to save time
990 # from the cache to save time
991 for p in pl:
991 for p in pl:
992 if not '_filelog' in p.__dict__:
992 if not '_filelog' in p.__dict__:
993 p._filelog = getlog(p.path())
993 p._filelog = getlog(p.path())
994
994
995 return pl
995 return pl
996
996
997 # use linkrev to find the first changeset where self appeared
997 # use linkrev to find the first changeset where self appeared
998 base = self
998 base = self
999 introrev = self.introrev()
999 introrev = self.introrev()
1000 if self.rev() != introrev:
1000 if self.rev() != introrev:
1001 base = self.filectx(self.filenode(), changeid=introrev)
1001 base = self.filectx(self.filenode(), changeid=introrev)
1002 if getattr(base, '_ancestrycontext', None) is None:
1002 if getattr(base, '_ancestrycontext', None) is None:
1003 cl = self._repo.changelog
1003 cl = self._repo.changelog
1004 if introrev is None:
1004 if introrev is None:
1005 # wctx is not inclusive, but works because _ancestrycontext
1005 # wctx is not inclusive, but works because _ancestrycontext
1006 # is used to test filelog revisions
1006 # is used to test filelog revisions
1007 ac = cl.ancestors([p.rev() for p in base.parents()],
1007 ac = cl.ancestors([p.rev() for p in base.parents()],
1008 inclusive=True)
1008 inclusive=True)
1009 else:
1009 else:
1010 ac = cl.ancestors([introrev], inclusive=True)
1010 ac = cl.ancestors([introrev], inclusive=True)
1011 base._ancestrycontext = ac
1011 base._ancestrycontext = ac
1012
1012
1013 # This algorithm would prefer to be recursive, but Python is a
1013 # This algorithm would prefer to be recursive, but Python is a
1014 # bit recursion-hostile. Instead we do an iterative
1014 # bit recursion-hostile. Instead we do an iterative
1015 # depth-first search.
1015 # depth-first search.
1016
1016
1017 # 1st DFS pre-calculates pcache and needed
1017 # 1st DFS pre-calculates pcache and needed
1018 visit = [base]
1018 visit = [base]
1019 pcache = {}
1019 pcache = {}
1020 needed = {base: 1}
1020 needed = {base: 1}
1021 while visit:
1021 while visit:
1022 f = visit.pop()
1022 f = visit.pop()
1023 if f in pcache:
1023 if f in pcache:
1024 continue
1024 continue
1025 pl = parents(f)
1025 pl = parents(f)
1026 pcache[f] = pl
1026 pcache[f] = pl
1027 for p in pl:
1027 for p in pl:
1028 needed[p] = needed.get(p, 0) + 1
1028 needed[p] = needed.get(p, 0) + 1
1029 if p not in pcache:
1029 if p not in pcache:
1030 visit.append(p)
1030 visit.append(p)
1031
1031
1032 # 2nd DFS does the actual annotate
1032 # 2nd DFS does the actual annotate
1033 visit[:] = [base]
1033 visit[:] = [base]
1034 hist = {}
1034 hist = {}
1035 while visit:
1035 while visit:
1036 f = visit[-1]
1036 f = visit[-1]
1037 if f in hist:
1037 if f in hist:
1038 visit.pop()
1038 visit.pop()
1039 continue
1039 continue
1040
1040
1041 ready = True
1041 ready = True
1042 pl = pcache[f]
1042 pl = pcache[f]
1043 for p in pl:
1043 for p in pl:
1044 if p not in hist:
1044 if p not in hist:
1045 ready = False
1045 ready = False
1046 visit.append(p)
1046 visit.append(p)
1047 if ready:
1047 if ready:
1048 visit.pop()
1048 visit.pop()
1049 curr = decorate(f.data(), f)
1049 curr = decorate(f.data(), f)
1050 for p in pl:
1050 for p in pl:
1051 curr = pair(hist[p], curr)
1051 curr = pair(hist[p], curr)
1052 if needed[p] == 1:
1052 if needed[p] == 1:
1053 del hist[p]
1053 del hist[p]
1054 del needed[p]
1054 del needed[p]
1055 else:
1055 else:
1056 needed[p] -= 1
1056 needed[p] -= 1
1057
1057
1058 hist[f] = curr
1058 hist[f] = curr
1059 del pcache[f]
1059 del pcache[f]
1060
1060
1061 return zip(hist[base][0], hist[base][1].splitlines(True))
1061 return zip(hist[base][0], hist[base][1].splitlines(True))
1062
1062
def ancestors(self, followfirst=False):
    """Yield the file ancestors of this filectx, newest first.

    Candidates are keyed by ``(linkrev, filenode)`` and the largest key
    is emitted first, so more recently committed ancestors come before
    older ones.  When ``followfirst`` is true only first parents are
    followed.
    """
    limit = 1 if followfirst else None
    pending = {}
    cursor = self
    while True:
        for anc in cursor.parents()[:limit]:
            pending[(anc.linkrev(), anc.filenode())] = anc
        if not pending:
            return
        cursor = pending.pop(max(pending))
        yield cursor
1078
1078
class filectx(basefilectx):
    """Convenient access to the data of one revision of one file."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Remember only what the caller pinned down; everything else is
        # derived lazily through propertycache attributes.
        if filelog is not None:
            self._filelog = filelog
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # A linkrev may point at a revision hidden by the current
            # repoview filter, in which case building the changectx above
            # fails.  Falling back to the unfiltered repository is a cheap
            # band-aid: it trades a crash for possibly-incorrect (but
            # historically tolerated) behavior.  The contexts built here
            # never feed operations that are sensitive to filtering, so
            # this is acceptable until the general linkrev/filtering
            # problems are solved properly.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw revision bytes, without applying any stored flags/filters
        return self._filelog.revision(self._filenode, raw=True)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        A copy recorded in the file revision is reported only when the
        file revision's linkrev points back at this changeset, or when
        both changeset parents carry a different file revision.
        """
        renameinfo = self._filelog.renamed(self._filenode)
        # no rename recorded, or this changeset introduced the file
        # revision: report the filelog's answer as-is
        if not renameinfo or self.rev() == self.linkrev():
            return renameinfo

        path = self.path()
        fnode = self._filenode
        for parent in self._changectx.parents():
            try:
                if fnode == parent.filenode(path):
                    # a parent already holds this exact file revision, so
                    # nothing was renamed in this changeset
                    return None
            except error.LookupError:
                pass
        return renameinfo

    def children(self):
        # hard for renames
        childnodes = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=node,
                        filelog=self._filelog)
                for node in childnodes]
1179
1179
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # a '!' block is an actual change touching the requested range
    diffinrange = False
    for _block, btype in inrangeblocks:
        if btype == '!':
            diffinrange = True
            break
    return diffinrange, linerange1
1189
1189
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        ctx, linerange2 = visit.pop(max(visit))
        parents = ctx.parents()
        if followfirst:
            parents = parents[:1]
        if not parents:
            # The block was born in the root revision.
            yield ctx, linerange2
            continue
        inrange = False
        for parent in parents:
            parentinrange, linerange1 = _changesrange(parent, ctx,
                                                      linerange2, diffopts)
            inrange = inrange or parentinrange
            if linerange1[0] == linerange1[1]:
                # An empty parent range means the block was introduced in
                # 'ctx'; there is nothing further to follow on this branch.
                continue
            # Seed _descendantrev with 'ctx' (a known descendant) so that
            # when _adjustlinkrev runs for 'parent' it starts from this
            # descendant (as srcrev) rather than the possibly-topmost
            # introrev.
            parent._descendantrev = ctx.rev()
            visit[parent.linkrev(), parent.filenode()] = parent, linerange1
        if inrange:
            yield ctx, linerange2
1224
1224
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # Start by possibly emitting 'fctx' itself, when it changed the range
    # relative to its own parents.
    try:
        first, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if first == fctx:
            yield first, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    flog = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for frev in flog.descendants([fctx.filerev()]):
        ctx = fctx.filectx(frev)
        inrange = False
        for prev in flog.parentrevs(frev):
            try:
                parent, linerange2 = seen[prev]
            except KeyError:
                # nullrev, or a branch outside the block's history
                continue
            parentinrange, linerange1 = _changesrange(ctx, parent,
                                                      linerange2, diffopts)
            inrange = inrange or parentinrange
        # If 'frev' was already seen (it's a merge), we assume its line
        # range is identical whichever parent was used to compute it.
        assert frev not in seen or seen[frev][1] == linerange1, (
            'computed line range for %s is not consistent between '
            'ancestor branches' % ctx)
        seen[frev] = ctx, linerange1
        if inrange:
            yield ctx, linerange1
1262
1262
class committablectx(basectx):
    """Common behavior for contexts that can be committed.

    Concrete subclasses include workingctx and memctx.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # Pin these attributes only when explicit values were supplied;
        # otherwise the propertycache fallbacks below compute them lazily.
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # "<p1>+" marks an uncommitted context on top of its first parent
        return "%s+" % self._parents[0]

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Fallback flag lookup for filesystems that cannot report
        # exec/symlink bits themselves.
        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # single parent: flags simply come from that parent's manifest
            pman = parents[0].manifest()
            def flagof(f):
                f = copiesget(f, f)
                return pman.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def flagof(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return flagof

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        st = self._status
        return sorted(st.modified + st.added + st.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries the bookmarks of all its parents
        return [mark for p in self.parents() for mark in p.bookmarks()]

    def phase(self):
        # default to draft, but never lower than any parent's phase
        return max([phases.draft] + [p.phase() for p in self.parents()])

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # raw string: __dict__ keys are str on both py2 and py3
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for parent in self._parents:
            yield parent
        parentrevs = [p.rev() for p in self._parents]
        for rev in self._repo.changelog.ancestors(parentrevs):
            yield changectx(self._repo, rev)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        self._repo.dirstate.beginparentchange()
        for fname in self.modified() + self.added():
            self._repo.dirstate.normal(fname)
        for fname in self.removed():
            self._repo.dirstate.drop(fname)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
1451
1451
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all state handling lives in committablectx; workingctx only
        # specializes behavior through method overrides below
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)
1464
1464
def __iter__(self):
    """Iterate over tracked file names, skipping those marked removed."""
    dirstate = self._repo.dirstate
    for fname in dirstate:
        if dirstate[fname] != 'r':
            yield fname
1470
1470
def __contains__(self, key):
    """True when `key` is tracked, i.e. neither unknown ('?') nor
    removed ('r') in the dirstate."""
    state = self._repo.dirstate[key]
    return state not in "?r"
1473
1473
def hex(self):
    # the working directory is identified by the fixed pseudo-id wdirid
    return hex(wdirid)
1476
1476
@propertycache
def _parents(self):
    """The changectx parents of the working directory (one or two)."""
    nodes = self._repo.dirstate.parents()
    if nodes[1] == nullid:
        # single parent: drop the null second entry
        nodes = nodes[:-1]
    return [changectx(self._repo, n) for n in nodes]
1483
1483
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    # workingfilectx reads file data straight from the filesystem
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1488
1488
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # a dirty subrepo makes the whole working directory dirty
    for subpath in sorted(self.substate):
        if self.sub(subpath).dirty():
            return True
    # then inspect the working directory itself; this deliberately
    # returns the first truthy piece of evidence rather than a bool
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1500
1500
def add(self, list, prefix=""):
    """Schedule the given files for tracking; return the rejected ones."""
    join = lambda f: os.path.join(prefix, f)
    with self._repo.wlock():
        ui, dirstate = self._repo.ui, self._repo.dirstate
        rejected = []
        lstat = self._repo.wvfs.lstat
        for fname in list:
            # refuse names that are not portable across platforms
            scmutil.checkportable(ui, join(fname))
            try:
                fstat = lstat(fname)
            except OSError:
                ui.warn(_("%s does not exist!\n") % join(fname))
                rejected.append(fname)
                continue
            if fstat.st_size > 10000000:
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (fname, 3 * fstat.st_size // 1000000, join(fname)))
            if not (stat.S_ISREG(fstat.st_mode)
                    or stat.S_ISLNK(fstat.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % join(fname))
                rejected.append(fname)
            elif dirstate[fname] in 'amn':
                # already added, merged, or normal
                ui.warn(_("%s already tracked!\n") % join(fname))
            elif dirstate[fname] == 'r':
                # previously removed: resurrect instead of re-adding
                dirstate.normallookup(fname)
            else:
                dirstate.add(fname)
        return rejected
1532
1532
def forget(self, files, prefix=""):
    """Stop tracking the given files; return those that were not tracked."""
    join = lambda f: os.path.join(prefix, f)
    with self._repo.wlock():
        rejected = []
        dirstate = self._repo.dirstate
        for fname in files:
            if fname not in dirstate:
                self._repo.ui.warn(_("%s not tracked!\n") % join(fname))
                rejected.append(fname)
            elif dirstate[fname] != 'a':
                # already committed at some point: mark as removed
                dirstate.remove(fname)
            else:
                # added but never committed: simply drop it
                dirstate.drop(fname)
        return rejected
1546
1546
def undelete(self, list):
    """Restore removed files from a parent revision's contents."""
    pctxs = self.parents()
    with self._repo.wlock():
        for fname in list:
            if self._repo.dirstate[fname] != 'r':
                self._repo.ui.warn(_("%s not removed!\n") % fname)
            else:
                # prefer the first parent's copy of the file, falling
                # back to the second parent
                fctx = None
                if fname in pctxs[0]:
                    fctx = pctxs[0][fname]
                if not fctx:
                    fctx = pctxs[1][fname]
                data = fctx.data()
                self._repo.wwrite(fname, data, fctx.flags())
                self._repo.dirstate.normal(fname)
1558
1558
1559 def copy(self, source, dest):
1559 def copy(self, source, dest):
1560 try:
1560 try:
1561 st = self._repo.wvfs.lstat(dest)
1561 st = self._repo.wvfs.lstat(dest)
1562 except OSError as err:
1562 except OSError as err:
1563 if err.errno != errno.ENOENT:
1563 if err.errno != errno.ENOENT:
1564 raise
1564 raise
1565 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1565 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1566 return
1566 return
1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1568 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1568 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1569 "symbolic link\n") % dest)
1569 "symbolic link\n") % dest)
1570 else:
1570 else:
1571 with self._repo.wlock():
1571 with self._repo.wlock():
1572 if self._repo.dirstate[dest] in '?':
1572 if self._repo.dirstate[dest] in '?':
1573 self._repo.dirstate.add(dest)
1573 self._repo.dirstate.add(dest)
1574 elif self._repo.dirstate[dest] in 'r':
1574 elif self._repo.dirstate[dest] in 'r':
1575 self._repo.dirstate.normallookup(dest)
1575 self._repo.dirstate.normallookup(dest)
1576 self._repo.dirstate.copy(source, dest)
1576 self._repo.dirstate.copy(source, dest)
1577
1577
1578 def match(self, pats=None, include=None, exclude=None, default='glob',
1578 def match(self, pats=None, include=None, exclude=None, default='glob',
1579 listsubrepos=False, badfn=None):
1579 listsubrepos=False, badfn=None):
1580 if pats is None:
1580 if pats is None:
1581 pats = []
1581 pats = []
1582 r = self._repo
1582 r = self._repo
1583
1583
1584 # Only a case insensitive filesystem needs magic to translate user input
1584 # Only a case insensitive filesystem needs magic to translate user input
1585 # to actual case in the filesystem.
1585 # to actual case in the filesystem.
1586 matcherfunc = matchmod.match
1586 matcherfunc = matchmod.match
1587 if not util.fscasesensitive(r.root):
1587 if not util.fscasesensitive(r.root):
1588 matcherfunc = matchmod.icasefsmatcher
1588 matcherfunc = matchmod.icasefsmatcher
1589 return matcherfunc(r.root, r.getcwd(), pats,
1589 return matcherfunc(r.root, r.getcwd(), pats,
1590 include, exclude, default,
1590 include, exclude, default,
1591 auditor=r.auditor, ctx=self,
1591 auditor=r.auditor, ctx=self,
1592 listsubrepos=listsubrepos, badfn=badfn)
1592 listsubrepos=listsubrepos, badfn=badfn)
1593
1593
1594 def _filtersuspectsymlink(self, files):
1594 def _filtersuspectsymlink(self, files):
1595 if not files or self._repo.dirstate._checklink:
1595 if not files or self._repo.dirstate._checklink:
1596 return files
1596 return files
1597
1597
1598 # Symlink placeholders may get non-symlink-like contents
1598 # Symlink placeholders may get non-symlink-like contents
1599 # via user error or dereferencing by NFS or Samba servers,
1599 # via user error or dereferencing by NFS or Samba servers,
1600 # so we filter out any placeholders that don't look like a
1600 # so we filter out any placeholders that don't look like a
1601 # symlink
1601 # symlink
1602 sane = []
1602 sane = []
1603 for f in files:
1603 for f in files:
1604 if self.flags(f) == 'l':
1604 if self.flags(f) == 'l':
1605 d = self[f].data()
1605 d = self[f].data()
1606 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1606 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1607 self._repo.ui.debug('ignoring suspect symlink placeholder'
1607 self._repo.ui.debug('ignoring suspect symlink placeholder'
1608 ' "%s"\n' % f)
1608 ' "%s"\n' % f)
1609 continue
1609 continue
1610 sane.append(f)
1610 sane.append(f)
1611 return sane
1611 return sane
1612
1612
    def _checklookup(self, files):
        """Content-compare ``files`` against the first parent.

        Returns a pair ``(modified, fixup)``: ``modified`` holds files
        whose flags or content really differ from p1; ``fixup`` holds
        files that turned out to be clean.  Clean entries are refreshed
        in the dirstate opportunistically -- best effort only, skipped
        entirely when the wlock cannot be taken without waiting.
        """
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                pass
        return modified, fixup
1648
1648
    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # 'cmp' lists files the dirstate could not classify by stat alone;
        # they need a real content comparison against p1
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1679
1679
1680 @propertycache
1680 @propertycache
1681 def _manifest(self):
1681 def _manifest(self):
1682 """generate a manifest corresponding to the values in self._status
1682 """generate a manifest corresponding to the values in self._status
1683
1683
1684 This reuse the file nodeid from parent, but we use special node
1684 This reuse the file nodeid from parent, but we use special node
1685 identifiers for added and modified files. This is used by manifests
1685 identifiers for added and modified files. This is used by manifests
1686 merge to see that files are different and by update logic to avoid
1686 merge to see that files are different and by update logic to avoid
1687 deleting newly added files.
1687 deleting newly added files.
1688 """
1688 """
1689 return self._buildstatusmanifest(self._status)
1689 return self._buildstatusmanifest(self._status)
1690
1690
1691 def _buildstatusmanifest(self, status):
1691 def _buildstatusmanifest(self, status):
1692 """Builds a manifest that includes the given status results."""
1692 """Builds a manifest that includes the given status results."""
1693 parents = self.parents()
1693 parents = self.parents()
1694
1694
1695 man = parents[0].manifest().copy()
1695 man = parents[0].manifest().copy()
1696
1696
1697 ff = self._flagfunc
1697 ff = self._flagfunc
1698 for i, l in ((addednodeid, status.added),
1698 for i, l in ((addednodeid, status.added),
1699 (modifiednodeid, status.modified)):
1699 (modifiednodeid, status.modified)):
1700 for f in l:
1700 for f in l:
1701 man[f] = i
1701 man[f] = i
1702 try:
1702 try:
1703 man.setflag(f, ff(f))
1703 man.setflag(f, ff(f))
1704 except OSError:
1704 except OSError:
1705 pass
1705 pass
1706
1706
1707 for f in status.deleted + status.removed:
1707 for f in status.deleted + status.removed:
1708 if f in man:
1708 if f in man:
1709 del man[f]
1709 del man[f]
1710
1710
1711 return man
1711 return man
1712
1712
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the incoming 's' is intentionally discarded: the dirstate is
        # authoritative for the working directory, so recompute from it
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # not comparing against p1: fall back to the generic
            # manifest-based status computation in the superclass
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1732
1732
1733 def _matchstatus(self, other, match):
1733 def _matchstatus(self, other, match):
1734 """override the match method with a filter for directory patterns
1734 """override the match method with a filter for directory patterns
1735
1735
1736 We use inheritance to customize the match.bad method only in cases of
1736 We use inheritance to customize the match.bad method only in cases of
1737 workingctx since it belongs only to the working directory when
1737 workingctx since it belongs only to the working directory when
1738 comparing against the parent changeset.
1738 comparing against the parent changeset.
1739
1739
1740 If we aren't comparing against the working directory's parent, then we
1740 If we aren't comparing against the working directory's parent, then we
1741 just use the default match object sent to us.
1741 just use the default match object sent to us.
1742 """
1742 """
1743 superself = super(workingctx, self)
1743 superself = super(workingctx, self)
1744 match = superself._matchstatus(other, match)
1744 match = superself._matchstatus(other, match)
1745 if other != self._repo['.']:
1745 if other != self._repo['.']:
1746 def bad(f, msg):
1746 def bad(f, msg):
1747 # 'f' may be a directory pattern from 'match.files()',
1747 # 'f' may be a directory pattern from 'match.files()',
1748 # so 'f not in ctx1' is not enough
1748 # so 'f not in ctx1' is not enough
1749 if f not in other and not other.hasdir(f):
1749 if f not in other and not other.hasdir(f):
1750 self._repo.ui.warn('%s: %s\n' %
1750 self._repo.ui.warn('%s: %s\n' %
1751 (self._repo.dirstate.pathto(f), msg))
1751 (self._repo.dirstate.pathto(f), msg))
1752 match.bad = bad
1752 match.bad = bad
1753 return match
1753 return match
1754
1754
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # nothing is committed yet, so there is no changelog entry
        self._changeid = None
        self._filerev = self._filenode = None

        # when not supplied, _filelog/_changectx are presumably provided
        # lazily by subclasses (e.g. via propertycache) -- TODO confirm
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context is always truthy
        return True

    # Python 3 spelling of the truthiness hook
    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "not present in this context's manifest"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # renamed() yields (source path, source filenode); the
            # source's filelog is unknown here, hence None
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # skip parents in which the file does not exist (nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file context has no descendants
        return []
1801
1801
class workingfilectx(committablefilectx):
    """File context backed by a file in the working directory.

    Provides convenient access to the on-disk data, size, mtime and
    copy information of a single working-directory file.
    """
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """file contents as read from the working directory"""
        return self._repo.wread(self._path)

    def renamed(self):
        """return (source, p1 filenode of source) if copied, else None"""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        """on-disk size in bytes (symlinks are not followed)"""
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """(mtime, tz) of the on-disk file; falls back to the
        changectx date when the file is missing"""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1847
1847
class workingcommitctx(workingctx):
    """Context describing the revision currently being committed.

    Working-directory changes that are not part of this commit are
    hidden: they show up as "clean" through this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ and call its parent
        # directly, handing over the precomputed 'changes' status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Restrict ``self._status`` to the files accepted by ``match``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        status = self._status
        return scmutil.status([f for f in status.modified if match(f)],
                              [f for f in status.added if match(f)],
                              [f for f in status.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """set of every file touched (modified, added or removed) here"""
        status = self._status
        return (set(status.modified)
                | set(status.added)
                | set(status.removed))
1885
1885
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes on the path alone.

    util.cachefunc would key on every argument, creating a reference
    cycle because the repo and memctx are among them; keying on the
    path only avoids that.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            entry = cache[path] = func(repo, memctx, path)
            return entry

    return getfilectx
1901
1901
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        # not committed yet, so no revision number or node
        self._rev = None
        self._node = None
        # None parents become nullid so both slots are always filled
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no parent filenodes
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: content present
                modified.append(f)
            else:
                # a false value (None) from filectxfn marks removal,
                # per the class docstring
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2038
2038
2039 class memfilectx(committablefilectx):
2039 class memfilectx(committablefilectx):
2040 """memfilectx represents an in-memory file to commit.
2040 """memfilectx represents an in-memory file to commit.
2041
2041
2042 See memctx and committablefilectx for more details.
2042 See memctx and committablefilectx for more details.
2043 """
2043 """
2044 def __init__(self, repo, path, data, islink=False,
2044 def __init__(self, repo, path, data, islink=False,
2045 isexec=False, copied=None, memctx=None):
2045 isexec=False, copied=None, memctx=None):
2046 """
2046 """
2047 path is the normalized file path relative to repository root.
2047 path is the normalized file path relative to repository root.
2048 data is the file content as a string.
2048 data is the file content as a string.
2049 islink is True if the file is a symbolic link.
2049 islink is True if the file is a symbolic link.
2050 isexec is True if the file is executable.
2050 isexec is True if the file is executable.
2051 copied is the source file path if current file was copied in the
2051 copied is the source file path if current file was copied in the
2052 revision being committed, or None."""
2052 revision being committed, or None."""
2053 super(memfilectx, self).__init__(repo, path, None, memctx)
2053 super(memfilectx, self).__init__(repo, path, None, memctx)
2054 self._data = data
2054 self._data = data
2055 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2055 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2056 self._copied = None
2056 self._copied = None
2057 if copied:
2057 if copied:
2058 self._copied = (copied, nullid)
2058 self._copied = (copied, nullid)
2059
2059
2060 def data(self):
2060 def data(self):
2061 return self._data
2061 return self._data
2062 def size(self):
2062 def size(self):
2063 return len(self.data())
2063 return len(self.data())
2064 def flags(self):
2064 def flags(self):
2065 return self._flags
2065 return self._flags
2066 def renamed(self):
2066 def renamed(self):
2067 return self._copied
2067 return self._copied
2068
2068
2069 def remove(self, ignoremissing=False):
2069 def remove(self, ignoremissing=False):
2070 """wraps unlink for a repo's working directory"""
2070 """wraps unlink for a repo's working directory"""
2071 # need to figure out what to do here
2071 # need to figure out what to do here
2072 del self._changectx[self._path]
2072 del self._changectx[self._path]
2073
2073
2074 def write(self, data, flags):
2074 def write(self, data, flags):
2075 """wraps repo.wwrite"""
2075 """wraps repo.wwrite"""
2076 self._data = data
2076 self._data = data
2077
2077
2078 class metadataonlyctx(committablectx):
2078 class metadataonlyctx(committablectx):
2079 """Like memctx but it's reusing the manifest of different commit.
2079 """Like memctx but it's reusing the manifest of different commit.
2080 Intended to be used by lightweight operations that are creating
2080 Intended to be used by lightweight operations that are creating
2081 metadata-only changes.
2081 metadata-only changes.
2082
2082
2083 Revision information is supplied at initialization time. 'repo' is the
2083 Revision information is supplied at initialization time. 'repo' is the
2084 current localrepo, 'ctx' is original revision which manifest we're reuisng
2084 current localrepo, 'ctx' is original revision which manifest we're reuisng
2085 'parents' is a sequence of two parent revisions identifiers (pass None for
2085 'parents' is a sequence of two parent revisions identifiers (pass None for
2086 every missing parent), 'text' is the commit.
2086 every missing parent), 'text' is the commit.
2087
2087
2088 user receives the committer name and defaults to current repository
2088 user receives the committer name and defaults to current repository
2089 username, date is the commit date in any format supported by
2089 username, date is the commit date in any format supported by
2090 util.parsedate() and defaults to current date, extra is a dictionary of
2090 util.parsedate() and defaults to current date, extra is a dictionary of
2091 metadata or is left empty.
2091 metadata or is left empty.
2092 """
2092 """
2093 def __new__(cls, repo, originalctx, *args, **kwargs):
2093 def __new__(cls, repo, originalctx, *args, **kwargs):
2094 return super(metadataonlyctx, cls).__new__(cls, repo)
2094 return super(metadataonlyctx, cls).__new__(cls, repo)
2095
2095
2096 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2096 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2097 extra=None, editor=False):
2097 extra=None, editor=False):
2098 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2098 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2099 self._rev = None
2099 self._rev = None
2100 self._node = None
2100 self._node = None
2101 self._originalctx = originalctx
2101 self._originalctx = originalctx
2102 self._manifestnode = originalctx.manifestnode()
2102 self._manifestnode = originalctx.manifestnode()
2103 parents = [(p or nullid) for p in parents]
2103 parents = [(p or nullid) for p in parents]
2104 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2104 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2105
2105
2106 # sanity check to ensure that the reused manifest parents are
2106 # sanity check to ensure that the reused manifest parents are
2107 # manifests of our commit parents
2107 # manifests of our commit parents
2108 mp1, mp2 = self.manifestctx().parents
2108 mp1, mp2 = self.manifestctx().parents
2109 if p1 != nullid and p1.manifestnode() != mp1:
2109 if p1 != nullid and p1.manifestnode() != mp1:
2110 raise RuntimeError('can\'t reuse the manifest: '
2110 raise RuntimeError('can\'t reuse the manifest: '
2111 'its p1 doesn\'t match the new ctx p1')
2111 'its p1 doesn\'t match the new ctx p1')
2112 if p2 != nullid and p2.manifestnode() != mp2:
2112 if p2 != nullid and p2.manifestnode() != mp2:
2113 raise RuntimeError('can\'t reuse the manifest: '
2113 raise RuntimeError('can\'t reuse the manifest: '
2114 'its p2 doesn\'t match the new ctx p2')
2114 'its p2 doesn\'t match the new ctx p2')
2115
2115
2116 self._files = originalctx.files()
2116 self._files = originalctx.files()
2117 self.substate = {}
2117 self.substate = {}
2118
2118
2119 if extra:
2119 if extra:
2120 self._extra = extra.copy()
2120 self._extra = extra.copy()
2121 else:
2121 else:
2122 self._extra = {}
2122 self._extra = {}
2123
2123
2124 if self._extra.get('branch', '') == '':
2124 if self._extra.get('branch', '') == '':
2125 self._extra['branch'] = 'default'
2125 self._extra['branch'] = 'default'
2126
2126
2127 if editor:
2127 if editor:
2128 self._text = editor(self._repo, self, [])
2128 self._text = editor(self._repo, self, [])
2129 self._repo.savecommitmessage(self._text)
2129 self._repo.savecommitmessage(self._text)
2130
2130
2131 def manifestnode(self):
2131 def manifestnode(self):
2132 return self._manifestnode
2132 return self._manifestnode
2133
2133
2134 @propertycache
2134 @propertycache
2135 def _manifestctx(self):
2135 def _manifestctx(self):
2136 return self._repo.manifestlog[self._manifestnode]
2136 return self._repo.manifestlog[self._manifestnode]
2137
2137
2138 def filectx(self, path, filelog=None):
2138 def filectx(self, path, filelog=None):
2139 return self._originalctx.filectx(path, filelog=filelog)
2139 return self._originalctx.filectx(path, filelog=filelog)
2140
2140
2141 def commit(self):
2141 def commit(self):
2142 """commit context to the repo"""
2142 """commit context to the repo"""
2143 return self._repo.commitctx(self)
2143 return self._repo.commitctx(self)
2144
2144
2145 @property
2145 @property
2146 def _manifest(self):
2146 def _manifest(self):
2147 return self._originalctx.manifest()
2147 return self._originalctx.manifest()
2148
2148
2149 @propertycache
2149 @propertycache
2150 def _status(self):
2150 def _status(self):
2151 """Calculate exact status from ``files`` specified in the ``origctx``
2151 """Calculate exact status from ``files`` specified in the ``origctx``
2152 and parents manifests.
2152 and parents manifests.
2153 """
2153 """
2154 man1 = self.p1().manifest()
2154 man1 = self.p1().manifest()
2155 p2 = self._parents[1]
2155 p2 = self._parents[1]
2156 # "1 < len(self._parents)" can't be used for checking
2156 # "1 < len(self._parents)" can't be used for checking
2157 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2157 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2158 # explicitly initialized by the list, of which length is 2.
2158 # explicitly initialized by the list, of which length is 2.
2159 if p2.node() != nullid:
2159 if p2.node() != nullid:
2160 man2 = p2.manifest()
2160 man2 = p2.manifest()
2161 managing = lambda f: f in man1 or f in man2
2161 managing = lambda f: f in man1 or f in man2
2162 else:
2162 else:
2163 managing = lambda f: f in man1
2163 managing = lambda f: f in man1
2164
2164
2165 modified, added, removed = [], [], []
2165 modified, added, removed = [], [], []
2166 for f in self._files:
2166 for f in self._files:
2167 if not managing(f):
2167 if not managing(f):
2168 added.append(f)
2168 added.append(f)
2169 elif self[f]:
2169 elif self[f]:
2170 modified.append(f)
2170 modified.append(f)
2171 else:
2171 else:
2172 removed.append(f)
2172 removed.append(f)
2173
2173
2174 return scmutil.status(modified, added, removed, [], [], [], [])
2174 return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,1066 +1,1066 b''
1 # smartset.py - data structure for revision set
1 # smartset.py - data structure for revision set
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 util,
11 util,
12 )
12 )
13
13
14 def _formatsetrepr(r):
14 def _formatsetrepr(r):
15 """Format an optional printable representation of a set
15 """Format an optional printable representation of a set
16
16
17 ======== =================================
17 ======== =================================
18 type(r) example
18 type(r) example
19 ======== =================================
19 ======== =================================
20 tuple ('<not %r>', other)
20 tuple ('<not %r>', other)
21 str '<branch closed>'
21 str '<branch closed>'
22 callable lambda: '<branch %r>' % sorted(b)
22 callable lambda: '<branch %r>' % sorted(b)
23 object other
23 object other
24 ======== =================================
24 ======== =================================
25 """
25 """
26 if r is None:
26 if r is None:
27 return ''
27 return ''
28 elif isinstance(r, tuple):
28 elif isinstance(r, tuple):
29 return r[0] % r[1:]
29 return r[0] % r[1:]
30 elif isinstance(r, str):
30 elif isinstance(r, str):
31 return r
31 return r
32 elif callable(r):
32 elif callable(r):
33 return r()
33 return r()
34 else:
34 else:
35 return repr(r)
35 return repr(r)
36
36
37 class abstractsmartset(object):
37 class abstractsmartset(object):
38
38
39 def __nonzero__(self):
39 def __nonzero__(self):
40 """True if the smartset is not empty"""
40 """True if the smartset is not empty"""
41 raise NotImplementedError()
41 raise NotImplementedError()
42
42
43 __bool__ = __nonzero__
43 __bool__ = __nonzero__
44
44
45 def __contains__(self, rev):
45 def __contains__(self, rev):
46 """provide fast membership testing"""
46 """provide fast membership testing"""
47 raise NotImplementedError()
47 raise NotImplementedError()
48
48
49 def __iter__(self):
49 def __iter__(self):
50 """iterate the set in the order it is supposed to be iterated"""
50 """iterate the set in the order it is supposed to be iterated"""
51 raise NotImplementedError()
51 raise NotImplementedError()
52
52
53 # Attributes containing a function to perform a fast iteration in a given
53 # Attributes containing a function to perform a fast iteration in a given
54 # direction. A smartset can have none, one, or both defined.
54 # direction. A smartset can have none, one, or both defined.
55 #
55 #
56 # Default value is None instead of a function returning None to avoid
56 # Default value is None instead of a function returning None to avoid
57 # initializing an iterator just for testing if a fast method exists.
57 # initializing an iterator just for testing if a fast method exists.
58 fastasc = None
58 fastasc = None
59 fastdesc = None
59 fastdesc = None
60
60
61 def isascending(self):
61 def isascending(self):
62 """True if the set will iterate in ascending order"""
62 """True if the set will iterate in ascending order"""
63 raise NotImplementedError()
63 raise NotImplementedError()
64
64
65 def isdescending(self):
65 def isdescending(self):
66 """True if the set will iterate in descending order"""
66 """True if the set will iterate in descending order"""
67 raise NotImplementedError()
67 raise NotImplementedError()
68
68
69 def istopo(self):
69 def istopo(self):
70 """True if the set will iterate in topographical order"""
70 """True if the set will iterate in topographical order"""
71 raise NotImplementedError()
71 raise NotImplementedError()
72
72
73 def min(self):
73 def min(self):
74 """return the minimum element in the set"""
74 """return the minimum element in the set"""
75 if self.fastasc is None:
75 if self.fastasc is None:
76 v = min(self)
76 v = min(self)
77 else:
77 else:
78 for v in self.fastasc():
78 for v in self.fastasc():
79 break
79 break
80 else:
80 else:
81 raise ValueError('arg is an empty sequence')
81 raise ValueError('arg is an empty sequence')
82 self.min = lambda: v
82 self.min = lambda: v
83 return v
83 return v
84
84
85 def max(self):
85 def max(self):
86 """return the maximum element in the set"""
86 """return the maximum element in the set"""
87 if self.fastdesc is None:
87 if self.fastdesc is None:
88 return max(self)
88 return max(self)
89 else:
89 else:
90 for v in self.fastdesc():
90 for v in self.fastdesc():
91 break
91 break
92 else:
92 else:
93 raise ValueError('arg is an empty sequence')
93 raise ValueError('arg is an empty sequence')
94 self.max = lambda: v
94 self.max = lambda: v
95 return v
95 return v
96
96
97 def first(self):
97 def first(self):
98 """return the first element in the set (user iteration perspective)
98 """return the first element in the set (user iteration perspective)
99
99
100 Return None if the set is empty"""
100 Return None if the set is empty"""
101 raise NotImplementedError()
101 raise NotImplementedError()
102
102
103 def last(self):
103 def last(self):
104 """return the last element in the set (user iteration perspective)
104 """return the last element in the set (user iteration perspective)
105
105
106 Return None if the set is empty"""
106 Return None if the set is empty"""
107 raise NotImplementedError()
107 raise NotImplementedError()
108
108
109 def __len__(self):
109 def __len__(self):
110 """return the length of the smartsets
110 """return the length of the smartsets
111
111
112 This can be expensive on smartset that could be lazy otherwise."""
112 This can be expensive on smartset that could be lazy otherwise."""
113 raise NotImplementedError()
113 raise NotImplementedError()
114
114
115 def reverse(self):
115 def reverse(self):
116 """reverse the expected iteration order"""
116 """reverse the expected iteration order"""
117 raise NotImplementedError()
117 raise NotImplementedError()
118
118
119 def sort(self, reverse=True):
119 def sort(self, reverse=True):
120 """get the set to iterate in an ascending or descending order"""
120 """get the set to iterate in an ascending or descending order"""
121 raise NotImplementedError()
121 raise NotImplementedError()
122
122
123 def __and__(self, other):
123 def __and__(self, other):
124 """Returns a new object with the intersection of the two collections.
124 """Returns a new object with the intersection of the two collections.
125
125
126 This is part of the mandatory API for smartset."""
126 This is part of the mandatory API for smartset."""
127 if isinstance(other, fullreposet):
127 if isinstance(other, fullreposet):
128 return self
128 return self
129 return self.filter(other.__contains__, condrepr=other, cache=False)
129 return self.filter(other.__contains__, condrepr=other, cache=False)
130
130
131 def __add__(self, other):
131 def __add__(self, other):
132 """Returns a new object with the union of the two collections.
132 """Returns a new object with the union of the two collections.
133
133
134 This is part of the mandatory API for smartset."""
134 This is part of the mandatory API for smartset."""
135 return addset(self, other)
135 return addset(self, other)
136
136
137 def __sub__(self, other):
137 def __sub__(self, other):
138 """Returns a new object with the substraction of the two collections.
138 """Returns a new object with the substraction of the two collections.
139
139
140 This is part of the mandatory API for smartset."""
140 This is part of the mandatory API for smartset."""
141 c = other.__contains__
141 c = other.__contains__
142 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
142 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
143 cache=False)
143 cache=False)
144
144
145 def filter(self, condition, condrepr=None, cache=True):
145 def filter(self, condition, condrepr=None, cache=True):
146 """Returns this smartset filtered by condition as a new smartset.
146 """Returns this smartset filtered by condition as a new smartset.
147
147
148 `condition` is a callable which takes a revision number and returns a
148 `condition` is a callable which takes a revision number and returns a
149 boolean. Optional `condrepr` provides a printable representation of
149 boolean. Optional `condrepr` provides a printable representation of
150 the given `condition`.
150 the given `condition`.
151
151
152 This is part of the mandatory API for smartset."""
152 This is part of the mandatory API for smartset."""
153 # builtin cannot be cached. but do not needs to
153 # builtin cannot be cached. but do not needs to
154 if cache and util.safehasattr(condition, 'func_code'):
154 if cache and util.safehasattr(condition, 'func_code'):
155 condition = util.cachefunc(condition)
155 condition = util.cachefunc(condition)
156 return filteredset(self, condition, condrepr)
156 return filteredset(self, condition, condrepr)
157
157
158 class baseset(abstractsmartset):
158 class baseset(abstractsmartset):
159 """Basic data structure that represents a revset and contains the basic
159 """Basic data structure that represents a revset and contains the basic
160 operation that it should be able to perform.
160 operation that it should be able to perform.
161
161
162 Every method in this class should be implemented by any smartset class.
162 Every method in this class should be implemented by any smartset class.
163
163
164 This class could be constructed by an (unordered) set, or an (ordered)
164 This class could be constructed by an (unordered) set, or an (ordered)
165 list-like object. If a set is provided, it'll be sorted lazily.
165 list-like object. If a set is provided, it'll be sorted lazily.
166
166
167 >>> x = [4, 0, 7, 6]
167 >>> x = [4, 0, 7, 6]
168 >>> y = [5, 6, 7, 3]
168 >>> y = [5, 6, 7, 3]
169
169
170 Construct by a set:
170 Construct by a set:
171 >>> xs = baseset(set(x))
171 >>> xs = baseset(set(x))
172 >>> ys = baseset(set(y))
172 >>> ys = baseset(set(y))
173 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
173 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
174 [[0, 4, 6, 7, 3, 5], [6, 7], [0, 4]]
174 [[0, 4, 6, 7, 3, 5], [6, 7], [0, 4]]
175 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
175 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
176 ['addset', 'baseset', 'baseset']
176 ['addset', 'baseset', 'baseset']
177
177
178 Construct by a list-like:
178 Construct by a list-like:
179 >>> xs = baseset(x)
179 >>> xs = baseset(x)
180 >>> ys = baseset(i for i in y)
180 >>> ys = baseset(i for i in y)
181 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
181 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
182 [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
182 [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
183 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
183 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
184 ['addset', 'filteredset', 'filteredset']
184 ['addset', 'filteredset', 'filteredset']
185
185
186 Populate "_set" fields in the lists so set optimization may be used:
186 Populate "_set" fields in the lists so set optimization may be used:
187 >>> [1 in xs, 3 in ys]
187 >>> [1 in xs, 3 in ys]
188 [False, True]
188 [False, True]
189
189
190 Without sort(), results won't be changed:
190 Without sort(), results won't be changed:
191 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
191 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
192 [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
192 [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
193 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
193 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
194 ['addset', 'filteredset', 'filteredset']
194 ['addset', 'filteredset', 'filteredset']
195
195
196 With sort(), set optimization could be used:
196 With sort(), set optimization could be used:
197 >>> xs.sort(reverse=True)
197 >>> xs.sort(reverse=True)
198 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
198 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
199 [[7, 6, 4, 0, 5, 3], [7, 6], [4, 0]]
199 [[7, 6, 4, 0, 5, 3], [7, 6], [4, 0]]
200 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
200 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
201 ['addset', 'baseset', 'baseset']
201 ['addset', 'baseset', 'baseset']
202
202
203 >>> ys.sort()
203 >>> ys.sort()
204 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
204 >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
205 [[7, 6, 4, 0, 3, 5], [7, 6], [4, 0]]
205 [[7, 6, 4, 0, 3, 5], [7, 6], [4, 0]]
206 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
206 >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
207 ['addset', 'baseset', 'baseset']
207 ['addset', 'baseset', 'baseset']
208
208
209 istopo is preserved across set operations
209 istopo is preserved across set operations
210 >>> xs = baseset(set(x), istopo=True)
210 >>> xs = baseset(set(x), istopo=True)
211 >>> rs = xs & ys
211 >>> rs = xs & ys
212 >>> type(rs).__name__
212 >>> type(rs).__name__
213 'baseset'
213 'baseset'
214 >>> rs._istopo
214 >>> rs._istopo
215 True
215 True
216 """
216 """
217 def __init__(self, data=(), datarepr=None, istopo=False):
217 def __init__(self, data=(), datarepr=None, istopo=False):
218 """
218 """
219 datarepr: a tuple of (format, obj, ...), a function or an object that
219 datarepr: a tuple of (format, obj, ...), a function or an object that
220 provides a printable representation of the given data.
220 provides a printable representation of the given data.
221 """
221 """
222 self._ascending = None
222 self._ascending = None
223 self._istopo = istopo
223 self._istopo = istopo
224 if isinstance(data, set):
224 if isinstance(data, set):
225 # converting set to list has a cost, do it lazily
225 # converting set to list has a cost, do it lazily
226 self._set = data
226 self._set = data
227 # set has no order we pick one for stability purpose
227 # set has no order we pick one for stability purpose
228 self._ascending = True
228 self._ascending = True
229 else:
229 else:
230 if not isinstance(data, list):
230 if not isinstance(data, list):
231 data = list(data)
231 data = list(data)
232 self._list = data
232 self._list = data
233 self._datarepr = datarepr
233 self._datarepr = datarepr
234
234
235 @util.propertycache
235 @util.propertycache
236 def _set(self):
236 def _set(self):
237 return set(self._list)
237 return set(self._list)
238
238
239 @util.propertycache
239 @util.propertycache
240 def _asclist(self):
240 def _asclist(self):
241 asclist = self._list[:]
241 asclist = self._list[:]
242 asclist.sort()
242 asclist.sort()
243 return asclist
243 return asclist
244
244
245 @util.propertycache
245 @util.propertycache
246 def _list(self):
246 def _list(self):
247 # _list is only lazily constructed if we have _set
247 # _list is only lazily constructed if we have _set
248 assert '_set' in self.__dict__
248 assert r'_set' in self.__dict__
249 return list(self._set)
249 return list(self._set)
250
250
251 def __iter__(self):
251 def __iter__(self):
252 if self._ascending is None:
252 if self._ascending is None:
253 return iter(self._list)
253 return iter(self._list)
254 elif self._ascending:
254 elif self._ascending:
255 return iter(self._asclist)
255 return iter(self._asclist)
256 else:
256 else:
257 return reversed(self._asclist)
257 return reversed(self._asclist)
258
258
259 def fastasc(self):
259 def fastasc(self):
260 return iter(self._asclist)
260 return iter(self._asclist)
261
261
262 def fastdesc(self):
262 def fastdesc(self):
263 return reversed(self._asclist)
263 return reversed(self._asclist)
264
264
265 @util.propertycache
265 @util.propertycache
266 def __contains__(self):
266 def __contains__(self):
267 return self._set.__contains__
267 return self._set.__contains__
268
268
269 def __nonzero__(self):
269 def __nonzero__(self):
270 return bool(len(self))
270 return bool(len(self))
271
271
272 __bool__ = __nonzero__
272 __bool__ = __nonzero__
273
273
274 def sort(self, reverse=False):
274 def sort(self, reverse=False):
275 self._ascending = not bool(reverse)
275 self._ascending = not bool(reverse)
276 self._istopo = False
276 self._istopo = False
277
277
278 def reverse(self):
278 def reverse(self):
279 if self._ascending is None:
279 if self._ascending is None:
280 self._list.reverse()
280 self._list.reverse()
281 else:
281 else:
282 self._ascending = not self._ascending
282 self._ascending = not self._ascending
283 self._istopo = False
283 self._istopo = False
284
284
285 def __len__(self):
285 def __len__(self):
286 if '_list' in self.__dict__:
286 if '_list' in self.__dict__:
287 return len(self._list)
287 return len(self._list)
288 else:
288 else:
289 return len(self._set)
289 return len(self._set)
290
290
291 def isascending(self):
291 def isascending(self):
292 """Returns True if the collection is ascending order, False if not.
292 """Returns True if the collection is ascending order, False if not.
293
293
294 This is part of the mandatory API for smartset."""
294 This is part of the mandatory API for smartset."""
295 if len(self) <= 1:
295 if len(self) <= 1:
296 return True
296 return True
297 return self._ascending is not None and self._ascending
297 return self._ascending is not None and self._ascending
298
298
299 def isdescending(self):
299 def isdescending(self):
300 """Returns True if the collection is descending order, False if not.
300 """Returns True if the collection is descending order, False if not.
301
301
302 This is part of the mandatory API for smartset."""
302 This is part of the mandatory API for smartset."""
303 if len(self) <= 1:
303 if len(self) <= 1:
304 return True
304 return True
305 return self._ascending is not None and not self._ascending
305 return self._ascending is not None and not self._ascending
306
306
307 def istopo(self):
307 def istopo(self):
308 """Is the collection is in topographical order or not.
308 """Is the collection is in topographical order or not.
309
309
310 This is part of the mandatory API for smartset."""
310 This is part of the mandatory API for smartset."""
311 if len(self) <= 1:
311 if len(self) <= 1:
312 return True
312 return True
313 return self._istopo
313 return self._istopo
314
314
315 def first(self):
315 def first(self):
316 if self:
316 if self:
317 if self._ascending is None:
317 if self._ascending is None:
318 return self._list[0]
318 return self._list[0]
319 elif self._ascending:
319 elif self._ascending:
320 return self._asclist[0]
320 return self._asclist[0]
321 else:
321 else:
322 return self._asclist[-1]
322 return self._asclist[-1]
323 return None
323 return None
324
324
325 def last(self):
325 def last(self):
326 if self:
326 if self:
327 if self._ascending is None:
327 if self._ascending is None:
328 return self._list[-1]
328 return self._list[-1]
329 elif self._ascending:
329 elif self._ascending:
330 return self._asclist[-1]
330 return self._asclist[-1]
331 else:
331 else:
332 return self._asclist[0]
332 return self._asclist[0]
333 return None
333 return None
334
334
335 def _fastsetop(self, other, op):
335 def _fastsetop(self, other, op):
336 # try to use native set operations as fast paths
336 # try to use native set operations as fast paths
337 if (type(other) is baseset and '_set' in other.__dict__ and '_set' in
337 if (type(other) is baseset and '_set' in other.__dict__ and '_set' in
338 self.__dict__ and self._ascending is not None):
338 self.__dict__ and self._ascending is not None):
339 s = baseset(data=getattr(self._set, op)(other._set),
339 s = baseset(data=getattr(self._set, op)(other._set),
340 istopo=self._istopo)
340 istopo=self._istopo)
341 s._ascending = self._ascending
341 s._ascending = self._ascending
342 else:
342 else:
343 s = getattr(super(baseset, self), op)(other)
343 s = getattr(super(baseset, self), op)(other)
344 return s
344 return s
345
345
346 def __and__(self, other):
346 def __and__(self, other):
347 return self._fastsetop(other, '__and__')
347 return self._fastsetop(other, '__and__')
348
348
349 def __sub__(self, other):
349 def __sub__(self, other):
350 return self._fastsetop(other, '__sub__')
350 return self._fastsetop(other, '__sub__')
351
351
352 def __repr__(self):
352 def __repr__(self):
353 d = {None: '', False: '-', True: '+'}[self._ascending]
353 d = {None: '', False: '-', True: '+'}[self._ascending]
354 s = _formatsetrepr(self._datarepr)
354 s = _formatsetrepr(self._datarepr)
355 if not s:
355 if not s:
356 l = self._list
356 l = self._list
357 # if _list has been built from a set, it might have a different
357 # if _list has been built from a set, it might have a different
358 # order from one python implementation to another.
358 # order from one python implementation to another.
359 # We fallback to the sorted version for a stable output.
359 # We fallback to the sorted version for a stable output.
360 if self._ascending is not None:
360 if self._ascending is not None:
361 l = self._asclist
361 l = self._asclist
362 s = repr(l)
362 s = repr(l)
363 return '<%s%s %s>' % (type(self).__name__, d, s)
363 return '<%s%s %s>' % (type(self).__name__, d, s)
364
364
class filteredset(abstractsmartset):
    """Lazy smartset applying a membership predicate to a wrapped subset.

    Nothing is precomputed: iteration walks the wrapped subset and only
    yields the revisions for which the predicate holds.
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        subset: the smartset being filtered
        condition: a function that decides whether a revision in the
                   subset belongs to the revset or not
        condrepr: a tuple of (format, obj, ...), a function or an object
                  that provides a printable representation of the given
                  condition
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        if x not in self._subset:
            return False
        return self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # bind the predicate once before the loop
        pred = self._condition
        for rev in it:
            if pred(rev):
                yield rev

    @property
    def fastasc(self):
        fast = self._subset.fastasc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    @property
    def fastdesc(self):
        fast = self._subset.fastdesc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    def __nonzero__(self):
        # Prefer a fast iterator matching the current order, then any
        # fast iterator at all, before falling back to plain (filtered)
        # iteration over self.
        fast = None
        for candidate in (self.fastasc if self.isascending() else None,
                          self.fastdesc if self.isdescending() else None,
                          self.fastasc,
                          self.fastdesc):
            if candidate is not None:
                fast = candidate
                break

        it = fast() if fast is not None else self

        for _rev in it:
            return True
        return False

    __bool__ = __nonzero__

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # Until this gets improved, we use a generator expression here:
        # list comprehensions are free to call __len__ again, causing
        # infinite recursion.
        return len(baseset(rev for rev in self))

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        return next(iter(self), None)

    def last(self):
        # With a known order we can read a single element from the
        # reversed fast iterator; otherwise exhaust the set and keep the
        # final value seen.
        rev = None
        if self.isascending():
            rev = self.fastdesc
        elif self.isdescending():
            rev = self.fastasc
        if rev is not None:
            return next(rev(), None)
        tail = None
        for tail in self:
            pass
        return tail

    def __repr__(self):
        parts = [repr(self._subset)]
        condstr = _formatsetrepr(self._condrepr)
        if condstr:
            parts.append(condstr)
        return '<%s %s>' % (type(self).__name__, ', '.join(parts))
479
479
480 def _iterordered(ascending, iter1, iter2):
480 def _iterordered(ascending, iter1, iter2):
481 """produce an ordered iteration from two iterators with the same order
481 """produce an ordered iteration from two iterators with the same order
482
482
483 The ascending is used to indicated the iteration direction.
483 The ascending is used to indicated the iteration direction.
484 """
484 """
485 choice = max
485 choice = max
486 if ascending:
486 if ascending:
487 choice = min
487 choice = min
488
488
489 val1 = None
489 val1 = None
490 val2 = None
490 val2 = None
491 try:
491 try:
492 # Consume both iterators in an ordered way until one is empty
492 # Consume both iterators in an ordered way until one is empty
493 while True:
493 while True:
494 if val1 is None:
494 if val1 is None:
495 val1 = next(iter1)
495 val1 = next(iter1)
496 if val2 is None:
496 if val2 is None:
497 val2 = next(iter2)
497 val2 = next(iter2)
498 n = choice(val1, val2)
498 n = choice(val1, val2)
499 yield n
499 yield n
500 if val1 == n:
500 if val1 == n:
501 val1 = None
501 val1 = None
502 if val2 == n:
502 if val2 == n:
503 val2 = None
503 val2 = None
504 except StopIteration:
504 except StopIteration:
505 # Flush any remaining values and consume the other one
505 # Flush any remaining values and consume the other one
506 it = iter2
506 it = iter2
507 if val1 is not None:
507 if val1 is not None:
508 yield val1
508 yield val1
509 it = iter1
509 it = iter1
510 elif val2 is not None:
510 elif val2 is not None:
511 # might have been equality and both are empty
511 # might have been equality and both are empty
512 yield val2
512 yield val2
513 for val in it:
513 for val in it:
514 yield val
514 yield val
515
515
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # the two component smartsets
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter is assigned here but never read within this
        # class — confirm against the rest of the file before removing
        self._iter = None
        # None: arbitrary order; True/False: both components share this
        # ascending/descending order
        self._ascending = ascending
        # baseset of all values, built lazily by _list
        self._genlist = None
        # sorted copy of _genlist, built lazily by _trysetasclist
        self._asclist = None

    def __len__(self):
        # materializes the whole set via the _list propertycache
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    __bool__ = __nonzero__

    @util.propertycache
    def _list(self):
        # materialize (and cache) all values as a baseset
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # r1 first, then any r2 value not already seen in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        # merge the two ordered streams, deduplicating as we go
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # ascending iterator factory, or None when neither the cached
        # sorted list nor both components' fast iterators are available
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # descending counterpart of fastasc
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            # no known order: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reverse, take the new first element, then restore the order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
726
726
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: optional hint about the generator's order (True for
                 ascending, False for descending); enables an early-exit
                 __contains__ and the matching fast iterator.
        """
        self._gen = gen
        # sorted list of all values; set once the generator is exhausted
        self._asclist = None
        # membership cache: value -> bool
        self._cache = {}
        # values produced so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    __bool__ = __nonzero__

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # values only grow from here on: x cannot appear anymore
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # values only shrink from here on: x cannot appear anymore
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen has now installed fastasc and
        # fastdesc, so this recursion terminates
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextgen = self._consumegen()
        _len, _next = len, next # cache global lookup
        def gen():
            i = 0
            while True:
                # replay what other iterations already produced, then
                # pull fresh values from the shared generator
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield _next(nextgen)
                i += 1
        return gen()

    def _consumegen(self):
        # pull from the wrapped generator, recording every value in the
        # membership cache and the replay list as it appears
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: install sorted fast iterators
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen installs
            # both fast iterators, so the retry cannot recurse again
            for x in self._consumegen():
                pass
            # BUGFIX: this used to call self.first(), returning the FIRST
            # element of the set instead of the last one
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
908
908
909 class spanset(abstractsmartset):
909 class spanset(abstractsmartset):
910 """Duck type for baseset class which represents a range of revisions and
910 """Duck type for baseset class which represents a range of revisions and
911 can work lazily and without having all the range in memory
911 can work lazily and without having all the range in memory
912
912
913 Note that spanset(x, y) behave almost like xrange(x, y) except for two
913 Note that spanset(x, y) behave almost like xrange(x, y) except for two
914 notable points:
914 notable points:
915 - when x < y it will be automatically descending,
915 - when x < y it will be automatically descending,
916 - revision filtered with this repoview will be skipped.
916 - revision filtered with this repoview will be skipped.
917
917
918 """
918 """
919 def __init__(self, repo, start=0, end=None):
919 def __init__(self, repo, start=0, end=None):
920 """
920 """
921 start: first revision included the set
921 start: first revision included the set
922 (default to 0)
922 (default to 0)
923 end: first revision excluded (last+1)
923 end: first revision excluded (last+1)
924 (default to len(repo)
924 (default to len(repo)
925
925
926 Spanset will be descending if `end` < `start`.
926 Spanset will be descending if `end` < `start`.
927 """
927 """
928 if end is None:
928 if end is None:
929 end = len(repo)
929 end = len(repo)
930 self._ascending = start <= end
930 self._ascending = start <= end
931 if not self._ascending:
931 if not self._ascending:
932 start, end = end + 1, start +1
932 start, end = end + 1, start +1
933 self._start = start
933 self._start = start
934 self._end = end
934 self._end = end
935 self._hiddenrevs = repo.changelog.filteredrevs
935 self._hiddenrevs = repo.changelog.filteredrevs
936
936
    def sort(self, reverse=False):
        # a span has no storage to reorder: sorting just (re)sets the
        # iteration direction flag
        self._ascending = not reverse
939
939
    def reverse(self):
        # flip the iteration direction; the stored range is unchanged
        self._ascending = not self._ascending
942
942
    def istopo(self):
        # a spanset iterates in numeric revision order, which is not
        # guaranteed to be topological. Use the sort() predicate to
        # explicitly sort again instead.
        return False
948
948
def _iterfilter(self, iterrange):
    """Yield the revisions of *iterrange* that are not hidden."""
    hidden = self._hiddenrevs
    for rev in iterrange:
        if rev not in hidden:
            yield rev
954
954
def __iter__(self):
    """Iterate in the direction selected by self._ascending."""
    walker = self.fastasc if self._ascending else self.fastdesc
    return walker()
960
960
def fastasc(self):
    """Return an ascending iterator over the span, skipping hidden revs."""
    iterrange = xrange(self._start, self._end)
    if not self._hiddenrevs:
        return iter(iterrange)
    return self._iterfilter(iterrange)
966
966
def fastdesc(self):
    """Return a descending iterator over the span, skipping hidden revs."""
    iterrange = xrange(self._end - 1, self._start - 1, -1)
    if not self._hiddenrevs:
        return iter(iterrange)
    return self._iterfilter(iterrange)
972
972
def __contains__(self, rev):
    """True when *rev* lies inside [start, end) and is not hidden."""
    if not (self._start <= rev < self._end):
        return False
    hidden = self._hiddenrevs
    return not (hidden and rev in hidden)
977
977
def __nonzero__(self):
    """True when the span yields at least one visible revision."""
    try:
        next(iter(self))
    except StopIteration:
        return False
    return True

__bool__ = __nonzero__
984
984
def __len__(self):
    """Number of visible (non-hidden) revisions in the span."""
    span = abs(self._end - self._start)
    hidden = self._hiddenrevs
    if not hidden:
        return span
    start = self._start
    end = self._end
    hiddencount = 0
    for rev in hidden:
        # NOTE(review): __init__ normalizes so that start <= end always
        # holds, which makes the first disjunct look unreachable; kept
        # verbatim to preserve behavior -- confirm before simplifying.
        if (end < rev <= start) or (start <= rev < end):
            hiddencount += 1
    return span - hiddencount
996
996
def isascending(self):
    """True when iteration order is currently ascending."""
    return self._ascending
999
999
def isdescending(self):
    """True when iteration order is currently descending."""
    return not self._ascending
1002
1002
def first(self):
    """Return the first revision in iteration order, or None if empty."""
    walker = self.fastasc if self._ascending else self.fastdesc
    return next(walker(), None)
1011
1011
def last(self):
    """Return the last revision in iteration order, or None if empty."""
    # The last element in one direction is the first in the other.
    walker = self.fastdesc if self._ascending else self.fastasc
    return next(walker(), None)
1020
1020
def __repr__(self):
    """Debug representation, e.g. '<spanset+ 0:9>'."""
    direction = '+' if self._ascending else '-'
    return '<%s%s %d:%d>' % (type(self).__name__, direction,
                             self._start, self._end - 1)
1025
1025
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is not a smartset: it was used with "&", so assume it
            # is at least set-like.  Strip the hidden revisions and wrap it
            # in a baseset (this boldly assumes all smartsets are pure).
            other = baseset(other - self._hiddenrevs)
        other.sort(reverse=self.isdescending())
        return other
1053
1053
def prettyformat(revs):
    """Render repr(revs) as one line per '<...>' node, indented by its
    nesting depth within the angle-bracket tree."""
    rs = repr(revs)
    total = len(rs)
    lines = []
    pos = 0
    while pos < total:
        # Each chunk runs up to (but not including) the next '<'.
        nextopen = rs.find('<', pos + 1)
        if nextopen < 0:
            nextopen = total
        # Depth = unmatched '<' seen so far before this chunk.
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        lines.append((depth, rs[pos:nextopen].rstrip()))
        pos = nextopen
    return '\n'.join(' ' * depth + chunk for depth, chunk in lines)
General Comments 0
You need to be logged in to leave comments. Login now