##// END OF EJS Templates
context: also return ancestor's line range in blockancestors
Denis Laxalde -
r31076:0e07855e default
parent child Browse files
Show More
@@ -1,2116 +1,2116 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 newnodeid,
21 newnodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 )
26 )
27 from . import (
27 from . import (
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 mdiff,
32 mdiff,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 phases,
35 phases,
36 repoview,
36 repoview,
37 revlog,
37 revlog,
38 scmutil,
38 scmutil,
39 subrepo,
39 subrepo,
40 util,
40 util,
41 )
41 )
42
42
# Shorthand for mercurial's lazily-caching property decorator: the wrapped
# method runs once and its result is stored on the instance.
propertycache = util.propertycache

# Predicate: does the string contain any byte outside printable ASCII
# (0x21-0x7f)?  Used to decide whether a 20-byte changeid should be shown
# hex-encoded in error messages.
nonascii = re.compile(r'[^\x21-\x7f]').search
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # passing an existing context through is a no-op: return it unchanged
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            # other has no _rev (not a context): never equal
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 != newnodeid:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed .hgsub/.hgsubstate data for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    # trivial accessors for the context's identity and cached objects
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # anything above the 'public' phase may still be rewritten
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # no second parent recorded: synthesize the null revision
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """Return (filenode, flags) for *path*, using the cheapest cached
        source available; raises ManifestLookupError when path is absent."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # missing files have no flags
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # NOTE(review): the historical signature used a mutable default
        # (pats=[]); normalized to None here with identical behavior.
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        # read-only contexts are never dirty
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        # (local renamed from 'reversed' to avoid shadowing the builtin)
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build an in-memory changeset context (memctx) whose file contents
    come from *store*.

    *store* must provide getfile(path) -> (data, (islink, isexec), copied);
    a None data means the file is absent.  *branch*, when set, is recorded
    in the changeset extras.
    """
    def getfilectx(repo, memctx, path):
        # adapt the store's getfile() result into a memfilectx (or None for
        # a file removed in this commit)
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx
400 class changectx(basectx):
400 class changectx(basectx):
401 """A changecontext object makes access to data related to a particular
401 """A changecontext object makes access to data related to a particular
402 changeset convenient. It represents a read-only context already present in
402 changeset convenient. It represents a read-only context already present in
403 the repo."""
403 the repo."""
404 def __init__(self, repo, changeid=''):
404 def __init__(self, repo, changeid=''):
405 """changeid is a revision number, node, or tag"""
405 """changeid is a revision number, node, or tag"""
406
406
407 # since basectx.__new__ already took care of copying the object, we
407 # since basectx.__new__ already took care of copying the object, we
408 # don't need to do anything in __init__, so we just exit here
408 # don't need to do anything in __init__, so we just exit here
409 if isinstance(changeid, basectx):
409 if isinstance(changeid, basectx):
410 return
410 return
411
411
412 if changeid == '':
412 if changeid == '':
413 changeid = '.'
413 changeid = '.'
414 self._repo = repo
414 self._repo = repo
415
415
416 try:
416 try:
417 if isinstance(changeid, int):
417 if isinstance(changeid, int):
418 self._node = repo.changelog.node(changeid)
418 self._node = repo.changelog.node(changeid)
419 self._rev = changeid
419 self._rev = changeid
420 return
420 return
421 if isinstance(changeid, long):
421 if isinstance(changeid, long):
422 changeid = str(changeid)
422 changeid = str(changeid)
423 if changeid == 'null':
423 if changeid == 'null':
424 self._node = nullid
424 self._node = nullid
425 self._rev = nullrev
425 self._rev = nullrev
426 return
426 return
427 if changeid == 'tip':
427 if changeid == 'tip':
428 self._node = repo.changelog.tip()
428 self._node = repo.changelog.tip()
429 self._rev = repo.changelog.rev(self._node)
429 self._rev = repo.changelog.rev(self._node)
430 return
430 return
431 if changeid == '.' or changeid == repo.dirstate.p1():
431 if changeid == '.' or changeid == repo.dirstate.p1():
432 # this is a hack to delay/avoid loading obsmarkers
432 # this is a hack to delay/avoid loading obsmarkers
433 # when we know that '.' won't be hidden
433 # when we know that '.' won't be hidden
434 self._node = repo.dirstate.p1()
434 self._node = repo.dirstate.p1()
435 self._rev = repo.unfiltered().changelog.rev(self._node)
435 self._rev = repo.unfiltered().changelog.rev(self._node)
436 return
436 return
437 if len(changeid) == 20:
437 if len(changeid) == 20:
438 try:
438 try:
439 self._node = changeid
439 self._node = changeid
440 self._rev = repo.changelog.rev(changeid)
440 self._rev = repo.changelog.rev(changeid)
441 return
441 return
442 except error.FilteredRepoLookupError:
442 except error.FilteredRepoLookupError:
443 raise
443 raise
444 except LookupError:
444 except LookupError:
445 pass
445 pass
446
446
447 try:
447 try:
448 r = int(changeid)
448 r = int(changeid)
449 if str(r) != changeid:
449 if str(r) != changeid:
450 raise ValueError
450 raise ValueError
451 l = len(repo.changelog)
451 l = len(repo.changelog)
452 if r < 0:
452 if r < 0:
453 r += l
453 r += l
454 if r < 0 or r >= l:
454 if r < 0 or r >= l:
455 raise ValueError
455 raise ValueError
456 self._rev = r
456 self._rev = r
457 self._node = repo.changelog.node(r)
457 self._node = repo.changelog.node(r)
458 return
458 return
459 except error.FilteredIndexError:
459 except error.FilteredIndexError:
460 raise
460 raise
461 except (ValueError, OverflowError, IndexError):
461 except (ValueError, OverflowError, IndexError):
462 pass
462 pass
463
463
464 if len(changeid) == 40:
464 if len(changeid) == 40:
465 try:
465 try:
466 self._node = bin(changeid)
466 self._node = bin(changeid)
467 self._rev = repo.changelog.rev(self._node)
467 self._rev = repo.changelog.rev(self._node)
468 return
468 return
469 except error.FilteredLookupError:
469 except error.FilteredLookupError:
470 raise
470 raise
471 except (TypeError, LookupError):
471 except (TypeError, LookupError):
472 pass
472 pass
473
473
474 # lookup bookmarks through the name interface
474 # lookup bookmarks through the name interface
475 try:
475 try:
476 self._node = repo.names.singlenode(repo, changeid)
476 self._node = repo.names.singlenode(repo, changeid)
477 self._rev = repo.changelog.rev(self._node)
477 self._rev = repo.changelog.rev(self._node)
478 return
478 return
479 except KeyError:
479 except KeyError:
480 pass
480 pass
481 except error.FilteredRepoLookupError:
481 except error.FilteredRepoLookupError:
482 raise
482 raise
483 except error.RepoLookupError:
483 except error.RepoLookupError:
484 pass
484 pass
485
485
486 self._node = repo.unfiltered().changelog._partialmatch(changeid)
486 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 if self._node is not None:
487 if self._node is not None:
488 self._rev = repo.changelog.rev(self._node)
488 self._rev = repo.changelog.rev(self._node)
489 return
489 return
490
490
491 # lookup failed
491 # lookup failed
492 # check if it might have come from damaged dirstate
492 # check if it might have come from damaged dirstate
493 #
493 #
494 # XXX we could avoid the unfiltered if we had a recognizable
494 # XXX we could avoid the unfiltered if we had a recognizable
495 # exception for filtered changeset access
495 # exception for filtered changeset access
496 if changeid in repo.unfiltered().dirstate.parents():
496 if changeid in repo.unfiltered().dirstate.parents():
497 msg = _("working directory has unknown parent '%s'!")
497 msg = _("working directory has unknown parent '%s'!")
498 raise error.Abort(msg % short(changeid))
498 raise error.Abort(msg % short(changeid))
499 try:
499 try:
500 if len(changeid) == 20 and nonascii(changeid):
500 if len(changeid) == 20 and nonascii(changeid):
501 changeid = hex(changeid)
501 changeid = hex(changeid)
502 except TypeError:
502 except TypeError:
503 pass
503 pass
504 except (error.FilteredIndexError, error.FilteredLookupError,
504 except (error.FilteredIndexError, error.FilteredLookupError,
505 error.FilteredRepoLookupError):
505 error.FilteredRepoLookupError):
506 if repo.filtername.startswith('visible'):
506 if repo.filtername.startswith('visible'):
507 msg = _("hidden revision '%s'") % changeid
507 msg = _("hidden revision '%s'") % changeid
508 hint = _('use --hidden to access hidden revisions')
508 hint = _('use --hidden to access hidden revisions')
509 raise error.FilteredRepoLookupError(msg, hint=hint)
509 raise error.FilteredRepoLookupError(msg, hint=hint)
510 msg = _("filtered revision '%s' (not in '%s' subset)")
510 msg = _("filtered revision '%s' (not in '%s' subset)")
511 msg %= (changeid, repo.filtername)
511 msg %= (changeid, repo.filtername)
512 raise error.FilteredRepoLookupError(msg)
512 raise error.FilteredRepoLookupError(msg)
513 except IndexError:
513 except IndexError:
514 pass
514 pass
515 raise error.RepoLookupError(
515 raise error.RepoLookupError(
516 _("unknown revision '%s'") % changeid)
516 _("unknown revision '%s'") % changeid)
517
517
518 def __hash__(self):
518 def __hash__(self):
519 try:
519 try:
520 return hash(self._rev)
520 return hash(self._rev)
521 except AttributeError:
521 except AttributeError:
522 return id(self)
522 return id(self)
523
523
524 def __nonzero__(self):
524 def __nonzero__(self):
525 return self._rev != nullrev
525 return self._rev != nullrev
526
526
527 @propertycache
527 @propertycache
528 def _changeset(self):
528 def _changeset(self):
529 return self._repo.changelog.changelogrevision(self.rev())
529 return self._repo.changelog.changelogrevision(self.rev())
530
530
531 @propertycache
531 @propertycache
532 def _manifest(self):
532 def _manifest(self):
533 return self._manifestctx.read()
533 return self._manifestctx.read()
534
534
535 @propertycache
535 @propertycache
536 def _manifestctx(self):
536 def _manifestctx(self):
537 return self._repo.manifestlog[self._changeset.manifest]
537 return self._repo.manifestlog[self._changeset.manifest]
538
538
539 @propertycache
539 @propertycache
540 def _manifestdelta(self):
540 def _manifestdelta(self):
541 return self._manifestctx.readdelta()
541 return self._manifestctx.readdelta()
542
542
543 @propertycache
543 @propertycache
544 def _parents(self):
544 def _parents(self):
545 repo = self._repo
545 repo = self._repo
546 p1, p2 = repo.changelog.parentrevs(self._rev)
546 p1, p2 = repo.changelog.parentrevs(self._rev)
547 if p2 == nullrev:
547 if p2 == nullrev:
548 return [changectx(repo, p1)]
548 return [changectx(repo, p1)]
549 return [changectx(repo, p1), changectx(repo, p2)]
549 return [changectx(repo, p1), changectx(repo, p2)]
550
550
551 def changeset(self):
551 def changeset(self):
552 c = self._changeset
552 c = self._changeset
553 return (
553 return (
554 c.manifest,
554 c.manifest,
555 c.user,
555 c.user,
556 c.date,
556 c.date,
557 c.files,
557 c.files,
558 c.description,
558 c.description,
559 c.extra,
559 c.extra,
560 )
560 )
561 def manifestnode(self):
561 def manifestnode(self):
562 return self._changeset.manifest
562 return self._changeset.manifest
563
563
564 def user(self):
564 def user(self):
565 return self._changeset.user
565 return self._changeset.user
566 def date(self):
566 def date(self):
567 return self._changeset.date
567 return self._changeset.date
568 def files(self):
568 def files(self):
569 return self._changeset.files
569 return self._changeset.files
570 def description(self):
570 def description(self):
571 return self._changeset.description
571 return self._changeset.description
572 def branch(self):
572 def branch(self):
573 return encoding.tolocal(self._changeset.extra.get("branch"))
573 return encoding.tolocal(self._changeset.extra.get("branch"))
574 def closesbranch(self):
574 def closesbranch(self):
575 return 'close' in self._changeset.extra
575 return 'close' in self._changeset.extra
576 def extra(self):
576 def extra(self):
577 return self._changeset.extra
577 return self._changeset.extra
578 def tags(self):
578 def tags(self):
579 return self._repo.nodetags(self._node)
579 return self._repo.nodetags(self._node)
580 def bookmarks(self):
580 def bookmarks(self):
581 return self._repo.nodebookmarks(self._node)
581 return self._repo.nodebookmarks(self._node)
582 def phase(self):
582 def phase(self):
583 return self._repo._phasecache.phase(self._repo, self._rev)
583 return self._repo._phasecache.phase(self._repo, self._rev)
584 def hidden(self):
584 def hidden(self):
585 return self._rev in repoview.filterrevs(self._repo, 'visible')
585 return self._rev in repoview.filterrevs(self._repo, 'visible')
586
586
587 def children(self):
587 def children(self):
588 """return contexts for each child changeset"""
588 """return contexts for each child changeset"""
589 c = self._repo.changelog.children(self._node)
589 c = self._repo.changelog.children(self._node)
590 return [changectx(self._repo, x) for x in c]
590 return [changectx(self._repo, x) for x in c]
591
591
592 def ancestors(self):
592 def ancestors(self):
593 for a in self._repo.changelog.ancestors([self._rev]):
593 for a in self._repo.changelog.ancestors([self._rev]):
594 yield changectx(self._repo, a)
594 yield changectx(self._repo, a)
595
595
596 def descendants(self):
596 def descendants(self):
597 for d in self._repo.changelog.descendants([self._rev]):
597 for d in self._repo.changelog.descendants([self._rev]):
598 yield changectx(self._repo, d)
598 yield changectx(self._repo, d)
599
599
600 def filectx(self, path, fileid=None, filelog=None):
600 def filectx(self, path, fileid=None, filelog=None):
601 """get a file context from this changeset"""
601 """get a file context from this changeset"""
602 if fileid is None:
602 if fileid is None:
603 fileid = self.filenode(path)
603 fileid = self.filenode(path)
604 return filectx(self._repo, path, fileid=fileid,
604 return filectx(self._repo, path, fileid=fileid,
605 changectx=self, filelog=filelog)
605 changectx=self, filelog=filelog)
606
606
607 def ancestor(self, c2, warn=False):
607 def ancestor(self, c2, warn=False):
608 """return the "best" ancestor context of self and c2
608 """return the "best" ancestor context of self and c2
609
609
610 If there are multiple candidates, it will show a message and check
610 If there are multiple candidates, it will show a message and check
611 merge.preferancestor configuration before falling back to the
611 merge.preferancestor configuration before falling back to the
612 revlog ancestor."""
612 revlog ancestor."""
613 # deal with workingctxs
613 # deal with workingctxs
614 n2 = c2._node
614 n2 = c2._node
615 if n2 is None:
615 if n2 is None:
616 n2 = c2._parents[0]._node
616 n2 = c2._parents[0]._node
617 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
617 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
618 if not cahs:
618 if not cahs:
619 anc = nullid
619 anc = nullid
620 elif len(cahs) == 1:
620 elif len(cahs) == 1:
621 anc = cahs[0]
621 anc = cahs[0]
622 else:
622 else:
623 # experimental config: merge.preferancestor
623 # experimental config: merge.preferancestor
624 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
624 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
625 try:
625 try:
626 ctx = changectx(self._repo, r)
626 ctx = changectx(self._repo, r)
627 except error.RepoLookupError:
627 except error.RepoLookupError:
628 continue
628 continue
629 anc = ctx.node()
629 anc = ctx.node()
630 if anc in cahs:
630 if anc in cahs:
631 break
631 break
632 else:
632 else:
633 anc = self._repo.changelog.ancestor(self._node, n2)
633 anc = self._repo.changelog.ancestor(self._node, n2)
634 if warn:
634 if warn:
635 self._repo.ui.status(
635 self._repo.ui.status(
636 (_("note: using %s as ancestor of %s and %s\n") %
636 (_("note: using %s as ancestor of %s and %s\n") %
637 (short(anc), short(self._node), short(n2))) +
637 (short(anc), short(self._node), short(n2))) +
638 ''.join(_(" alternatively, use --config "
638 ''.join(_(" alternatively, use --config "
639 "merge.preferancestor=%s\n") %
639 "merge.preferancestor=%s\n") %
640 short(n) for n in sorted(cahs) if n != anc))
640 short(n) for n in sorted(cahs) if n != anc))
641 return changectx(self._repo, anc)
641 return changectx(self._repo, anc)
642
642
643 def descendant(self, other):
643 def descendant(self, other):
644 """True if other is descendant of this changeset"""
644 """True if other is descendant of this changeset"""
645 return self._repo.changelog.descendant(self._rev, other._rev)
645 return self._repo.changelog.descendant(self._rev, other._rev)
646
646
647 def walk(self, match):
647 def walk(self, match):
648 '''Generates matching file names.'''
648 '''Generates matching file names.'''
649
649
650 # Wrap match.bad method to have message with nodeid
650 # Wrap match.bad method to have message with nodeid
651 def bad(fn, msg):
651 def bad(fn, msg):
652 # The manifest doesn't know about subrepos, so don't complain about
652 # The manifest doesn't know about subrepos, so don't complain about
653 # paths into valid subrepos.
653 # paths into valid subrepos.
654 if any(fn == s or fn.startswith(s + '/')
654 if any(fn == s or fn.startswith(s + '/')
655 for s in self.substate):
655 for s in self.substate):
656 return
656 return
657 match.bad(fn, _('no such file in rev %s') % self)
657 match.bad(fn, _('no such file in rev %s') % self)
658
658
659 m = matchmod.badmatch(match, bad)
659 m = matchmod.badmatch(match, bad)
660 return self._manifest.walk(m)
660 return self._manifest.walk(m)
661
661
662 def matches(self, match):
662 def matches(self, match):
663 return self.walk(match)
663 return self.walk(match)
664
664
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        # Filelog for this path, opened lazily.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
706
706
707 def __nonzero__(self):
707 def __nonzero__(self):
708 try:
708 try:
709 self._filenode
709 self._filenode
710 return True
710 return True
711 except error.LookupError:
711 except error.LookupError:
712 # file is missing
712 # file is missing
713 return False
713 return False
714
714
715 def __str__(self):
715 def __str__(self):
716 try:
716 try:
717 return "%s@%s" % (self.path(), self._changectx)
717 return "%s@%s" % (self.path(), self._changectx)
718 except error.LookupError:
718 except error.LookupError:
719 return "%s@???" % self.path()
719 return "%s@???" % self.path()
720
720
721 def __repr__(self):
721 def __repr__(self):
722 return "<%s %s>" % (type(self).__name__, str(self))
722 return "<%s %s>" % (type(self).__name__, str(self))
723
723
724 def __hash__(self):
724 def __hash__(self):
725 try:
725 try:
726 return hash((self._path, self._filenode))
726 return hash((self._path, self._filenode))
727 except AttributeError:
727 except AttributeError:
728 return id(self)
728 return id(self)
729
729
730 def __eq__(self, other):
730 def __eq__(self, other):
731 try:
731 try:
732 return (type(self) == type(other) and self._path == other._path
732 return (type(self) == type(other) and self._path == other._path
733 and self._filenode == other._filenode)
733 and self._filenode == other._filenode)
734 except AttributeError:
734 except AttributeError:
735 return False
735 return False
736
736
737 def __ne__(self, other):
737 def __ne__(self, other):
738 return not (self == other)
738 return not (self == other)
739
739
740 def filerev(self):
740 def filerev(self):
741 return self._filerev
741 return self._filerev
742 def filenode(self):
742 def filenode(self):
743 return self._filenode
743 return self._filenode
744 def flags(self):
744 def flags(self):
745 return self._changectx.flags(self._path)
745 return self._changectx.flags(self._path)
746 def filelog(self):
746 def filelog(self):
747 return self._filelog
747 return self._filelog
748 def rev(self):
748 def rev(self):
749 return self._changeid
749 return self._changeid
750 def linkrev(self):
750 def linkrev(self):
751 return self._filelog.linkrev(self._filerev)
751 return self._filelog.linkrev(self._filerev)
752 def node(self):
752 def node(self):
753 return self._changectx.node()
753 return self._changectx.node()
754 def hex(self):
754 def hex(self):
755 return self._changectx.hex()
755 return self._changectx.hex()
756 def user(self):
756 def user(self):
757 return self._changectx.user()
757 return self._changectx.user()
758 def date(self):
758 def date(self):
759 return self._changectx.date()
759 return self._changectx.date()
760 def files(self):
760 def files(self):
761 return self._changectx.files()
761 return self._changectx.files()
762 def description(self):
762 def description(self):
763 return self._changectx.description()
763 return self._changectx.description()
764 def branch(self):
764 def branch(self):
765 return self._changectx.branch()
765 return self._changectx.branch()
766 def extra(self):
766 def extra(self):
767 return self._changectx.extra()
767 return self._changectx.extra()
768 def phase(self):
768 def phase(self):
769 return self._changectx.phase()
769 return self._changectx.phase()
770 def phasestr(self):
770 def phasestr(self):
771 return self._changectx.phasestr()
771 return self._changectx.phasestr()
772 def manifest(self):
772 def manifest(self):
773 return self._changectx.manifest()
773 return self._changectx.manifest()
774 def changectx(self):
774 def changectx(self):
775 return self._changectx
775 return self._changectx
776 def repo(self):
776 def repo(self):
777 return self._repo
777 return self._repo
778
778
779 def path(self):
779 def path(self):
780 return self._path
780 return self._path
781
781
782 def isbinary(self):
782 def isbinary(self):
783 try:
783 try:
784 return util.binary(self.data())
784 return util.binary(self.data())
785 except IOError:
785 except IOError:
786 return False
786 return False
787 def isexec(self):
787 def isexec(self):
788 return 'x' in self.flags()
788 return 'x' in self.flags()
789 def islink(self):
789 def islink(self):
790 return 'l' in self.flags()
790 return 'l' in self.flags()
791
791
792 def isabsent(self):
792 def isabsent(self):
793 """whether this filectx represents a file not in self._changectx
793 """whether this filectx represents a file not in self._changectx
794
794
795 This is mainly for merge code to detect change/delete conflicts. This is
795 This is mainly for merge code to detect change/delete conflicts. This is
796 expected to be True for all subclasses of basectx."""
796 expected to be True for all subclasses of basectx."""
797 return False
797 return False
798
798
799 _customcmp = False
799 _customcmp = False
800 def cmp(self, fctx):
800 def cmp(self, fctx):
801 """compare with other file context
801 """compare with other file context
802
802
803 returns True if different than fctx.
803 returns True if different than fctx.
804 """
804 """
805 if fctx._customcmp:
805 if fctx._customcmp:
806 return fctx.cmp(self)
806 return fctx.cmp(self)
807
807
808 if (fctx._filenode is None
808 if (fctx._filenode is None
809 and (self._repo._encodefilterpats
809 and (self._repo._encodefilterpats
810 # if file data starts with '\1\n', empty metadata block is
810 # if file data starts with '\1\n', empty metadata block is
811 # prepended, which adds 4 bytes to filelog.size().
811 # prepended, which adds 4 bytes to filelog.size().
812 or self.size() - 4 == fctx.size())
812 or self.size() - 4 == fctx.size())
813 or self.size() == fctx.size()):
813 or self.size() == fctx.size()):
814 return self._filelog.cmp(self._filenode, fctx.data())
814 return self._filelog.cmp(self._filenode, fctx.data())
815
815
816 return True
816 return True
817
817
818 def _adjustlinkrev(self, srcrev, inclusive=False):
818 def _adjustlinkrev(self, srcrev, inclusive=False):
819 """return the first ancestor of <srcrev> introducing <fnode>
819 """return the first ancestor of <srcrev> introducing <fnode>
820
820
821 If the linkrev of the file revision does not point to an ancestor of
821 If the linkrev of the file revision does not point to an ancestor of
822 srcrev, we'll walk down the ancestors until we find one introducing
822 srcrev, we'll walk down the ancestors until we find one introducing
823 this file revision.
823 this file revision.
824
824
825 :srcrev: the changeset revision we search ancestors from
825 :srcrev: the changeset revision we search ancestors from
826 :inclusive: if true, the src revision will also be checked
826 :inclusive: if true, the src revision will also be checked
827 """
827 """
828 repo = self._repo
828 repo = self._repo
829 cl = repo.unfiltered().changelog
829 cl = repo.unfiltered().changelog
830 mfl = repo.manifestlog
830 mfl = repo.manifestlog
831 # fetch the linkrev
831 # fetch the linkrev
832 lkr = self.linkrev()
832 lkr = self.linkrev()
833 # hack to reuse ancestor computation when searching for renames
833 # hack to reuse ancestor computation when searching for renames
834 memberanc = getattr(self, '_ancestrycontext', None)
834 memberanc = getattr(self, '_ancestrycontext', None)
835 iteranc = None
835 iteranc = None
836 if srcrev is None:
836 if srcrev is None:
837 # wctx case, used by workingfilectx during mergecopy
837 # wctx case, used by workingfilectx during mergecopy
838 revs = [p.rev() for p in self._repo[None].parents()]
838 revs = [p.rev() for p in self._repo[None].parents()]
839 inclusive = True # we skipped the real (revless) source
839 inclusive = True # we skipped the real (revless) source
840 else:
840 else:
841 revs = [srcrev]
841 revs = [srcrev]
842 if memberanc is None:
842 if memberanc is None:
843 memberanc = iteranc = cl.ancestors(revs, lkr,
843 memberanc = iteranc = cl.ancestors(revs, lkr,
844 inclusive=inclusive)
844 inclusive=inclusive)
845 # check if this linkrev is an ancestor of srcrev
845 # check if this linkrev is an ancestor of srcrev
846 if lkr not in memberanc:
846 if lkr not in memberanc:
847 if iteranc is None:
847 if iteranc is None:
848 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
848 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
849 fnode = self._filenode
849 fnode = self._filenode
850 path = self._path
850 path = self._path
851 for a in iteranc:
851 for a in iteranc:
852 ac = cl.read(a) # get changeset data (we avoid object creation)
852 ac = cl.read(a) # get changeset data (we avoid object creation)
853 if path in ac[3]: # checking the 'files' field.
853 if path in ac[3]: # checking the 'files' field.
854 # The file has been touched, check if the content is
854 # The file has been touched, check if the content is
855 # similar to the one we search for.
855 # similar to the one we search for.
856 if fnode == mfl[ac[0]].readfast().get(path):
856 if fnode == mfl[ac[0]].readfast().get(path):
857 return a
857 return a
858 # In theory, we should never get out of that loop without a result.
858 # In theory, we should never get out of that loop without a result.
859 # But if manifest uses a buggy file revision (not children of the
859 # But if manifest uses a buggy file revision (not children of the
860 # one it replaces) we could. Such a buggy situation will likely
860 # one it replaces) we could. Such a buggy situation will likely
861 # result is crash somewhere else at to some point.
861 # result is crash somewhere else at to some point.
862 return lkr
862 return lkr
863
863
864 def introrev(self):
864 def introrev(self):
865 """return the rev of the changeset which introduced this file revision
865 """return the rev of the changeset which introduced this file revision
866
866
867 This method is different from linkrev because it take into account the
867 This method is different from linkrev because it take into account the
868 changeset the filectx was created from. It ensures the returned
868 changeset the filectx was created from. It ensures the returned
869 revision is one of its ancestors. This prevents bugs from
869 revision is one of its ancestors. This prevents bugs from
870 'linkrev-shadowing' when a file revision is used by multiple
870 'linkrev-shadowing' when a file revision is used by multiple
871 changesets.
871 changesets.
872 """
872 """
873 lkr = self.linkrev()
873 lkr = self.linkrev()
874 attrs = vars(self)
874 attrs = vars(self)
875 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
875 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
876 if noctx or self.rev() == lkr:
876 if noctx or self.rev() == lkr:
877 return self.linkrev()
877 return self.linkrev()
878 return self._adjustlinkrev(self.rev(), inclusive=True)
878 return self._adjustlinkrev(self.rev(), inclusive=True)
879
879
880 def _parentfilectx(self, path, fileid, filelog):
880 def _parentfilectx(self, path, fileid, filelog):
881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 if '_changeid' in vars(self) or '_changectx' in vars(self):
883 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 # If self is associated with a changeset (probably explicitly
884 # If self is associated with a changeset (probably explicitly
885 # fed), ensure the created filectx is associated with a
885 # fed), ensure the created filectx is associated with a
886 # changeset that is an ancestor of self.changectx.
886 # changeset that is an ancestor of self.changectx.
887 # This lets us later use _adjustlinkrev to get a correct link.
887 # This lets us later use _adjustlinkrev to get a correct link.
888 fctx._descendantrev = self.rev()
888 fctx._descendantrev = self.rev()
889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 elif '_descendantrev' in vars(self):
890 elif '_descendantrev' in vars(self):
891 # Otherwise propagate _descendantrev if we have one associated.
891 # Otherwise propagate _descendantrev if we have one associated.
892 fctx._descendantrev = self._descendantrev
892 fctx._descendantrev = self._descendantrev
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 return fctx
894 return fctx
895
895
896 def parents(self):
896 def parents(self):
897 _path = self._path
897 _path = self._path
898 fl = self._filelog
898 fl = self._filelog
899 parents = self._filelog.parents(self._filenode)
899 parents = self._filelog.parents(self._filenode)
900 pl = [(_path, node, fl) for node in parents if node != nullid]
900 pl = [(_path, node, fl) for node in parents if node != nullid]
901
901
902 r = fl.renamed(self._filenode)
902 r = fl.renamed(self._filenode)
903 if r:
903 if r:
904 # - In the simple rename case, both parent are nullid, pl is empty.
904 # - In the simple rename case, both parent are nullid, pl is empty.
905 # - In case of merge, only one of the parent is null id and should
905 # - In case of merge, only one of the parent is null id and should
906 # be replaced with the rename information. This parent is -always-
906 # be replaced with the rename information. This parent is -always-
907 # the first one.
907 # the first one.
908 #
908 #
909 # As null id have always been filtered out in the previous list
909 # As null id have always been filtered out in the previous list
910 # comprehension, inserting to 0 will always result in "replacing
910 # comprehension, inserting to 0 will always result in "replacing
911 # first nullid parent with rename information.
911 # first nullid parent with rename information.
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913
913
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915
915
916 def p1(self):
916 def p1(self):
917 return self.parents()[0]
917 return self.parents()[0]
918
918
919 def p2(self):
919 def p2(self):
920 p = self.parents()
920 p = self.parents()
921 if len(p) == 2:
921 if len(p) == 2:
922 return p[1]
922 return p[1]
923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924
924
925 def annotate(self, follow=False, linenumber=False, diffopts=None):
925 def annotate(self, follow=False, linenumber=False, diffopts=None):
926 '''returns a list of tuples of ((ctx, number), line) for each line
926 '''returns a list of tuples of ((ctx, number), line) for each line
927 in the file, where ctx is the filectx of the node where
927 in the file, where ctx is the filectx of the node where
928 that line was last changed; if linenumber parameter is true, number is
928 that line was last changed; if linenumber parameter is true, number is
929 the line number at the first appearance in the managed file, otherwise,
929 the line number at the first appearance in the managed file, otherwise,
930 number has a fixed value of False.
930 number has a fixed value of False.
931 '''
931 '''
932
932
933 def lines(text):
933 def lines(text):
934 if text.endswith("\n"):
934 if text.endswith("\n"):
935 return text.count("\n")
935 return text.count("\n")
936 return text.count("\n") + int(bool(text))
936 return text.count("\n") + int(bool(text))
937
937
938 if linenumber:
938 if linenumber:
939 def decorate(text, rev):
939 def decorate(text, rev):
940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
941 else:
941 else:
942 def decorate(text, rev):
942 def decorate(text, rev):
943 return ([(rev, False)] * lines(text), text)
943 return ([(rev, False)] * lines(text), text)
944
944
945 def pair(parent, child):
945 def pair(parent, child):
946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
947 for (a1, a2, b1, b2), t in blocks:
947 for (a1, a2, b1, b2), t in blocks:
948 # Changed blocks ('!') or blocks made only of blank lines ('~')
948 # Changed blocks ('!') or blocks made only of blank lines ('~')
949 # belong to the child.
949 # belong to the child.
950 if t == '=':
950 if t == '=':
951 child[0][b1:b2] = parent[0][a1:a2]
951 child[0][b1:b2] = parent[0][a1:a2]
952 return child
952 return child
953
953
954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
955
955
956 def parents(f):
956 def parents(f):
957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
960 # isn't an ancestor of the srcrev.
960 # isn't an ancestor of the srcrev.
961 f._changeid
961 f._changeid
962 pl = f.parents()
962 pl = f.parents()
963
963
964 # Don't return renamed parents if we aren't following.
964 # Don't return renamed parents if we aren't following.
965 if not follow:
965 if not follow:
966 pl = [p for p in pl if p.path() == f.path()]
966 pl = [p for p in pl if p.path() == f.path()]
967
967
968 # renamed filectx won't have a filelog yet, so set it
968 # renamed filectx won't have a filelog yet, so set it
969 # from the cache to save time
969 # from the cache to save time
970 for p in pl:
970 for p in pl:
971 if not '_filelog' in p.__dict__:
971 if not '_filelog' in p.__dict__:
972 p._filelog = getlog(p.path())
972 p._filelog = getlog(p.path())
973
973
974 return pl
974 return pl
975
975
976 # use linkrev to find the first changeset where self appeared
976 # use linkrev to find the first changeset where self appeared
977 base = self
977 base = self
978 introrev = self.introrev()
978 introrev = self.introrev()
979 if self.rev() != introrev:
979 if self.rev() != introrev:
980 base = self.filectx(self.filenode(), changeid=introrev)
980 base = self.filectx(self.filenode(), changeid=introrev)
981 if getattr(base, '_ancestrycontext', None) is None:
981 if getattr(base, '_ancestrycontext', None) is None:
982 cl = self._repo.changelog
982 cl = self._repo.changelog
983 if introrev is None:
983 if introrev is None:
984 # wctx is not inclusive, but works because _ancestrycontext
984 # wctx is not inclusive, but works because _ancestrycontext
985 # is used to test filelog revisions
985 # is used to test filelog revisions
986 ac = cl.ancestors([p.rev() for p in base.parents()],
986 ac = cl.ancestors([p.rev() for p in base.parents()],
987 inclusive=True)
987 inclusive=True)
988 else:
988 else:
989 ac = cl.ancestors([introrev], inclusive=True)
989 ac = cl.ancestors([introrev], inclusive=True)
990 base._ancestrycontext = ac
990 base._ancestrycontext = ac
991
991
992 # This algorithm would prefer to be recursive, but Python is a
992 # This algorithm would prefer to be recursive, but Python is a
993 # bit recursion-hostile. Instead we do an iterative
993 # bit recursion-hostile. Instead we do an iterative
994 # depth-first search.
994 # depth-first search.
995
995
996 # 1st DFS pre-calculates pcache and needed
996 # 1st DFS pre-calculates pcache and needed
997 visit = [base]
997 visit = [base]
998 pcache = {}
998 pcache = {}
999 needed = {base: 1}
999 needed = {base: 1}
1000 while visit:
1000 while visit:
1001 f = visit.pop()
1001 f = visit.pop()
1002 if f in pcache:
1002 if f in pcache:
1003 continue
1003 continue
1004 pl = parents(f)
1004 pl = parents(f)
1005 pcache[f] = pl
1005 pcache[f] = pl
1006 for p in pl:
1006 for p in pl:
1007 needed[p] = needed.get(p, 0) + 1
1007 needed[p] = needed.get(p, 0) + 1
1008 if p not in pcache:
1008 if p not in pcache:
1009 visit.append(p)
1009 visit.append(p)
1010
1010
1011 # 2nd DFS does the actual annotate
1011 # 2nd DFS does the actual annotate
1012 visit[:] = [base]
1012 visit[:] = [base]
1013 hist = {}
1013 hist = {}
1014 while visit:
1014 while visit:
1015 f = visit[-1]
1015 f = visit[-1]
1016 if f in hist:
1016 if f in hist:
1017 visit.pop()
1017 visit.pop()
1018 continue
1018 continue
1019
1019
1020 ready = True
1020 ready = True
1021 pl = pcache[f]
1021 pl = pcache[f]
1022 for p in pl:
1022 for p in pl:
1023 if p not in hist:
1023 if p not in hist:
1024 ready = False
1024 ready = False
1025 visit.append(p)
1025 visit.append(p)
1026 if ready:
1026 if ready:
1027 visit.pop()
1027 visit.pop()
1028 curr = decorate(f.data(), f)
1028 curr = decorate(f.data(), f)
1029 for p in pl:
1029 for p in pl:
1030 curr = pair(hist[p], curr)
1030 curr = pair(hist[p], curr)
1031 if needed[p] == 1:
1031 if needed[p] == 1:
1032 del hist[p]
1032 del hist[p]
1033 del needed[p]
1033 del needed[p]
1034 else:
1034 else:
1035 needed[p] -= 1
1035 needed[p] -= 1
1036
1036
1037 hist[f] = curr
1037 hist[f] = curr
1038 del pcache[f]
1038 del pcache[f]
1039
1039
1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1041
1041
1042 def ancestors(self, followfirst=False):
1042 def ancestors(self, followfirst=False):
1043 visit = {}
1043 visit = {}
1044 c = self
1044 c = self
1045 if followfirst:
1045 if followfirst:
1046 cut = 1
1046 cut = 1
1047 else:
1047 else:
1048 cut = None
1048 cut = None
1049
1049
1050 while True:
1050 while True:
1051 for parent in c.parents()[:cut]:
1051 for parent in c.parents()[:cut]:
1052 visit[(parent.linkrev(), parent.filenode())] = parent
1052 visit[(parent.linkrev(), parent.filenode())] = parent
1053 if not visit:
1053 if not visit:
1054 break
1054 break
1055 c = visit.pop(max(visit))
1055 c = visit.pop(max(visit))
1056 yield c
1056 yield c
1057
1057
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision must be supplied;
        # the missing pieces are derived lazily via basefilectx's
        # propertycaches.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # Changeset context owning this file revision, built on demand from
        # the stored changeid when not given to the constructor.
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Revision data as stored in the filelog; raw=True presumably skips
        # content transformation on read -- confirm in revlog.revision.
        return self._filelog.revision(self._filenode, raw=True)

    def data(self):
        """Return the file content, honoring the censor.policy setting."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # Size of this file revision as recorded by the filelog.
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # A parent already holds this exact file revision, so
                    # the rename does not belong to this changeset.
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1158
1158
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Compare fctx2 -> fctx1 with respect to `linerange2`.

    Return a `(diffinrange, linerange1)` pair where `diffinrange` tells
    whether the diff from fctx2 to fctx1 touches `linerange2`, and
    `linerange1` is the corresponding range of that block in fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    hasdiff = any(blocktype == '!' for _, blocktype in inrangeblocks)
    return hasdiff, linerange1
1168
1168
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield `(fctx, linerange)` pairs for ancestors of `fctx` with respect
    to the block of lines within `fromline`-`toline` range.

    `linerange` is a `(fromline, toline)` tuple giving the position of the
    block inside each yielded file context.  When `followfirst` is true,
    only first parents are followed.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    # Candidates keyed by (linkrev, filenode) so that max() visits the most
    # recent one first; values carry the context and the block's line range
    # inside that context.
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
1196
1196
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # Not committed yet: no revision number or node id.
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            # Explicit status; otherwise the _status propertycache queries
            # the repository lazily.
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # Rendered as "<first parent>+" to flag the uncommitted state.
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        # Python 2 truthiness protocol: always truthy.
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, self._status.added),
                     (modifiednodeid, self._status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        # Default: ask the repository for the working directory status.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # An uncommitted context has no recorded subrepo revision.
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # An uncommitted changeset carries no tags.
        return []

    def bookmarks(self):
        # Inherit the union of the parents' bookmarks.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            # never in an earlier phase than any parent
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Use the synthesized manifest if it has already been computed...
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        # ...otherwise fall back to the flag function.
        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # Parents first, then every ancestor of the parents.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
1412
1412
1413 class workingctx(committablectx):
1413 class workingctx(committablectx):
1414 """A workingctx object makes access to data related to
1414 """A workingctx object makes access to data related to
1415 the current working directory convenient.
1415 the current working directory convenient.
1416 date - any valid date string or (unixtime, offset), or None.
1416 date - any valid date string or (unixtime, offset), or None.
1417 user - username string, or None.
1417 user - username string, or None.
1418 extra - a dictionary of extra values, or None.
1418 extra - a dictionary of extra values, or None.
1419 changes - a list of file lists as returned by localrepo.status()
1419 changes - a list of file lists as returned by localrepo.status()
1420 or None to use the repository status.
1420 or None to use the repository status.
1421 """
1421 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # All state handling lives in committablectx; workingctx only
        # specializes behavior on top of it.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1425
1425
1426 def __iter__(self):
1426 def __iter__(self):
1427 d = self._repo.dirstate
1427 d = self._repo.dirstate
1428 for f in d:
1428 for f in d:
1429 if d[f] != 'r':
1429 if d[f] != 'r':
1430 yield f
1430 yield f
1431
1431
1432 def __contains__(self, key):
1432 def __contains__(self, key):
1433 return self._repo.dirstate[key] not in "?r"
1433 return self._repo.dirstate[key] not in "?r"
1434
1434
    def hex(self):
        # The working directory has no real node; report the 'wdirid'
        # pseudo-identifier shared by all working contexts.
        return hex(wdirid)
1437
1437
1438 @propertycache
1438 @propertycache
1439 def _parents(self):
1439 def _parents(self):
1440 p = self._repo.dirstate.parents()
1440 p = self._repo.dirstate.parents()
1441 if p[1] == nullid:
1441 if p[1] == nullid:
1442 p = p[:-1]
1442 p = p[:-1]
1443 return [changectx(self._repo, x) for x in p]
1443 return [changectx(self._repo, x) for x in p]
1444
1444
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # An already-open filelog may be passed in to avoid reopening it.
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1449
1449
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # NOTE: the result is truthy/falsy rather than strictly bool -- the
        # final expression may return e.g. a context or a file list.
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1461
1461
    def add(self, list, prefix=""):
        """Schedule the given files for addition; return rejected names."""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn (but still add) for files over ~10MB
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, modified or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1493
1493
1494 def forget(self, files, prefix=""):
1494 def forget(self, files, prefix=""):
1495 join = lambda f: os.path.join(prefix, f)
1495 join = lambda f: os.path.join(prefix, f)
1496 with self._repo.wlock():
1496 with self._repo.wlock():
1497 rejected = []
1497 rejected = []
1498 for f in files:
1498 for f in files:
1499 if f not in self._repo.dirstate:
1499 if f not in self._repo.dirstate:
1500 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1500 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1501 rejected.append(f)
1501 rejected.append(f)
1502 elif self._repo.dirstate[f] != 'a':
1502 elif self._repo.dirstate[f] != 'a':
1503 self._repo.dirstate.remove(f)
1503 self._repo.dirstate.remove(f)
1504 else:
1504 else:
1505 self._repo.dirstate.drop(f)
1505 self._repo.dirstate.drop(f)
1506 return rejected
1506 return rejected
1507
1507
1508 def undelete(self, list):
1508 def undelete(self, list):
1509 pctxs = self.parents()
1509 pctxs = self.parents()
1510 with self._repo.wlock():
1510 with self._repo.wlock():
1511 for f in list:
1511 for f in list:
1512 if self._repo.dirstate[f] != 'r':
1512 if self._repo.dirstate[f] != 'r':
1513 self._repo.ui.warn(_("%s not removed!\n") % f)
1513 self._repo.ui.warn(_("%s not removed!\n") % f)
1514 else:
1514 else:
1515 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1515 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1516 t = fctx.data()
1516 t = fctx.data()
1517 self._repo.wwrite(f, t, fctx.flags())
1517 self._repo.wwrite(f, t, fctx.flags())
1518 self._repo.dirstate.normal(f)
1518 self._repo.dirstate.normal(f)
1519
1519
    def copy(self, source, dest):
        """Record `dest` as a copy of `source` in the dirstate.

        `dest` must already exist in the working directory as a regular
        file or symlink; otherwise a warning is emitted and nothing is
        recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    # unknown: start tracking the destination first
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1538
1538
1539 def match(self, pats=[], include=None, exclude=None, default='glob',
1539 def match(self, pats=[], include=None, exclude=None, default='glob',
1540 listsubrepos=False, badfn=None):
1540 listsubrepos=False, badfn=None):
1541 r = self._repo
1541 r = self._repo
1542
1542
1543 # Only a case insensitive filesystem needs magic to translate user input
1543 # Only a case insensitive filesystem needs magic to translate user input
1544 # to actual case in the filesystem.
1544 # to actual case in the filesystem.
1545 if not util.fscasesensitive(r.root):
1545 if not util.fscasesensitive(r.root):
1546 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1546 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1547 exclude, default, r.auditor, self,
1547 exclude, default, r.auditor, self,
1548 listsubrepos=listsubrepos,
1548 listsubrepos=listsubrepos,
1549 badfn=badfn)
1549 badfn=badfn)
1550 return matchmod.match(r.root, r.getcwd(), pats,
1550 return matchmod.match(r.root, r.getcwd(), pats,
1551 include, exclude, default,
1551 include, exclude, default,
1552 auditor=r.auditor, ctx=self,
1552 auditor=r.auditor, ctx=self,
1553 listsubrepos=listsubrepos, badfn=badfn)
1553 listsubrepos=listsubrepos, badfn=badfn)
1554
1554
1555 def _filtersuspectsymlink(self, files):
1555 def _filtersuspectsymlink(self, files):
1556 if not files or self._repo.dirstate._checklink:
1556 if not files or self._repo.dirstate._checklink:
1557 return files
1557 return files
1558
1558
1559 # Symlink placeholders may get non-symlink-like contents
1559 # Symlink placeholders may get non-symlink-like contents
1560 # via user error or dereferencing by NFS or Samba servers,
1560 # via user error or dereferencing by NFS or Samba servers,
1561 # so we filter out any placeholders that don't look like a
1561 # so we filter out any placeholders that don't look like a
1562 # symlink
1562 # symlink
1563 sane = []
1563 sane = []
1564 for f in files:
1564 for f in files:
1565 if self.flags(f) == 'l':
1565 if self.flags(f) == 'l':
1566 d = self[f].data()
1566 d = self[f].data()
1567 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1567 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1568 self._repo.ui.debug('ignoring suspect symlink placeholder'
1568 self._repo.ui.debug('ignoring suspect symlink placeholder'
1569 ' "%s"\n' % f)
1569 ' "%s"\n' % f)
1570 continue
1570 continue
1571 sane.append(f)
1571 sane.append(f)
1572 return sane
1572 return sane
1573
1573
1574 def _checklookup(self, files):
1574 def _checklookup(self, files):
1575 # check for any possibly clean files
1575 # check for any possibly clean files
1576 if not files:
1576 if not files:
1577 return [], []
1577 return [], []
1578
1578
1579 modified = []
1579 modified = []
1580 fixup = []
1580 fixup = []
1581 pctx = self._parents[0]
1581 pctx = self._parents[0]
1582 # do a full compare of any files that might have changed
1582 # do a full compare of any files that might have changed
1583 for f in sorted(files):
1583 for f in sorted(files):
1584 if (f not in pctx or self.flags(f) != pctx.flags(f)
1584 if (f not in pctx or self.flags(f) != pctx.flags(f)
1585 or pctx[f].cmp(self[f])):
1585 or pctx[f].cmp(self[f])):
1586 modified.append(f)
1586 modified.append(f)
1587 else:
1587 else:
1588 fixup.append(f)
1588 fixup.append(f)
1589
1589
1590 # update dirstate for files that are actually clean
1590 # update dirstate for files that are actually clean
1591 if fixup:
1591 if fixup:
1592 try:
1592 try:
1593 # updating the dirstate is optional
1593 # updating the dirstate is optional
1594 # so we don't wait on the lock
1594 # so we don't wait on the lock
1595 # wlock can invalidate the dirstate, so cache normal _after_
1595 # wlock can invalidate the dirstate, so cache normal _after_
1596 # taking the lock
1596 # taking the lock
1597 with self._repo.wlock(False):
1597 with self._repo.wlock(False):
1598 normal = self._repo.dirstate.normal
1598 normal = self._repo.dirstate.normal
1599 for f in fixup:
1599 for f in fixup:
1600 normal(f)
1600 normal(f)
1601 # write changes out explicitly, because nesting
1601 # write changes out explicitly, because nesting
1602 # wlock at runtime may prevent 'wlock.release()'
1602 # wlock at runtime may prevent 'wlock.release()'
1603 # after this block from doing so for subsequent
1603 # after this block from doing so for subsequent
1604 # changing files
1604 # changing files
1605 self._repo.dirstate.write(self._repo.currenttransaction())
1605 self._repo.dirstate.write(self._repo.currenttransaction())
1606 except error.LockError:
1606 except error.LockError:
1607 pass
1607 pass
1608 return modified, fixup
1608 return modified, fixup
1609
1609
1610 def _manifestmatches(self, match, s):
1610 def _manifestmatches(self, match, s):
1611 """Slow path for workingctx
1611 """Slow path for workingctx
1612
1612
1613 The fast path is when we compare the working directory to its parent
1613 The fast path is when we compare the working directory to its parent
1614 which means this function is comparing with a non-parent; therefore we
1614 which means this function is comparing with a non-parent; therefore we
1615 need to build a manifest and return what matches.
1615 need to build a manifest and return what matches.
1616 """
1616 """
1617 mf = self._repo['.']._manifestmatches(match, s)
1617 mf = self._repo['.']._manifestmatches(match, s)
1618 for f in s.modified + s.added:
1618 for f in s.modified + s.added:
1619 mf[f] = newnodeid
1619 mf[f] = newnodeid
1620 mf.setflag(f, self.flags(f))
1620 mf.setflag(f, self.flags(f))
1621 for f in s.removed:
1621 for f in s.removed:
1622 if f in mf:
1622 if f in mf:
1623 del mf[f]
1623 del mf[f]
1624 return mf
1624 return mf
1625
1625
1626 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1626 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1627 unknown=False):
1627 unknown=False):
1628 '''Gets the status from the dirstate -- internal use only.'''
1628 '''Gets the status from the dirstate -- internal use only.'''
1629 listignored, listclean, listunknown = ignored, clean, unknown
1629 listignored, listclean, listunknown = ignored, clean, unknown
1630 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1630 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1631 subrepos = []
1631 subrepos = []
1632 if '.hgsub' in self:
1632 if '.hgsub' in self:
1633 subrepos = sorted(self.substate)
1633 subrepos = sorted(self.substate)
1634 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1634 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1635 listclean, listunknown)
1635 listclean, listunknown)
1636
1636
1637 # check for any possibly clean files
1637 # check for any possibly clean files
1638 if cmp:
1638 if cmp:
1639 modified2, fixup = self._checklookup(cmp)
1639 modified2, fixup = self._checklookup(cmp)
1640 s.modified.extend(modified2)
1640 s.modified.extend(modified2)
1641
1641
1642 # update dirstate for files that are actually clean
1642 # update dirstate for files that are actually clean
1643 if fixup and listclean:
1643 if fixup and listclean:
1644 s.clean.extend(fixup)
1644 s.clean.extend(fixup)
1645
1645
1646 if match.always():
1646 if match.always():
1647 # cache for performance
1647 # cache for performance
1648 if s.unknown or s.ignored or s.clean:
1648 if s.unknown or s.ignored or s.clean:
1649 # "_status" is cached with list*=False in the normal route
1649 # "_status" is cached with list*=False in the normal route
1650 self._status = scmutil.status(s.modified, s.added, s.removed,
1650 self._status = scmutil.status(s.modified, s.added, s.removed,
1651 s.deleted, [], [], [])
1651 s.deleted, [], [], [])
1652 else:
1652 else:
1653 self._status = s
1653 self._status = s
1654
1654
1655 return s
1655 return s
1656
1656
1657 def _buildstatus(self, other, s, match, listignored, listclean,
1657 def _buildstatus(self, other, s, match, listignored, listclean,
1658 listunknown):
1658 listunknown):
1659 """build a status with respect to another context
1659 """build a status with respect to another context
1660
1660
1661 This includes logic for maintaining the fast path of status when
1661 This includes logic for maintaining the fast path of status when
1662 comparing the working directory against its parent, which is to skip
1662 comparing the working directory against its parent, which is to skip
1663 building a new manifest if self (working directory) is not comparing
1663 building a new manifest if self (working directory) is not comparing
1664 against its parent (repo['.']).
1664 against its parent (repo['.']).
1665 """
1665 """
1666 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1666 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1667 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1667 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1668 # might have accidentally ended up with the entire contents of the file
1668 # might have accidentally ended up with the entire contents of the file
1669 # they are supposed to be linking to.
1669 # they are supposed to be linking to.
1670 s.modified[:] = self._filtersuspectsymlink(s.modified)
1670 s.modified[:] = self._filtersuspectsymlink(s.modified)
1671 if other != self._repo['.']:
1671 if other != self._repo['.']:
1672 s = super(workingctx, self)._buildstatus(other, s, match,
1672 s = super(workingctx, self)._buildstatus(other, s, match,
1673 listignored, listclean,
1673 listignored, listclean,
1674 listunknown)
1674 listunknown)
1675 return s
1675 return s
1676
1676
1677 def _matchstatus(self, other, match):
1677 def _matchstatus(self, other, match):
1678 """override the match method with a filter for directory patterns
1678 """override the match method with a filter for directory patterns
1679
1679
1680 We use inheritance to customize the match.bad method only in cases of
1680 We use inheritance to customize the match.bad method only in cases of
1681 workingctx since it belongs only to the working directory when
1681 workingctx since it belongs only to the working directory when
1682 comparing against the parent changeset.
1682 comparing against the parent changeset.
1683
1683
1684 If we aren't comparing against the working directory's parent, then we
1684 If we aren't comparing against the working directory's parent, then we
1685 just use the default match object sent to us.
1685 just use the default match object sent to us.
1686 """
1686 """
1687 superself = super(workingctx, self)
1687 superself = super(workingctx, self)
1688 match = superself._matchstatus(other, match)
1688 match = superself._matchstatus(other, match)
1689 if other != self._repo['.']:
1689 if other != self._repo['.']:
1690 def bad(f, msg):
1690 def bad(f, msg):
1691 # 'f' may be a directory pattern from 'match.files()',
1691 # 'f' may be a directory pattern from 'match.files()',
1692 # so 'f not in ctx1' is not enough
1692 # so 'f not in ctx1' is not enough
1693 if f not in other and not other.hasdir(f):
1693 if f not in other and not other.hasdir(f):
1694 self._repo.ui.warn('%s: %s\n' %
1694 self._repo.ui.warn('%s: %s\n' %
1695 (self._repo.dirstate.pathto(f), msg))
1695 (self._repo.dirstate.pathto(f), msg))
1696 match.bad = bad
1696 match.bad = bad
1697 return match
1697 return match
1698
1698
1699 class committablefilectx(basefilectx):
1699 class committablefilectx(basefilectx):
1700 """A committablefilectx provides common functionality for a file context
1700 """A committablefilectx provides common functionality for a file context
1701 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1701 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1702 def __init__(self, repo, path, filelog=None, ctx=None):
1702 def __init__(self, repo, path, filelog=None, ctx=None):
1703 self._repo = repo
1703 self._repo = repo
1704 self._path = path
1704 self._path = path
1705 self._changeid = None
1705 self._changeid = None
1706 self._filerev = self._filenode = None
1706 self._filerev = self._filenode = None
1707
1707
1708 if filelog is not None:
1708 if filelog is not None:
1709 self._filelog = filelog
1709 self._filelog = filelog
1710 if ctx:
1710 if ctx:
1711 self._changectx = ctx
1711 self._changectx = ctx
1712
1712
1713 def __nonzero__(self):
1713 def __nonzero__(self):
1714 return True
1714 return True
1715
1715
1716 def linkrev(self):
1716 def linkrev(self):
1717 # linked to self._changectx no matter if file is modified or not
1717 # linked to self._changectx no matter if file is modified or not
1718 return self.rev()
1718 return self.rev()
1719
1719
1720 def parents(self):
1720 def parents(self):
1721 '''return parent filectxs, following copies if necessary'''
1721 '''return parent filectxs, following copies if necessary'''
1722 def filenode(ctx, path):
1722 def filenode(ctx, path):
1723 return ctx._manifest.get(path, nullid)
1723 return ctx._manifest.get(path, nullid)
1724
1724
1725 path = self._path
1725 path = self._path
1726 fl = self._filelog
1726 fl = self._filelog
1727 pcl = self._changectx._parents
1727 pcl = self._changectx._parents
1728 renamed = self.renamed()
1728 renamed = self.renamed()
1729
1729
1730 if renamed:
1730 if renamed:
1731 pl = [renamed + (None,)]
1731 pl = [renamed + (None,)]
1732 else:
1732 else:
1733 pl = [(path, filenode(pcl[0], path), fl)]
1733 pl = [(path, filenode(pcl[0], path), fl)]
1734
1734
1735 for pc in pcl[1:]:
1735 for pc in pcl[1:]:
1736 pl.append((path, filenode(pc, path), fl))
1736 pl.append((path, filenode(pc, path), fl))
1737
1737
1738 return [self._parentfilectx(p, fileid=n, filelog=l)
1738 return [self._parentfilectx(p, fileid=n, filelog=l)
1739 for p, n, l in pl if n != nullid]
1739 for p, n, l in pl if n != nullid]
1740
1740
1741 def children(self):
1741 def children(self):
1742 return []
1742 return []
1743
1743
1744 class workingfilectx(committablefilectx):
1744 class workingfilectx(committablefilectx):
1745 """A workingfilectx object makes access to data related to a particular
1745 """A workingfilectx object makes access to data related to a particular
1746 file in the working directory convenient."""
1746 file in the working directory convenient."""
1747 def __init__(self, repo, path, filelog=None, workingctx=None):
1747 def __init__(self, repo, path, filelog=None, workingctx=None):
1748 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1748 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1749
1749
1750 @propertycache
1750 @propertycache
1751 def _changectx(self):
1751 def _changectx(self):
1752 return workingctx(self._repo)
1752 return workingctx(self._repo)
1753
1753
1754 def data(self):
1754 def data(self):
1755 return self._repo.wread(self._path)
1755 return self._repo.wread(self._path)
1756 def renamed(self):
1756 def renamed(self):
1757 rp = self._repo.dirstate.copied(self._path)
1757 rp = self._repo.dirstate.copied(self._path)
1758 if not rp:
1758 if not rp:
1759 return None
1759 return None
1760 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1760 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1761
1761
1762 def size(self):
1762 def size(self):
1763 return self._repo.wvfs.lstat(self._path).st_size
1763 return self._repo.wvfs.lstat(self._path).st_size
1764 def date(self):
1764 def date(self):
1765 t, tz = self._changectx.date()
1765 t, tz = self._changectx.date()
1766 try:
1766 try:
1767 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1767 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1768 except OSError as err:
1768 except OSError as err:
1769 if err.errno != errno.ENOENT:
1769 if err.errno != errno.ENOENT:
1770 raise
1770 raise
1771 return (t, tz)
1771 return (t, tz)
1772
1772
1773 def cmp(self, fctx):
1773 def cmp(self, fctx):
1774 """compare with other file context
1774 """compare with other file context
1775
1775
1776 returns True if different than fctx.
1776 returns True if different than fctx.
1777 """
1777 """
1778 # fctx should be a filectx (not a workingfilectx)
1778 # fctx should be a filectx (not a workingfilectx)
1779 # invert comparison to reuse the same code path
1779 # invert comparison to reuse the same code path
1780 return fctx.cmp(self)
1780 return fctx.cmp(self)
1781
1781
1782 def remove(self, ignoremissing=False):
1782 def remove(self, ignoremissing=False):
1783 """wraps unlink for a repo's working directory"""
1783 """wraps unlink for a repo's working directory"""
1784 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1784 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1785
1785
1786 def write(self, data, flags):
1786 def write(self, data, flags):
1787 """wraps repo.wwrite"""
1787 """wraps repo.wwrite"""
1788 self._repo.wwrite(self._path, data, flags)
1788 self._repo.wwrite(self._path, data, flags)
1789
1789
1790 class workingcommitctx(workingctx):
1790 class workingcommitctx(workingctx):
1791 """A workingcommitctx object makes access to data related to
1791 """A workingcommitctx object makes access to data related to
1792 the revision being committed convenient.
1792 the revision being committed convenient.
1793
1793
1794 This hides changes in the working directory, if they aren't
1794 This hides changes in the working directory, if they aren't
1795 committed in this context.
1795 committed in this context.
1796 """
1796 """
1797 def __init__(self, repo, changes,
1797 def __init__(self, repo, changes,
1798 text="", user=None, date=None, extra=None):
1798 text="", user=None, date=None, extra=None):
1799 super(workingctx, self).__init__(repo, text, user, date, extra,
1799 super(workingctx, self).__init__(repo, text, user, date, extra,
1800 changes)
1800 changes)
1801
1801
1802 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1802 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1803 unknown=False):
1803 unknown=False):
1804 """Return matched files only in ``self._status``
1804 """Return matched files only in ``self._status``
1805
1805
1806 Uncommitted files appear "clean" via this context, even if
1806 Uncommitted files appear "clean" via this context, even if
1807 they aren't actually so in the working directory.
1807 they aren't actually so in the working directory.
1808 """
1808 """
1809 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1809 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1810 if clean:
1810 if clean:
1811 clean = [f for f in self._manifest if f not in self._changedset]
1811 clean = [f for f in self._manifest if f not in self._changedset]
1812 else:
1812 else:
1813 clean = []
1813 clean = []
1814 return scmutil.status([f for f in self._status.modified if match(f)],
1814 return scmutil.status([f for f in self._status.modified if match(f)],
1815 [f for f in self._status.added if match(f)],
1815 [f for f in self._status.added if match(f)],
1816 [f for f in self._status.removed if match(f)],
1816 [f for f in self._status.removed if match(f)],
1817 [], [], [], clean)
1817 [], [], [], clean)
1818
1818
1819 @propertycache
1819 @propertycache
1820 def _changedset(self):
1820 def _changedset(self):
1821 """Return the set of files changed in this context
1821 """Return the set of files changed in this context
1822 """
1822 """
1823 changed = set(self._status.modified)
1823 changed = set(self._status.modified)
1824 changed.update(self._status.added)
1824 changed.update(self._status.added)
1825 changed.update(self._status.removed)
1825 changed.update(self._status.removed)
1826 return changed
1826 return changed
1827
1827
1828 def makecachingfilectxfn(func):
1828 def makecachingfilectxfn(func):
1829 """Create a filectxfn that caches based on the path.
1829 """Create a filectxfn that caches based on the path.
1830
1830
1831 We can't use util.cachefunc because it uses all arguments as the cache
1831 We can't use util.cachefunc because it uses all arguments as the cache
1832 key and this creates a cycle since the arguments include the repo and
1832 key and this creates a cycle since the arguments include the repo and
1833 memctx.
1833 memctx.
1834 """
1834 """
1835 cache = {}
1835 cache = {}
1836
1836
1837 def getfilectx(repo, memctx, path):
1837 def getfilectx(repo, memctx, path):
1838 if path not in cache:
1838 if path not in cache:
1839 cache[path] = func(repo, memctx, path)
1839 cache[path] = func(repo, memctx, path)
1840 return cache[path]
1840 return cache[path]
1841
1841
1842 return getfilectx
1842 return getfilectx
1843
1843
1844 class memctx(committablectx):
1844 class memctx(committablectx):
1845 """Use memctx to perform in-memory commits via localrepo.commitctx().
1845 """Use memctx to perform in-memory commits via localrepo.commitctx().
1846
1846
1847 Revision information is supplied at initialization time while
1847 Revision information is supplied at initialization time while
1848 related files data and is made available through a callback
1848 related files data and is made available through a callback
1849 mechanism. 'repo' is the current localrepo, 'parents' is a
1849 mechanism. 'repo' is the current localrepo, 'parents' is a
1850 sequence of two parent revisions identifiers (pass None for every
1850 sequence of two parent revisions identifiers (pass None for every
1851 missing parent), 'text' is the commit message and 'files' lists
1851 missing parent), 'text' is the commit message and 'files' lists
1852 names of files touched by the revision (normalized and relative to
1852 names of files touched by the revision (normalized and relative to
1853 repository root).
1853 repository root).
1854
1854
1855 filectxfn(repo, memctx, path) is a callable receiving the
1855 filectxfn(repo, memctx, path) is a callable receiving the
1856 repository, the current memctx object and the normalized path of
1856 repository, the current memctx object and the normalized path of
1857 requested file, relative to repository root. It is fired by the
1857 requested file, relative to repository root. It is fired by the
1858 commit function for every file in 'files', but calls order is
1858 commit function for every file in 'files', but calls order is
1859 undefined. If the file is available in the revision being
1859 undefined. If the file is available in the revision being
1860 committed (updated or added), filectxfn returns a memfilectx
1860 committed (updated or added), filectxfn returns a memfilectx
1861 object. If the file was removed, filectxfn raises an
1861 object. If the file was removed, filectxfn raises an
1862 IOError. Moved files are represented by marking the source file
1862 IOError. Moved files are represented by marking the source file
1863 removed and the new file added with copy information (see
1863 removed and the new file added with copy information (see
1864 memfilectx).
1864 memfilectx).
1865
1865
1866 user receives the committer name and defaults to current
1866 user receives the committer name and defaults to current
1867 repository username, date is the commit date in any format
1867 repository username, date is the commit date in any format
1868 supported by util.parsedate() and defaults to current date, extra
1868 supported by util.parsedate() and defaults to current date, extra
1869 is a dictionary of metadata or is left empty.
1869 is a dictionary of metadata or is left empty.
1870 """
1870 """
1871
1871
1872 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1872 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1873 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1873 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1874 # this field to determine what to do in filectxfn.
1874 # this field to determine what to do in filectxfn.
1875 _returnnoneformissingfiles = True
1875 _returnnoneformissingfiles = True
1876
1876
1877 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1877 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1878 date=None, extra=None, editor=False):
1878 date=None, extra=None, editor=False):
1879 super(memctx, self).__init__(repo, text, user, date, extra)
1879 super(memctx, self).__init__(repo, text, user, date, extra)
1880 self._rev = None
1880 self._rev = None
1881 self._node = None
1881 self._node = None
1882 parents = [(p or nullid) for p in parents]
1882 parents = [(p or nullid) for p in parents]
1883 p1, p2 = parents
1883 p1, p2 = parents
1884 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1884 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1885 files = sorted(set(files))
1885 files = sorted(set(files))
1886 self._files = files
1886 self._files = files
1887 self.substate = {}
1887 self.substate = {}
1888
1888
1889 # if store is not callable, wrap it in a function
1889 # if store is not callable, wrap it in a function
1890 if not callable(filectxfn):
1890 if not callable(filectxfn):
1891 def getfilectx(repo, memctx, path):
1891 def getfilectx(repo, memctx, path):
1892 fctx = filectxfn[path]
1892 fctx = filectxfn[path]
1893 # this is weird but apparently we only keep track of one parent
1893 # this is weird but apparently we only keep track of one parent
1894 # (why not only store that instead of a tuple?)
1894 # (why not only store that instead of a tuple?)
1895 copied = fctx.renamed()
1895 copied = fctx.renamed()
1896 if copied:
1896 if copied:
1897 copied = copied[0]
1897 copied = copied[0]
1898 return memfilectx(repo, path, fctx.data(),
1898 return memfilectx(repo, path, fctx.data(),
1899 islink=fctx.islink(), isexec=fctx.isexec(),
1899 islink=fctx.islink(), isexec=fctx.isexec(),
1900 copied=copied, memctx=memctx)
1900 copied=copied, memctx=memctx)
1901 self._filectxfn = getfilectx
1901 self._filectxfn = getfilectx
1902 else:
1902 else:
1903 # memoizing increases performance for e.g. vcs convert scenarios.
1903 # memoizing increases performance for e.g. vcs convert scenarios.
1904 self._filectxfn = makecachingfilectxfn(filectxfn)
1904 self._filectxfn = makecachingfilectxfn(filectxfn)
1905
1905
1906 if extra:
1906 if extra:
1907 self._extra = extra.copy()
1907 self._extra = extra.copy()
1908 else:
1908 else:
1909 self._extra = {}
1909 self._extra = {}
1910
1910
1911 if self._extra.get('branch', '') == '':
1911 if self._extra.get('branch', '') == '':
1912 self._extra['branch'] = 'default'
1912 self._extra['branch'] = 'default'
1913
1913
1914 if editor:
1914 if editor:
1915 self._text = editor(self._repo, self, [])
1915 self._text = editor(self._repo, self, [])
1916 self._repo.savecommitmessage(self._text)
1916 self._repo.savecommitmessage(self._text)
1917
1917
1918 def filectx(self, path, filelog=None):
1918 def filectx(self, path, filelog=None):
1919 """get a file context from the working directory
1919 """get a file context from the working directory
1920
1920
1921 Returns None if file doesn't exist and should be removed."""
1921 Returns None if file doesn't exist and should be removed."""
1922 return self._filectxfn(self._repo, self, path)
1922 return self._filectxfn(self._repo, self, path)
1923
1923
1924 def commit(self):
1924 def commit(self):
1925 """commit context to the repo"""
1925 """commit context to the repo"""
1926 return self._repo.commitctx(self)
1926 return self._repo.commitctx(self)
1927
1927
1928 @propertycache
1928 @propertycache
1929 def _manifest(self):
1929 def _manifest(self):
1930 """generate a manifest based on the return values of filectxfn"""
1930 """generate a manifest based on the return values of filectxfn"""
1931
1931
1932 # keep this simple for now; just worry about p1
1932 # keep this simple for now; just worry about p1
1933 pctx = self._parents[0]
1933 pctx = self._parents[0]
1934 man = pctx.manifest().copy()
1934 man = pctx.manifest().copy()
1935
1935
1936 for f in self._status.modified:
1936 for f in self._status.modified:
1937 p1node = nullid
1937 p1node = nullid
1938 p2node = nullid
1938 p2node = nullid
1939 p = pctx[f].parents() # if file isn't in pctx, check p2?
1939 p = pctx[f].parents() # if file isn't in pctx, check p2?
1940 if len(p) > 0:
1940 if len(p) > 0:
1941 p1node = p[0].filenode()
1941 p1node = p[0].filenode()
1942 if len(p) > 1:
1942 if len(p) > 1:
1943 p2node = p[1].filenode()
1943 p2node = p[1].filenode()
1944 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1944 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1945
1945
1946 for f in self._status.added:
1946 for f in self._status.added:
1947 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1947 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1948
1948
1949 for f in self._status.removed:
1949 for f in self._status.removed:
1950 if f in man:
1950 if f in man:
1951 del man[f]
1951 del man[f]
1952
1952
1953 return man
1953 return man
1954
1954
1955 @propertycache
1955 @propertycache
1956 def _status(self):
1956 def _status(self):
1957 """Calculate exact status from ``files`` specified at construction
1957 """Calculate exact status from ``files`` specified at construction
1958 """
1958 """
1959 man1 = self.p1().manifest()
1959 man1 = self.p1().manifest()
1960 p2 = self._parents[1]
1960 p2 = self._parents[1]
1961 # "1 < len(self._parents)" can't be used for checking
1961 # "1 < len(self._parents)" can't be used for checking
1962 # existence of the 2nd parent, because "memctx._parents" is
1962 # existence of the 2nd parent, because "memctx._parents" is
1963 # explicitly initialized by the list, of which length is 2.
1963 # explicitly initialized by the list, of which length is 2.
1964 if p2.node() != nullid:
1964 if p2.node() != nullid:
1965 man2 = p2.manifest()
1965 man2 = p2.manifest()
1966 managing = lambda f: f in man1 or f in man2
1966 managing = lambda f: f in man1 or f in man2
1967 else:
1967 else:
1968 managing = lambda f: f in man1
1968 managing = lambda f: f in man1
1969
1969
1970 modified, added, removed = [], [], []
1970 modified, added, removed = [], [], []
1971 for f in self._files:
1971 for f in self._files:
1972 if not managing(f):
1972 if not managing(f):
1973 added.append(f)
1973 added.append(f)
1974 elif self[f]:
1974 elif self[f]:
1975 modified.append(f)
1975 modified.append(f)
1976 else:
1976 else:
1977 removed.append(f)
1977 removed.append(f)
1978
1978
1979 return scmutil.status(modified, added, removed, [], [], [], [])
1979 return scmutil.status(modified, added, removed, [], [], [], [])
1980
1980
1981 class memfilectx(committablefilectx):
1981 class memfilectx(committablefilectx):
1982 """memfilectx represents an in-memory file to commit.
1982 """memfilectx represents an in-memory file to commit.
1983
1983
1984 See memctx and committablefilectx for more details.
1984 See memctx and committablefilectx for more details.
1985 """
1985 """
1986 def __init__(self, repo, path, data, islink=False,
1986 def __init__(self, repo, path, data, islink=False,
1987 isexec=False, copied=None, memctx=None):
1987 isexec=False, copied=None, memctx=None):
1988 """
1988 """
1989 path is the normalized file path relative to repository root.
1989 path is the normalized file path relative to repository root.
1990 data is the file content as a string.
1990 data is the file content as a string.
1991 islink is True if the file is a symbolic link.
1991 islink is True if the file is a symbolic link.
1992 isexec is True if the file is executable.
1992 isexec is True if the file is executable.
1993 copied is the source file path if current file was copied in the
1993 copied is the source file path if current file was copied in the
1994 revision being committed, or None."""
1994 revision being committed, or None."""
1995 super(memfilectx, self).__init__(repo, path, None, memctx)
1995 super(memfilectx, self).__init__(repo, path, None, memctx)
1996 self._data = data
1996 self._data = data
1997 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1997 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1998 self._copied = None
1998 self._copied = None
1999 if copied:
1999 if copied:
2000 self._copied = (copied, nullid)
2000 self._copied = (copied, nullid)
2001
2001
2002 def data(self):
2002 def data(self):
2003 return self._data
2003 return self._data
2004 def size(self):
2004 def size(self):
2005 return len(self.data())
2005 return len(self.data())
2006 def flags(self):
2006 def flags(self):
2007 return self._flags
2007 return self._flags
2008 def renamed(self):
2008 def renamed(self):
2009 return self._copied
2009 return self._copied
2010
2010
2011 def remove(self, ignoremissing=False):
2011 def remove(self, ignoremissing=False):
2012 """wraps unlink for a repo's working directory"""
2012 """wraps unlink for a repo's working directory"""
2013 # need to figure out what to do here
2013 # need to figure out what to do here
2014 del self._changectx[self._path]
2014 del self._changectx[self._path]
2015
2015
2016 def write(self, data, flags):
2016 def write(self, data, flags):
2017 """wraps repo.wwrite"""
2017 """wraps repo.wwrite"""
2018 self._data = data
2018 self._data = data
2019
2019
2020 class metadataonlyctx(committablectx):
2020 class metadataonlyctx(committablectx):
2021 """Like memctx but it's reusing the manifest of different commit.
2021 """Like memctx but it's reusing the manifest of different commit.
2022 Intended to be used by lightweight operations that are creating
2022 Intended to be used by lightweight operations that are creating
2023 metadata-only changes.
2023 metadata-only changes.
2024
2024
2025 Revision information is supplied at initialization time. 'repo' is the
2025 Revision information is supplied at initialization time. 'repo' is the
2026 current localrepo, 'ctx' is original revision which manifest we're reuisng
2026 current localrepo, 'ctx' is original revision which manifest we're reuisng
2027 'parents' is a sequence of two parent revisions identifiers (pass None for
2027 'parents' is a sequence of two parent revisions identifiers (pass None for
2028 every missing parent), 'text' is the commit.
2028 every missing parent), 'text' is the commit.
2029
2029
2030 user receives the committer name and defaults to current repository
2030 user receives the committer name and defaults to current repository
2031 username, date is the commit date in any format supported by
2031 username, date is the commit date in any format supported by
2032 util.parsedate() and defaults to current date, extra is a dictionary of
2032 util.parsedate() and defaults to current date, extra is a dictionary of
2033 metadata or is left empty.
2033 metadata or is left empty.
2034 """
2034 """
2035 def __new__(cls, repo, originalctx, *args, **kwargs):
2035 def __new__(cls, repo, originalctx, *args, **kwargs):
2036 return super(metadataonlyctx, cls).__new__(cls, repo)
2036 return super(metadataonlyctx, cls).__new__(cls, repo)
2037
2037
2038 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2038 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2039 extra=None, editor=False):
2039 extra=None, editor=False):
2040 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2040 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2041 self._rev = None
2041 self._rev = None
2042 self._node = None
2042 self._node = None
2043 self._originalctx = originalctx
2043 self._originalctx = originalctx
2044 self._manifestnode = originalctx.manifestnode()
2044 self._manifestnode = originalctx.manifestnode()
2045 parents = [(p or nullid) for p in parents]
2045 parents = [(p or nullid) for p in parents]
2046 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2046 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2047
2047
2048 # sanity check to ensure that the reused manifest parents are
2048 # sanity check to ensure that the reused manifest parents are
2049 # manifests of our commit parents
2049 # manifests of our commit parents
2050 mp1, mp2 = self.manifestctx().parents
2050 mp1, mp2 = self.manifestctx().parents
2051 if p1 != nullid and p1.manifestctx().node() != mp1:
2051 if p1 != nullid and p1.manifestctx().node() != mp1:
2052 raise RuntimeError('can\'t reuse the manifest: '
2052 raise RuntimeError('can\'t reuse the manifest: '
2053 'its p1 doesn\'t match the new ctx p1')
2053 'its p1 doesn\'t match the new ctx p1')
2054 if p2 != nullid and p2.manifestctx().node() != mp2:
2054 if p2 != nullid and p2.manifestctx().node() != mp2:
2055 raise RuntimeError('can\'t reuse the manifest: '
2055 raise RuntimeError('can\'t reuse the manifest: '
2056 'its p2 doesn\'t match the new ctx p2')
2056 'its p2 doesn\'t match the new ctx p2')
2057
2057
2058 self._files = originalctx.files()
2058 self._files = originalctx.files()
2059 self.substate = {}
2059 self.substate = {}
2060
2060
2061 if extra:
2061 if extra:
2062 self._extra = extra.copy()
2062 self._extra = extra.copy()
2063 else:
2063 else:
2064 self._extra = {}
2064 self._extra = {}
2065
2065
2066 if self._extra.get('branch', '') == '':
2066 if self._extra.get('branch', '') == '':
2067 self._extra['branch'] = 'default'
2067 self._extra['branch'] = 'default'
2068
2068
2069 if editor:
2069 if editor:
2070 self._text = editor(self._repo, self, [])
2070 self._text = editor(self._repo, self, [])
2071 self._repo.savecommitmessage(self._text)
2071 self._repo.savecommitmessage(self._text)
2072
2072
2073 def manifestnode(self):
2073 def manifestnode(self):
2074 return self._manifestnode
2074 return self._manifestnode
2075
2075
2076 @propertycache
2076 @propertycache
2077 def _manifestctx(self):
2077 def _manifestctx(self):
2078 return self._repo.manifestlog[self._manifestnode]
2078 return self._repo.manifestlog[self._manifestnode]
2079
2079
2080 def filectx(self, path, filelog=None):
2080 def filectx(self, path, filelog=None):
2081 return self._originalctx.filectx(path, filelog=filelog)
2081 return self._originalctx.filectx(path, filelog=filelog)
2082
2082
2083 def commit(self):
2083 def commit(self):
2084 """commit context to the repo"""
2084 """commit context to the repo"""
2085 return self._repo.commitctx(self)
2085 return self._repo.commitctx(self)
2086
2086
2087 @property
2087 @property
2088 def _manifest(self):
2088 def _manifest(self):
2089 return self._originalctx.manifest()
2089 return self._originalctx.manifest()
2090
2090
2091 @propertycache
2091 @propertycache
2092 def _status(self):
2092 def _status(self):
2093 """Calculate exact status from ``files`` specified in the ``origctx``
2093 """Calculate exact status from ``files`` specified in the ``origctx``
2094 and parents manifests.
2094 and parents manifests.
2095 """
2095 """
2096 man1 = self.p1().manifest()
2096 man1 = self.p1().manifest()
2097 p2 = self._parents[1]
2097 p2 = self._parents[1]
2098 # "1 < len(self._parents)" can't be used for checking
2098 # "1 < len(self._parents)" can't be used for checking
2099 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2099 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2100 # explicitly initialized by the list, of which length is 2.
2100 # explicitly initialized by the list, of which length is 2.
2101 if p2.node() != nullid:
2101 if p2.node() != nullid:
2102 man2 = p2.manifest()
2102 man2 = p2.manifest()
2103 managing = lambda f: f in man1 or f in man2
2103 managing = lambda f: f in man1 or f in man2
2104 else:
2104 else:
2105 managing = lambda f: f in man1
2105 managing = lambda f: f in man1
2106
2106
2107 modified, added, removed = [], [], []
2107 modified, added, removed = [], [], []
2108 for f in self._files:
2108 for f in self._files:
2109 if not managing(f):
2109 if not managing(f):
2110 added.append(f)
2110 added.append(f)
2111 elif self[f]:
2111 elif self[f]:
2112 modified.append(f)
2112 modified.append(f)
2113 else:
2113 else:
2114 removed.append(f)
2114 removed.append(f)
2115
2115
2116 return scmutil.status(modified, added, removed, [], [], [], [])
2116 return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,2288 +1,2289 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 pathutil,
22 pathutil,
23 phases,
23 phases,
24 registrar,
24 registrar,
25 repoview,
25 repoview,
26 revsetlang,
26 revsetlang,
27 smartset,
27 smartset,
28 util,
28 util,
29 )
29 )
30
30
31 # helpers for processing parsed tree
31 # helpers for processing parsed tree
32 getsymbol = revsetlang.getsymbol
32 getsymbol = revsetlang.getsymbol
33 getstring = revsetlang.getstring
33 getstring = revsetlang.getstring
34 getinteger = revsetlang.getinteger
34 getinteger = revsetlang.getinteger
35 getlist = revsetlang.getlist
35 getlist = revsetlang.getlist
36 getrange = revsetlang.getrange
36 getrange = revsetlang.getrange
37 getargs = revsetlang.getargs
37 getargs = revsetlang.getargs
38 getargsdict = revsetlang.getargsdict
38 getargsdict = revsetlang.getargsdict
39
39
40 # constants used as an argument of match() and matchany()
40 # constants used as an argument of match() and matchany()
41 anyorder = revsetlang.anyorder
41 anyorder = revsetlang.anyorder
42 defineorder = revsetlang.defineorder
42 defineorder = revsetlang.defineorder
43 followorder = revsetlang.followorder
43 followorder = revsetlang.followorder
44
44
45 baseset = smartset.baseset
45 baseset = smartset.baseset
46 generatorset = smartset.generatorset
46 generatorset = smartset.generatorset
47 spanset = smartset.spanset
47 spanset = smartset.spanset
48 fullreposet = smartset.fullreposet
48 fullreposet = smartset.fullreposet
49
49
50 def _revancestors(repo, revs, followfirst):
50 def _revancestors(repo, revs, followfirst):
51 """Like revlog.ancestors(), but supports followfirst."""
51 """Like revlog.ancestors(), but supports followfirst."""
52 if followfirst:
52 if followfirst:
53 cut = 1
53 cut = 1
54 else:
54 else:
55 cut = None
55 cut = None
56 cl = repo.changelog
56 cl = repo.changelog
57
57
58 def iterate():
58 def iterate():
59 revs.sort(reverse=True)
59 revs.sort(reverse=True)
60 irevs = iter(revs)
60 irevs = iter(revs)
61 h = []
61 h = []
62
62
63 inputrev = next(irevs, None)
63 inputrev = next(irevs, None)
64 if inputrev is not None:
64 if inputrev is not None:
65 heapq.heappush(h, -inputrev)
65 heapq.heappush(h, -inputrev)
66
66
67 seen = set()
67 seen = set()
68 while h:
68 while h:
69 current = -heapq.heappop(h)
69 current = -heapq.heappop(h)
70 if current == inputrev:
70 if current == inputrev:
71 inputrev = next(irevs, None)
71 inputrev = next(irevs, None)
72 if inputrev is not None:
72 if inputrev is not None:
73 heapq.heappush(h, -inputrev)
73 heapq.heappush(h, -inputrev)
74 if current not in seen:
74 if current not in seen:
75 seen.add(current)
75 seen.add(current)
76 yield current
76 yield current
77 for parent in cl.parentrevs(current)[:cut]:
77 for parent in cl.parentrevs(current)[:cut]:
78 if parent != node.nullrev:
78 if parent != node.nullrev:
79 heapq.heappush(h, -parent)
79 heapq.heappush(h, -parent)
80
80
81 return generatorset(iterate(), iterasc=False)
81 return generatorset(iterate(), iterasc=False)
82
82
83 def _revdescendants(repo, revs, followfirst):
83 def _revdescendants(repo, revs, followfirst):
84 """Like revlog.descendants() but supports followfirst."""
84 """Like revlog.descendants() but supports followfirst."""
85 if followfirst:
85 if followfirst:
86 cut = 1
86 cut = 1
87 else:
87 else:
88 cut = None
88 cut = None
89
89
90 def iterate():
90 def iterate():
91 cl = repo.changelog
91 cl = repo.changelog
92 # XXX this should be 'parentset.min()' assuming 'parentset' is a
92 # XXX this should be 'parentset.min()' assuming 'parentset' is a
93 # smartset (and if it is not, it should.)
93 # smartset (and if it is not, it should.)
94 first = min(revs)
94 first = min(revs)
95 nullrev = node.nullrev
95 nullrev = node.nullrev
96 if first == nullrev:
96 if first == nullrev:
97 # Are there nodes with a null first parent and a non-null
97 # Are there nodes with a null first parent and a non-null
98 # second one? Maybe. Do we care? Probably not.
98 # second one? Maybe. Do we care? Probably not.
99 for i in cl:
99 for i in cl:
100 yield i
100 yield i
101 else:
101 else:
102 seen = set(revs)
102 seen = set(revs)
103 for i in cl.revs(first + 1):
103 for i in cl.revs(first + 1):
104 for x in cl.parentrevs(i)[:cut]:
104 for x in cl.parentrevs(i)[:cut]:
105 if x != nullrev and x in seen:
105 if x != nullrev and x in seen:
106 seen.add(i)
106 seen.add(i)
107 yield i
107 yield i
108 break
108 break
109
109
110 return generatorset(iterate(), iterasc=True)
110 return generatorset(iterate(), iterasc=True)
111
111
112 def _reachablerootspure(repo, minroot, roots, heads, includepath):
112 def _reachablerootspure(repo, minroot, roots, heads, includepath):
113 """return (heads(::<roots> and ::<heads>))
113 """return (heads(::<roots> and ::<heads>))
114
114
115 If includepath is True, return (<roots>::<heads>)."""
115 If includepath is True, return (<roots>::<heads>)."""
116 if not roots:
116 if not roots:
117 return []
117 return []
118 parentrevs = repo.changelog.parentrevs
118 parentrevs = repo.changelog.parentrevs
119 roots = set(roots)
119 roots = set(roots)
120 visit = list(heads)
120 visit = list(heads)
121 reachable = set()
121 reachable = set()
122 seen = {}
122 seen = {}
123 # prefetch all the things! (because python is slow)
123 # prefetch all the things! (because python is slow)
124 reached = reachable.add
124 reached = reachable.add
125 dovisit = visit.append
125 dovisit = visit.append
126 nextvisit = visit.pop
126 nextvisit = visit.pop
127 # open-code the post-order traversal due to the tiny size of
127 # open-code the post-order traversal due to the tiny size of
128 # sys.getrecursionlimit()
128 # sys.getrecursionlimit()
129 while visit:
129 while visit:
130 rev = nextvisit()
130 rev = nextvisit()
131 if rev in roots:
131 if rev in roots:
132 reached(rev)
132 reached(rev)
133 if not includepath:
133 if not includepath:
134 continue
134 continue
135 parents = parentrevs(rev)
135 parents = parentrevs(rev)
136 seen[rev] = parents
136 seen[rev] = parents
137 for parent in parents:
137 for parent in parents:
138 if parent >= minroot and parent not in seen:
138 if parent >= minroot and parent not in seen:
139 dovisit(parent)
139 dovisit(parent)
140 if not reachable:
140 if not reachable:
141 return baseset()
141 return baseset()
142 if not includepath:
142 if not includepath:
143 return reachable
143 return reachable
144 for rev in sorted(seen):
144 for rev in sorted(seen):
145 for parent in seen[rev]:
145 for parent in seen[rev]:
146 if parent in reachable:
146 if parent in reachable:
147 reached(rev)
147 reached(rev)
148 return reachable
148 return reachable
149
149
150 def reachableroots(repo, roots, heads, includepath=False):
150 def reachableroots(repo, roots, heads, includepath=False):
151 """return (heads(::<roots> and ::<heads>))
151 """return (heads(::<roots> and ::<heads>))
152
152
153 If includepath is True, return (<roots>::<heads>)."""
153 If includepath is True, return (<roots>::<heads>)."""
154 if not roots:
154 if not roots:
155 return baseset()
155 return baseset()
156 minroot = roots.min()
156 minroot = roots.min()
157 roots = list(roots)
157 roots = list(roots)
158 heads = list(heads)
158 heads = list(heads)
159 try:
159 try:
160 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
160 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
161 except AttributeError:
161 except AttributeError:
162 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
162 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
163 revs = baseset(revs)
163 revs = baseset(revs)
164 revs.sort()
164 revs.sort()
165 return revs
165 return revs
166
166
167 # helpers
167 # helpers
168
168
169 def getset(repo, subset, x):
169 def getset(repo, subset, x):
170 if not x:
170 if not x:
171 raise error.ParseError(_("missing argument"))
171 raise error.ParseError(_("missing argument"))
172 s = methods[x[0]](repo, subset, *x[1:])
172 s = methods[x[0]](repo, subset, *x[1:])
173 if util.safehasattr(s, 'isascending'):
173 if util.safehasattr(s, 'isascending'):
174 return s
174 return s
175 # else case should not happen, because all non-func are internal,
175 # else case should not happen, because all non-func are internal,
176 # ignoring for now.
176 # ignoring for now.
177 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
177 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
178 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
178 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
179 % x[1][1],
179 % x[1][1],
180 '3.9')
180 '3.9')
181 return baseset(s)
181 return baseset(s)
182
182
183 def _getrevsource(repo, r):
183 def _getrevsource(repo, r):
184 extra = repo[r].extra()
184 extra = repo[r].extra()
185 for label in ('source', 'transplant_source', 'rebase_source'):
185 for label in ('source', 'transplant_source', 'rebase_source'):
186 if label in extra:
186 if label in extra:
187 try:
187 try:
188 return repo[extra[label]].rev()
188 return repo[extra[label]].rev()
189 except error.RepoLookupError:
189 except error.RepoLookupError:
190 pass
190 pass
191 return None
191 return None
192
192
193 # operator methods
193 # operator methods
194
194
195 def stringset(repo, subset, x):
195 def stringset(repo, subset, x):
196 x = repo[x].rev()
196 x = repo[x].rev()
197 if (x in subset
197 if (x in subset
198 or x == node.nullrev and isinstance(subset, fullreposet)):
198 or x == node.nullrev and isinstance(subset, fullreposet)):
199 return baseset([x])
199 return baseset([x])
200 return baseset()
200 return baseset()
201
201
202 def rangeset(repo, subset, x, y, order):
202 def rangeset(repo, subset, x, y, order):
203 m = getset(repo, fullreposet(repo), x)
203 m = getset(repo, fullreposet(repo), x)
204 n = getset(repo, fullreposet(repo), y)
204 n = getset(repo, fullreposet(repo), y)
205
205
206 if not m or not n:
206 if not m or not n:
207 return baseset()
207 return baseset()
208 return _makerangeset(repo, subset, m.first(), n.last(), order)
208 return _makerangeset(repo, subset, m.first(), n.last(), order)
209
209
210 def rangeall(repo, subset, x, order):
210 def rangeall(repo, subset, x, order):
211 assert x is None
211 assert x is None
212 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
212 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
213
213
214 def rangepre(repo, subset, y, order):
214 def rangepre(repo, subset, y, order):
215 # ':y' can't be rewritten to '0:y' since '0' may be hidden
215 # ':y' can't be rewritten to '0:y' since '0' may be hidden
216 n = getset(repo, fullreposet(repo), y)
216 n = getset(repo, fullreposet(repo), y)
217 if not n:
217 if not n:
218 return baseset()
218 return baseset()
219 return _makerangeset(repo, subset, 0, n.last(), order)
219 return _makerangeset(repo, subset, 0, n.last(), order)
220
220
221 def rangepost(repo, subset, x, order):
221 def rangepost(repo, subset, x, order):
222 m = getset(repo, fullreposet(repo), x)
222 m = getset(repo, fullreposet(repo), x)
223 if not m:
223 if not m:
224 return baseset()
224 return baseset()
225 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
225 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
226
226
227 def _makerangeset(repo, subset, m, n, order):
227 def _makerangeset(repo, subset, m, n, order):
228 if m == n:
228 if m == n:
229 r = baseset([m])
229 r = baseset([m])
230 elif n == node.wdirrev:
230 elif n == node.wdirrev:
231 r = spanset(repo, m, len(repo)) + baseset([n])
231 r = spanset(repo, m, len(repo)) + baseset([n])
232 elif m == node.wdirrev:
232 elif m == node.wdirrev:
233 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
233 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
234 elif m < n:
234 elif m < n:
235 r = spanset(repo, m, n + 1)
235 r = spanset(repo, m, n + 1)
236 else:
236 else:
237 r = spanset(repo, m, n - 1)
237 r = spanset(repo, m, n - 1)
238
238
239 if order == defineorder:
239 if order == defineorder:
240 return r & subset
240 return r & subset
241 else:
241 else:
242 # carrying the sorting over when possible would be more efficient
242 # carrying the sorting over when possible would be more efficient
243 return subset & r
243 return subset & r
244
244
245 def dagrange(repo, subset, x, y, order):
245 def dagrange(repo, subset, x, y, order):
246 r = fullreposet(repo)
246 r = fullreposet(repo)
247 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
247 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
248 includepath=True)
248 includepath=True)
249 return subset & xs
249 return subset & xs
250
250
251 def andset(repo, subset, x, y, order):
251 def andset(repo, subset, x, y, order):
252 return getset(repo, getset(repo, subset, x), y)
252 return getset(repo, getset(repo, subset, x), y)
253
253
254 def differenceset(repo, subset, x, y, order):
254 def differenceset(repo, subset, x, y, order):
255 return getset(repo, subset, x) - getset(repo, subset, y)
255 return getset(repo, subset, x) - getset(repo, subset, y)
256
256
257 def _orsetlist(repo, subset, xs):
257 def _orsetlist(repo, subset, xs):
258 assert xs
258 assert xs
259 if len(xs) == 1:
259 if len(xs) == 1:
260 return getset(repo, subset, xs[0])
260 return getset(repo, subset, xs[0])
261 p = len(xs) // 2
261 p = len(xs) // 2
262 a = _orsetlist(repo, subset, xs[:p])
262 a = _orsetlist(repo, subset, xs[:p])
263 b = _orsetlist(repo, subset, xs[p:])
263 b = _orsetlist(repo, subset, xs[p:])
264 return a + b
264 return a + b
265
265
266 def orset(repo, subset, x, order):
266 def orset(repo, subset, x, order):
267 xs = getlist(x)
267 xs = getlist(x)
268 if order == followorder:
268 if order == followorder:
269 # slow path to take the subset order
269 # slow path to take the subset order
270 return subset & _orsetlist(repo, fullreposet(repo), xs)
270 return subset & _orsetlist(repo, fullreposet(repo), xs)
271 else:
271 else:
272 return _orsetlist(repo, subset, xs)
272 return _orsetlist(repo, subset, xs)
273
273
274 def notset(repo, subset, x, order):
274 def notset(repo, subset, x, order):
275 return subset - getset(repo, subset, x)
275 return subset - getset(repo, subset, x)
276
276
277 def listset(repo, subset, *xs):
277 def listset(repo, subset, *xs):
278 raise error.ParseError(_("can't use a list in this context"),
278 raise error.ParseError(_("can't use a list in this context"),
279 hint=_('see hg help "revsets.x or y"'))
279 hint=_('see hg help "revsets.x or y"'))
280
280
281 def keyvaluepair(repo, subset, k, v):
281 def keyvaluepair(repo, subset, k, v):
282 raise error.ParseError(_("can't use a key-value pair in this context"))
282 raise error.ParseError(_("can't use a key-value pair in this context"))
283
283
284 def func(repo, subset, a, b, order):
284 def func(repo, subset, a, b, order):
285 f = getsymbol(a)
285 f = getsymbol(a)
286 if f in symbols:
286 if f in symbols:
287 func = symbols[f]
287 func = symbols[f]
288 if getattr(func, '_takeorder', False):
288 if getattr(func, '_takeorder', False):
289 return func(repo, subset, b, order)
289 return func(repo, subset, b, order)
290 return func(repo, subset, b)
290 return func(repo, subset, b)
291
291
292 keep = lambda fn: getattr(fn, '__doc__', None) is not None
292 keep = lambda fn: getattr(fn, '__doc__', None) is not None
293
293
294 syms = [s for (s, fn) in symbols.items() if keep(fn)]
294 syms = [s for (s, fn) in symbols.items() if keep(fn)]
295 raise error.UnknownIdentifier(f, syms)
295 raise error.UnknownIdentifier(f, syms)
296
296
297 # functions
297 # functions
298
298
299 # symbols are callables like:
299 # symbols are callables like:
300 # fn(repo, subset, x)
300 # fn(repo, subset, x)
301 # with:
301 # with:
302 # repo - current repository instance
302 # repo - current repository instance
303 # subset - of revisions to be examined
303 # subset - of revisions to be examined
304 # x - argument in tree form
304 # x - argument in tree form
305 symbols = {}
305 symbols = {}
306
306
307 # symbols which can't be used for a DoS attack for any given input
307 # symbols which can't be used for a DoS attack for any given input
308 # (e.g. those which accept regexes as plain strings shouldn't be included)
308 # (e.g. those which accept regexes as plain strings shouldn't be included)
309 # functions that just return a lot of changesets (like all) don't count here
309 # functions that just return a lot of changesets (like all) don't count here
310 safesymbols = set()
310 safesymbols = set()
311
311
312 predicate = registrar.revsetpredicate()
312 predicate = registrar.revsetpredicate()
313
313
314 @predicate('_destupdate')
314 @predicate('_destupdate')
315 def _destupdate(repo, subset, x):
315 def _destupdate(repo, subset, x):
316 # experimental revset for update destination
316 # experimental revset for update destination
317 args = getargsdict(x, 'limit', 'clean')
317 args = getargsdict(x, 'limit', 'clean')
318 return subset & baseset([destutil.destupdate(repo, **args)[0]])
318 return subset & baseset([destutil.destupdate(repo, **args)[0]])
319
319
320 @predicate('_destmerge')
320 @predicate('_destmerge')
321 def _destmerge(repo, subset, x):
321 def _destmerge(repo, subset, x):
322 # experimental revset for merge destination
322 # experimental revset for merge destination
323 sourceset = None
323 sourceset = None
324 if x is not None:
324 if x is not None:
325 sourceset = getset(repo, fullreposet(repo), x)
325 sourceset = getset(repo, fullreposet(repo), x)
326 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
326 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
327
327
328 @predicate('adds(pattern)', safe=True)
328 @predicate('adds(pattern)', safe=True)
329 def adds(repo, subset, x):
329 def adds(repo, subset, x):
330 """Changesets that add a file matching pattern.
330 """Changesets that add a file matching pattern.
331
331
332 The pattern without explicit kind like ``glob:`` is expected to be
332 The pattern without explicit kind like ``glob:`` is expected to be
333 relative to the current directory and match against a file or a
333 relative to the current directory and match against a file or a
334 directory.
334 directory.
335 """
335 """
336 # i18n: "adds" is a keyword
336 # i18n: "adds" is a keyword
337 pat = getstring(x, _("adds requires a pattern"))
337 pat = getstring(x, _("adds requires a pattern"))
338 return checkstatus(repo, subset, pat, 1)
338 return checkstatus(repo, subset, pat, 1)
339
339
340 @predicate('ancestor(*changeset)', safe=True)
340 @predicate('ancestor(*changeset)', safe=True)
341 def ancestor(repo, subset, x):
341 def ancestor(repo, subset, x):
342 """A greatest common ancestor of the changesets.
342 """A greatest common ancestor of the changesets.
343
343
344 Accepts 0 or more changesets.
344 Accepts 0 or more changesets.
345 Will return empty list when passed no args.
345 Will return empty list when passed no args.
346 Greatest common ancestor of a single changeset is that changeset.
346 Greatest common ancestor of a single changeset is that changeset.
347 """
347 """
348 # i18n: "ancestor" is a keyword
348 # i18n: "ancestor" is a keyword
349 l = getlist(x)
349 l = getlist(x)
350 rl = fullreposet(repo)
350 rl = fullreposet(repo)
351 anc = None
351 anc = None
352
352
353 # (getset(repo, rl, i) for i in l) generates a list of lists
353 # (getset(repo, rl, i) for i in l) generates a list of lists
354 for revs in (getset(repo, rl, i) for i in l):
354 for revs in (getset(repo, rl, i) for i in l):
355 for r in revs:
355 for r in revs:
356 if anc is None:
356 if anc is None:
357 anc = repo[r]
357 anc = repo[r]
358 else:
358 else:
359 anc = anc.ancestor(repo[r])
359 anc = anc.ancestor(repo[r])
360
360
361 if anc is not None and anc.rev() in subset:
361 if anc is not None and anc.rev() in subset:
362 return baseset([anc.rev()])
362 return baseset([anc.rev()])
363 return baseset()
363 return baseset()
364
364
365 def _ancestors(repo, subset, x, followfirst=False):
365 def _ancestors(repo, subset, x, followfirst=False):
366 heads = getset(repo, fullreposet(repo), x)
366 heads = getset(repo, fullreposet(repo), x)
367 if not heads:
367 if not heads:
368 return baseset()
368 return baseset()
369 s = _revancestors(repo, heads, followfirst)
369 s = _revancestors(repo, heads, followfirst)
370 return subset & s
370 return subset & s
371
371
372 @predicate('ancestors(set)', safe=True)
372 @predicate('ancestors(set)', safe=True)
373 def ancestors(repo, subset, x):
373 def ancestors(repo, subset, x):
374 """Changesets that are ancestors of a changeset in set.
374 """Changesets that are ancestors of a changeset in set.
375 """
375 """
376 return _ancestors(repo, subset, x)
376 return _ancestors(repo, subset, x)
377
377
378 @predicate('_firstancestors', safe=True)
378 @predicate('_firstancestors', safe=True)
379 def _firstancestors(repo, subset, x):
379 def _firstancestors(repo, subset, x):
380 # ``_firstancestors(set)``
380 # ``_firstancestors(set)``
381 # Like ``ancestors(set)`` but follows only the first parents.
381 # Like ``ancestors(set)`` but follows only the first parents.
382 return _ancestors(repo, subset, x, followfirst=True)
382 return _ancestors(repo, subset, x, followfirst=True)
383
383
384 def ancestorspec(repo, subset, x, n, order):
384 def ancestorspec(repo, subset, x, n, order):
385 """``set~n``
385 """``set~n``
386 Changesets that are the Nth ancestor (first parents only) of a changeset
386 Changesets that are the Nth ancestor (first parents only) of a changeset
387 in set.
387 in set.
388 """
388 """
389 n = getinteger(n, _("~ expects a number"))
389 n = getinteger(n, _("~ expects a number"))
390 ps = set()
390 ps = set()
391 cl = repo.changelog
391 cl = repo.changelog
392 for r in getset(repo, fullreposet(repo), x):
392 for r in getset(repo, fullreposet(repo), x):
393 for i in range(n):
393 for i in range(n):
394 r = cl.parentrevs(r)[0]
394 r = cl.parentrevs(r)[0]
395 ps.add(r)
395 ps.add(r)
396 return subset & ps
396 return subset & ps
397
397
398 @predicate('author(string)', safe=True)
398 @predicate('author(string)', safe=True)
399 def author(repo, subset, x):
399 def author(repo, subset, x):
400 """Alias for ``user(string)``.
400 """Alias for ``user(string)``.
401 """
401 """
402 # i18n: "author" is a keyword
402 # i18n: "author" is a keyword
403 n = getstring(x, _("author requires a string"))
403 n = getstring(x, _("author requires a string"))
404 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
404 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
405 return subset.filter(lambda x: matcher(repo[x].user()),
405 return subset.filter(lambda x: matcher(repo[x].user()),
406 condrepr=('<user %r>', n))
406 condrepr=('<user %r>', n))
407
407
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get yields the revs in the requested bisection state
    return subset & set(hbisect.get(repo, status))
424
424
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # Deprecated spelling of bisect(); kept so existing revsets using
    # "bisected" keep working. Delegates unchanged to bisect().
    return bisect(repo, subset, x)
430
430
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.

    Raises RepoLookupError when a literal name does not exist, or when a
    pattern matches no bookmark at all.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a single dictionary lookup
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: collect every bookmark whose name matches
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set(repo[r].rev() for r in repo._bookmarks.values())
    # a bookmark may point to the null revision; never return it
    bms.discard(node.nullrev)
    return subset & bms
466
466
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # branchinfo(rev) returns (branchname, closed); we only use the name
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                # explicit "literal:" kind: a missing branch is an error,
                # not a candidate revspec
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # revspec case: collect the branches of the given changesets, then
    # select revisions on any of those branches (or in the set itself)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
505
505
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
516
516
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundlerepo's changelog carries 'bundlerevs'; its absence
    # means no bundle was supplied
    bundlerevs = getattr(repo.changelog, 'bundlerevs', None)
    if bundlerevs is None:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
528
528
def checkstatus(repo, subset, pat, field):
    """Select revisions where a file matching 'pat' appears in the status
    list selected by 'field'.

    'field' indexes into the tuple returned by repo.status() against the
    revision's first parent — presumably (modified, added, removed, ...);
    confirm against the callers.
    """
    # fileset patterns ('set:...') depend on the context, so their matcher
    # cannot be cached across revisions
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a single literal file name can be tested with
            # plain membership instead of running the matcher
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        # only compute the (expensive) status once the cheap c.files()
        # pre-filter has passed
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # falls through returning None (falsy) when nothing matched

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
560
560
def _children(repo, subset, parentset):
    """Return the members of 'subset' having at least one parent in
    'parentset'."""
    if not parentset:
        return baseset()
    found = set()
    parentrevs = repo.changelog.parentrevs
    minparent = parentset.min()
    null = node.nullrev
    for rev in subset:
        if rev <= minparent:
            # a child always has a higher revision number than its parents
            continue
        p1, p2 = parentrevs(rev)
        if p1 in parentset or (p2 != null and p2 in parentset):
            found.add(rev)
    return baseset(found)
577
577
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
585
585
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed, condrepr='<branch closed>')
594
594
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))
    literal = not matchmod.patkind(pat)

    def matches(r):
        ctx = repo[r]
        if literal:
            # plain path: one canonical-path membership test per revision
            f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            return f in ctx
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        return any(m(f) for f in ctx.manifest())

    return subset.filter(matches, condrepr=('<contains %r>', pat))
621
621
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def wasconverted(r):
        # 'convert_revision' extra records the source identifier
        origin = repo[r].extra().get('convert_revision', None)
        if origin is None:
            return False
        return rev is None or origin.startswith(rev)

    return subset.filter(wasconverted, condrepr=('<converted %r>', rev))
644
644
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)

    def indate(r):
        # date()[0] is the unix timestamp component
        return dm(repo[r].date()[0])

    return subset.filter(indate, condrepr=('<date %r>', ds))
654
654
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))
    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    def hasdesc(r):
        return matcher(repo[r].description())

    return subset.filter(hasdesc, condrepr=('<desc %r>', ds))
669
669
def _descendants(repo, subset, x, followfirst=False):
    """Common implementation of descendants()/_firstdescendants().

    The result includes the roots themselves, restricted to 'subset'.
    """
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # subset has no defined order: fall back to intersecting, which
        # preserves subset's own ordering
        result = subset & result
    return result
688
688
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # thin wrapper: all of the work happens in _descendants()
    return _descendants(repo, subset, x)
694
694
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # Internal predicate (leading underscore): not advertised in help.
    return _descendants(repo, subset, x, followfirst=True)
700
700
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        # _getrevsource extracts the recorded graft/transplant/rebase source
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            # note: 'r' (the destination being walked), not 'src', is
            # accumulated — lineage is the chain of candidate dests
            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
745
745
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
755
755
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
764
764
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        # stringmatcher rebinds value to the bare pattern for condrepr
        kind, value, matcher = util.stringmatcher(value)

    def hasextra(r):
        extradict = repo[r].extra()
        if label not in extradict:
            return False
        return value is None or matcher(extradict[label])

    return subset.filter(hasextra, condrepr=('<extra[%r] %r>', label, value))
794
794
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # literal path: exactly one filelog to visit
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # filenode -> changelog rev discovered during earlier workaround
        # scans, reused so each filenode is resolved at most once
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                # scanpos=None marks the scan as in progress / exhausted
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
860
860
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # pure alias: identical argument handling lives in limit()
    return limit(repo, subset, x)
866
866
def _follow(repo, subset, x, name, followfirst=False):
    """Common implementation of follow()/_followfirst().

    'name' is the predicate name, used only in error messages. With a
    pattern argument, follows the history of the matched files from the
    starting revision (default '.'); otherwise follows changeset ancestry
    from the working directory's parent.
    """
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        # rev stays None when no startrev is given; repo[None] below then
        # builds the matcher against the working context
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                        _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            # note: the comprehension's 'c' shadows the outer changectx
            # only inside the generator expression
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
896
896
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # thin wrapper: argument parsing and traversal live in _follow()
    return _follow(repo, subset, x, 'follow')
905
905
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    # Internal predicate (leading underscore): not advertised in help.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
912
912
@predicate('followlines(file, fromline:toline[, startrev=.])', safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.
    """
    from . import context # avoid circular import issues

    args = getargsdict(x, 'followlines', 'file *lines startrev')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    if not matchmod.patkind(pat):
        # plain path: resolve it against the repository root
        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        # a pattern must select exactly one file at 'startrev'
        matcher = matchmod.match(repo.root, repo.getcwd(), [pat],
                                 ctx=repo[rev])
        files = [f for f in repo[rev] if matcher(f)]
        if len(files) != 1:
            raise error.ParseError(_("followlines expects exactly one file"))
        fname = files[0]

    linerange = getrange(args['lines'][0],
                         _("followlines expects a line range"))
    fromline = getinteger(linerange[0],
                          _("line range bounds must be integers"))
    toline = getinteger(linerange[1],
                        _("line range bounds must be integers"))
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    fromline -= 1  # user input is 1-based, internal ranges are 0-based

    fctx = repo[rev].filectx(fname)
    # blockancestors() yields (filectx, linerange) pairs; only the revision
    # matters here, the ancestor's line range is discarded.
    revs = (c.rev() for c, _linerange
            in context.blockancestors(fctx, fromline, toline))
    return subset & generatorset(revs, iterasc=False)
957
958
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # spanset(repo) covers every real revision; intersecting drops "null"
    return subset & spanset(repo)
965
966
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search changed file names, user and description for the regex
        c = repo[x]
        return any(gr.search(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
986
987
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    # the pattern-like prefixes just accumulate into their respective list
    patlists = {'p:': pats, 'i:': inc, 'x:': exc}
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix in patlists:
            patlists[prefix].append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        # the working directory has no changelog entry; ask its context
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        return any(m(f) for f in files)

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1050
1051
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (plain pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1063
1064
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # each branchmap value lists the head nodes of one named branch
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)
1075
1076
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
1083
1084
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are exactly the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1092
1093
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # case-insensitive substring match against files, user, description
        c = repo[r]
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1110
1111
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # skip the first 'ofs' revisions of the input set; the loop variable is
    # '_i' (not 'x') so the parse tree argument is not shadowed
    for _i in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    # collect up to 'lim' following members that also belong to 'subset'
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1140
1141
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # walk from the (reversed) start, keeping members also in 'subset'; the
    # loop variable is '_i' (not 'x') so the parse tree argument is not
    # shadowed
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1162
1163
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    # empty input, or the maximum is not part of the subset
    return baseset(datarepr=('<max %r, %r>', subset, os))
1177
1178
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    def ismerge(r):
        # a merge has a real (non-nullrev) second parent
        return cl.parentrevs(r)[1] != -1
    return subset.filter(ismerge, condrepr='<merge>')
1187
1188
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # one counter slot per revision >= baserev; count how many times each
    # revision appears as a parent (i.e. its number of children)
    parentscount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1207
1208
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    # empty input, or the minimum is not part of the subset
    return baseset(datarepr=('<min %r, %r>', subset, os))
1222
1223
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 holds the modified files
    return checkstatus(repo, subset, pat, 0)
1234
1235
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: it has to exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        namespaces.update(ns for name, ns in repo.names.iteritems()
                          if matcher(name))
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions bound to any non-deprecated name in the selected
    # namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1271
1272
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full-length hex nodeid: look it up directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter string: resolve it as an unambiguous nodeid prefix
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1295
1296
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # the obsstore keeps a precomputed set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1303
1304
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # implicit exclude set: every repository head that is neither in
        # 'include' nor descends from it ('rev not in' is the idiomatic
        # spelling of the membership test)
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1329
1330
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the graft/transplant/rebase source chain back to its start;
        # returns None when 'rev' has no recorded source
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    # set comprehension + discard() instead of building throwaway set([...])
    # objects
    o = {_firstsrc(r) for r in dests}
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o
1361
1362
@predicate('outgoing([path])', safe=False)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()  # silence discovery progress output
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missing = set(cl.rev(r) for r in outgoing.missing)
    return subset & missing
1388
1389
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1407
1408
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        wparents = repo[x].parents()
        try:
            p = wparents[1].rev()
        except IndexError:
            # The working directory has a single parent.
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1430
1431
def parentpost(repo, subset, x, order):
    # Handler for the postfix '^' operator: first parent of each revision.
    # 'order' is accepted for interface parity with other takeorder handlers
    # but is not used, since p1 does not take an order argument.
    return p1(repo, subset, x)
1433
1434
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # The changelog has no entry for the working directory;
                # ask the context object for its parents instead.
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1453
1454
def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    return subset & repo._phasecache.getrevset(repo, targets)
1458
1459
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1466
1467
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1474
1475
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself.
            ps.add(r)
            continue
        parents = cl.parentrevs(r)
        if n == 1:
            ps.add(parents[0])
        elif parents[1] != node.nullrev:
            # Only include a second parent when one actually exists.
            ps.add(parents[1])
    return subset & ps
1499
1500
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # An unknown revision anywhere in the subexpression degrades to an
        # empty result instead of propagating the error to the caller.
        return baseset()
1513
1514
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    # Revisions that are not public, i.e. in the draft or secret phase.
    # The error message is deliberately not wrapped in _() since this
    # predicate is internal-only.
    getargs(x, 0, 0, "_notpublic takes no arguments")
    return _phase(repo, subset, phases.draft, phases.secret)
1519
1520
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    def condition(r):
        return phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)
1530
1531
@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if l:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the current local branch name.
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1565
1566
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # NOTE(review): the literal 2 selects a status field in checkstatus
    # (presumably "removed") -- confirm against checkstatus's definition.
    return checkstatus(repo, subset, pat, 2)
1577
1578
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        num = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is not in the changelog proper.
    if num != node.nullrev and num not in repo.changelog:
        return baseset()
    return subset & baseset([num])
1593
1594
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # Revisions whose field values serve as the reference to match against.
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # True if x matches every selected field of at least one rev in revs.
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1705
1706
@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    revs = getset(repo, subset, x)
    # Only reverse when this expression defines the ordering; otherwise the
    # caller dictates it and the set must be left untouched.
    if order == defineorder:
        revs.reverse()
    return revs
1714
1715
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        # A root has no parent inside s (nullrev parents are ignored).
        return not any(0 <= p and p in s for p in parents(r))
    return subset & s.filter(filter, condrepr='<roots>')
1727
1728
# Map each sort key name to a function extracting the comparable value from
# a changectx. 'author' is an alias for 'user'; 'date' compares on the
# timestamp only (first element of the date tuple).
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
    }
1736
1737
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        # A leading '-' requests a descending sort on that key.
        reverse = k.startswith('-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts
1773
1774
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags or order != defineorder:
        # Nothing to sort by, or the caller controls the ordering.
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # Fast path: sorting by revision number needs no context objects.
        revs.sort(reverse=keyflags[0][1])
        return revs
    if keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable, so applying the keys from last to
    # first yields the desired multi-key ordering.
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1816
1817
1817 def _toposort(revs, parentsfunc, firstbranch=()):
1818 def _toposort(revs, parentsfunc, firstbranch=()):
1818 """Yield revisions from heads to roots one (topo) branch at a time.
1819 """Yield revisions from heads to roots one (topo) branch at a time.
1819
1820
1820 This function aims to be used by a graph generator that wishes to minimize
1821 This function aims to be used by a graph generator that wishes to minimize
1821 the number of parallel branches and their interleaving.
1822 the number of parallel branches and their interleaving.
1822
1823
1823 Example iteration order (numbers show the "true" order in a changelog):
1824 Example iteration order (numbers show the "true" order in a changelog):
1824
1825
1825 o 4
1826 o 4
1826 |
1827 |
1827 o 1
1828 o 1
1828 |
1829 |
1829 | o 3
1830 | o 3
1830 | |
1831 | |
1831 | o 2
1832 | o 2
1832 |/
1833 |/
1833 o 0
1834 o 0
1834
1835
1835 Note that the ancestors of merges are understood by the current
1836 Note that the ancestors of merges are understood by the current
1836 algorithm to be on the same branch. This means no reordering will
1837 algorithm to be on the same branch. This means no reordering will
1837 occur behind a merge.
1838 occur behind a merge.
1838 """
1839 """
1839
1840
1840 ### Quick summary of the algorithm
1841 ### Quick summary of the algorithm
1841 #
1842 #
1842 # This function is based around a "retention" principle. We keep revisions
1843 # This function is based around a "retention" principle. We keep revisions
1843 # in memory until we are ready to emit a whole branch that immediately
1844 # in memory until we are ready to emit a whole branch that immediately
1844 # "merges" into an existing one. This reduces the number of parallel
1845 # "merges" into an existing one. This reduces the number of parallel
1845 # branches with interleaved revisions.
1846 # branches with interleaved revisions.
1846 #
1847 #
1847 # During iteration revs are split into two groups:
1848 # During iteration revs are split into two groups:
1848 # A) revision already emitted
1849 # A) revision already emitted
1849 # B) revision in "retention". They are stored as different subgroups.
1850 # B) revision in "retention". They are stored as different subgroups.
1850 #
1851 #
1851 # for each REV, we do the following logic:
1852 # for each REV, we do the following logic:
1852 #
1853 #
1853 # 1) if REV is a parent of (A), we will emit it. If there is a
1854 # 1) if REV is a parent of (A), we will emit it. If there is a
1854 # retention group ((B) above) that is blocked on REV being
1855 # retention group ((B) above) that is blocked on REV being
1855 # available, we emit all the revisions out of that retention
1856 # available, we emit all the revisions out of that retention
1856 # group first.
1857 # group first.
1857 #
1858 #
1858 # 2) else, we'll search for a subgroup in (B) awaiting for REV to be
1859 # 2) else, we'll search for a subgroup in (B) awaiting for REV to be
1859 # available, if such subgroup exist, we add REV to it and the subgroup is
1860 # available, if such subgroup exist, we add REV to it and the subgroup is
1860 # now awaiting for REV.parents() to be available.
1861 # now awaiting for REV.parents() to be available.
1861 #
1862 #
1862 # 3) finally if no such group existed in (B), we create a new subgroup.
1863 # 3) finally if no such group existed in (B), we create a new subgroup.
1863 #
1864 #
1864 #
1865 #
1865 # To bootstrap the algorithm, we emit the tipmost revision (which
1866 # To bootstrap the algorithm, we emit the tipmost revision (which
1866 # puts it in group (A) from above).
1867 # puts it in group (A) from above).
1867
1868
1868 revs.sort(reverse=True)
1869 revs.sort(reverse=True)
1869
1870
1870 # Set of parents of revision that have been emitted. They can be considered
1871 # Set of parents of revision that have been emitted. They can be considered
1871 # unblocked as the graph generator is already aware of them so there is no
1872 # unblocked as the graph generator is already aware of them so there is no
1872 # need to delay the revisions that reference them.
1873 # need to delay the revisions that reference them.
1873 #
1874 #
1874 # If someone wants to prioritize a branch over the others, pre-filling this
1875 # If someone wants to prioritize a branch over the others, pre-filling this
1875 # set will force all other branches to wait until this branch is ready to be
1876 # set will force all other branches to wait until this branch is ready to be
1876 # emitted.
1877 # emitted.
1877 unblocked = set(firstbranch)
1878 unblocked = set(firstbranch)
1878
1879
1879 # list of groups waiting to be displayed, each group is defined by:
1880 # list of groups waiting to be displayed, each group is defined by:
1880 #
1881 #
1881 # (revs: lists of revs waiting to be displayed,
1882 # (revs: lists of revs waiting to be displayed,
1882 # blocked: set of that cannot be displayed before those in 'revs')
1883 # blocked: set of that cannot be displayed before those in 'revs')
1883 #
1884 #
1884 # The second value ('blocked') correspond to parents of any revision in the
1885 # The second value ('blocked') correspond to parents of any revision in the
1885 # group ('revs') that is not itself contained in the group. The main idea
1886 # group ('revs') that is not itself contained in the group. The main idea
1886 # of this algorithm is to delay as much as possible the emission of any
1887 # of this algorithm is to delay as much as possible the emission of any
1887 # revision. This means waiting for the moment we are about to display
1888 # revision. This means waiting for the moment we are about to display
1888 # these parents to display the revs in a group.
1889 # these parents to display the revs in a group.
1889 #
1890 #
1890 # This first implementation is smart until it encounters a merge: it will
1891 # This first implementation is smart until it encounters a merge: it will
1891 # emit revs as soon as any parent is about to be emitted and can grow an
1892 # emit revs as soon as any parent is about to be emitted and can grow an
1892 # arbitrary number of revs in 'blocked'. In practice this mean we properly
1893 # arbitrary number of revs in 'blocked'. In practice this mean we properly
1893 # retains new branches but gives up on any special ordering for ancestors
1894 # retains new branches but gives up on any special ordering for ancestors
1894 # of merges. The implementation can be improved to handle this better.
1895 # of merges. The implementation can be improved to handle this better.
1895 #
1896 #
1896 # The first subgroup is special. It corresponds to all the revision that
1897 # The first subgroup is special. It corresponds to all the revision that
1897 # were already emitted. The 'revs' lists is expected to be empty and the
1898 # were already emitted. The 'revs' lists is expected to be empty and the
1898 # 'blocked' set contains the parents revisions of already emitted revision.
1899 # 'blocked' set contains the parents revisions of already emitted revision.
1899 #
1900 #
1900 # You could pre-seed the <parents> set of groups[0] to a specific
1901 # You could pre-seed the <parents> set of groups[0] to a specific
1901 # changesets to select what the first emitted branch should be.
1902 # changesets to select what the first emitted branch should be.
1902 groups = [([], unblocked)]
1903 groups = [([], unblocked)]
1903 pendingheap = []
1904 pendingheap = []
1904 pendingset = set()
1905 pendingset = set()
1905
1906
1906 heapq.heapify(pendingheap)
1907 heapq.heapify(pendingheap)
1907 heappop = heapq.heappop
1908 heappop = heapq.heappop
1908 heappush = heapq.heappush
1909 heappush = heapq.heappush
1909 for currentrev in revs:
1910 for currentrev in revs:
1910 # Heap works with smallest element, we want highest so we invert
1911 # Heap works with smallest element, we want highest so we invert
1911 if currentrev not in pendingset:
1912 if currentrev not in pendingset:
1912 heappush(pendingheap, -currentrev)
1913 heappush(pendingheap, -currentrev)
1913 pendingset.add(currentrev)
1914 pendingset.add(currentrev)
1914 # iterates on pending rev until after the current rev have been
1915 # iterates on pending rev until after the current rev have been
1915 # processed.
1916 # processed.
1916 rev = None
1917 rev = None
1917 while rev != currentrev:
1918 while rev != currentrev:
1918 rev = -heappop(pendingheap)
1919 rev = -heappop(pendingheap)
1919 pendingset.remove(rev)
1920 pendingset.remove(rev)
1920
1921
1921 # Seek for a subgroup blocked, waiting for the current revision.
1922 # Seek for a subgroup blocked, waiting for the current revision.
1922 matching = [i for i, g in enumerate(groups) if rev in g[1]]
1923 matching = [i for i, g in enumerate(groups) if rev in g[1]]
1923
1924
1924 if matching:
1925 if matching:
1925 # The main idea is to gather together all sets that are blocked
1926 # The main idea is to gather together all sets that are blocked
1926 # on the same revision.
1927 # on the same revision.
1927 #
1928 #
1928 # Groups are merged when a common blocking ancestor is
1929 # Groups are merged when a common blocking ancestor is
1929 # observed. For example, given two groups:
1930 # observed. For example, given two groups:
1930 #
1931 #
1931 # revs [5, 4] waiting for 1
1932 # revs [5, 4] waiting for 1
1932 # revs [3, 2] waiting for 1
1933 # revs [3, 2] waiting for 1
1933 #
1934 #
1934 # These two groups will be merged when we process
1935 # These two groups will be merged when we process
1935 # 1. In theory, we could have merged the groups when
1936 # 1. In theory, we could have merged the groups when
1936 # we added 2 to the group it is now in (we could have
1937 # we added 2 to the group it is now in (we could have
1937 # noticed the groups were both blocked on 1 then), but
1938 # noticed the groups were both blocked on 1 then), but
1938 # the way it works now makes the algorithm simpler.
1939 # the way it works now makes the algorithm simpler.
1939 #
1940 #
1940 # We also always keep the oldest subgroup first. We can
1941 # We also always keep the oldest subgroup first. We can
1941 # probably improve the behavior by having the longest set
1942 # probably improve the behavior by having the longest set
1942 # first. That way, graph algorithms could minimise the length
1943 # first. That way, graph algorithms could minimise the length
1943 # of parallel lines their drawing. This is currently not done.
1944 # of parallel lines their drawing. This is currently not done.
1944 targetidx = matching.pop(0)
1945 targetidx = matching.pop(0)
1945 trevs, tparents = groups[targetidx]
1946 trevs, tparents = groups[targetidx]
1946 for i in matching:
1947 for i in matching:
1947 gr = groups[i]
1948 gr = groups[i]
1948 trevs.extend(gr[0])
1949 trevs.extend(gr[0])
1949 tparents |= gr[1]
1950 tparents |= gr[1]
1950 # delete all merged subgroups (except the one we kept)
1951 # delete all merged subgroups (except the one we kept)
1951 # (starting from the last subgroup for performance and
1952 # (starting from the last subgroup for performance and
1952 # sanity reasons)
1953 # sanity reasons)
1953 for i in reversed(matching):
1954 for i in reversed(matching):
1954 del groups[i]
1955 del groups[i]
1955 else:
1956 else:
1956 # This is a new head. We create a new subgroup for it.
1957 # This is a new head. We create a new subgroup for it.
1957 targetidx = len(groups)
1958 targetidx = len(groups)
1958 groups.append(([], set([rev])))
1959 groups.append(([], set([rev])))
1959
1960
1960 gr = groups[targetidx]
1961 gr = groups[targetidx]
1961
1962
1962 # We now add the current nodes to this subgroups. This is done
1963 # We now add the current nodes to this subgroups. This is done
1963 # after the subgroup merging because all elements from a subgroup
1964 # after the subgroup merging because all elements from a subgroup
1964 # that relied on this rev must precede it.
1965 # that relied on this rev must precede it.
1965 #
1966 #
1966 # we also update the <parents> set to include the parents of the
1967 # we also update the <parents> set to include the parents of the
1967 # new nodes.
1968 # new nodes.
1968 if rev == currentrev: # only display stuff in rev
1969 if rev == currentrev: # only display stuff in rev
1969 gr[0].append(rev)
1970 gr[0].append(rev)
1970 gr[1].remove(rev)
1971 gr[1].remove(rev)
1971 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
1972 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
1972 gr[1].update(parents)
1973 gr[1].update(parents)
1973 for p in parents:
1974 for p in parents:
1974 if p not in pendingset:
1975 if p not in pendingset:
1975 pendingset.add(p)
1976 pendingset.add(p)
1976 heappush(pendingheap, -p)
1977 heappush(pendingheap, -p)
1977
1978
1978 # Look for a subgroup to display
1979 # Look for a subgroup to display
1979 #
1980 #
1980 # When unblocked is empty (if clause), we were not waiting for any
1981 # When unblocked is empty (if clause), we were not waiting for any
1981 # revisions during the first iteration (if no priority was given) or
1982 # revisions during the first iteration (if no priority was given) or
1982 # if we emitted a whole disconnected set of the graph (reached a
1983 # if we emitted a whole disconnected set of the graph (reached a
1983 # root). In that case we arbitrarily take the oldest known
1984 # root). In that case we arbitrarily take the oldest known
1984 # subgroup. The heuristic could probably be better.
1985 # subgroup. The heuristic could probably be better.
1985 #
1986 #
1986 # Otherwise (elif clause) if the subgroup is blocked on
1987 # Otherwise (elif clause) if the subgroup is blocked on
1987 # a revision we just emitted, we can safely emit it as
1988 # a revision we just emitted, we can safely emit it as
1988 # well.
1989 # well.
1989 if not unblocked:
1990 if not unblocked:
1990 if len(groups) > 1: # display other subset
1991 if len(groups) > 1: # display other subset
1991 targetidx = 1
1992 targetidx = 1
1992 gr = groups[1]
1993 gr = groups[1]
1993 elif not gr[1] & unblocked:
1994 elif not gr[1] & unblocked:
1994 gr = None
1995 gr = None
1995
1996
1996 if gr is not None:
1997 if gr is not None:
1997 # update the set of awaited revisions with the one from the
1998 # update the set of awaited revisions with the one from the
1998 # subgroup
1999 # subgroup
1999 unblocked |= gr[1]
2000 unblocked |= gr[1]
2000 # output all revisions in the subgroup
2001 # output all revisions in the subgroup
2001 for r in gr[0]:
2002 for r in gr[0]:
2002 yield r
2003 yield r
2003 # delete the subgroup that you just output
2004 # delete the subgroup that you just output
2004 # unless it is groups[0] in which case you just empty it.
2005 # unless it is groups[0] in which case you just empty it.
2005 if targetidx:
2006 if targetidx:
2006 del groups[targetidx]
2007 del groups[targetidx]
2007 else:
2008 else:
2008 gr[0][:] = []
2009 gr[0][:] = []
2009 # Check if we have some subgroup waiting for revisions we are not going to
2010 # Check if we have some subgroup waiting for revisions we are not going to
2010 # iterate over
2011 # iterate over
2011 for g in groups:
2012 for g in groups:
2012 for r in g[0]:
2013 for r in g[0]:
2013 yield r
2014 yield r
2014
2015
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = getstring(args[0], _("subrepo requires a pattern")) if args else None

    # only the .hgsubstate file records subrepo state changes
    substatematcher = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # filter subrepo names through the user-supplied pattern
        k, p, match = util.stringmatcher(pat)
        return (name for name in names if match(name))

    def matches(rev):
        ctx = repo[rev]
        status = repo.status(ctx.p1().node(), ctx.node(),
                             match=substatematcher)

        if pat is None:
            # no pattern: any .hgsubstate change qualifies
            return status.added or status.modified or status.removed

        if status.added:
            return any(submatches(ctx.substate.keys()))

        if status.modified:
            # consider subrepos present on either side of the change
            candidates = set(ctx.p1().substate.keys())
            candidates.update(ctx.substate.keys())

            for path in submatches(candidates):
                if ctx.p1().substate.get(path) != ctx.substate.get(path):
                    return True

        if status.removed:
            return any(submatches(ctx.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2058
2059
def _substringmatcher(pattern, casesensitive=True):
    """Return a (kind, pattern, matcher) triple where a 'literal' pattern
    is matched as a substring instead of an exact string, optionally
    case-insensitively."""
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        # downgrade the exact-match matcher to a substring test
        if casesensitive:
            matcher = lambda s: pattern in s
        else:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
    return kind, pattern, matcher
2069
2070
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no argument: every tagged revision, excluding the implicit 'tip'
        found = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
    else:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            found = set([repo[tn].rev()])
        else:
            found = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & found
2097
2098
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # Backwards-compatibility alias: delegates entirely to tag().
    # (No docstring on purpose, so it stays out of the generated help.)
    return tag(repo, subset, x)
2101
2102
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # obsstore-backed cache of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2110
2111
2111
2112
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # alias of author(); all argument parsing happens there
    return author(repo, subset, x)
2120
2121
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-revision only exists in a fullreposet
    # or when a caller listed it in the subset explicitly
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2129
2130
def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    result = []
    seen = set()
    for spec in s.split('\0'):
        try:
            # fast path for integer revision
            rev = int(spec)
            if str(rev) != spec or rev not in cl:
                raise ValueError
            revs = [rev]
        except ValueError:
            revs = stringset(repo, subset, spec)

        for rev in revs:
            if rev in seen:
                continue
            if (rev in subset
                or rev == node.nullrev and isinstance(subset, fullreposet)):
                result.append(rev)
            seen.add(rev)
    return baseset(result)
2157
2158
# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order != followorder:
        return _orderedlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedlist(repo, fullreposet(repo), x)
2166
2167
def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # keep the order of the '\0'-separated list, filtered by the subset
    wanted = [int(r) for r in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2174
2175
# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order != followorder:
        return _orderedintlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedintlist(repo, fullreposet(repo), x)
2183
2184
def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # keep the order of the '\0'-separated hex nodes, filtered by the subset
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2192
2193
# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order != followorder:
        return _orderedhexlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedhexlist(repo, fullreposet(repo), x)
2201
2202
# Dispatch table mapping parse-tree node names to the functions that
# evaluate them; consulted when interpreting a parsed revset tree.
methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}
2221
2222
def posttreebuilthook(tree, repo):
    """No-op extension point invoked after the revset tree is optimized."""
    # hook for extensions to execute code on the optimized tree
    pass
2225
2226
def match(ui, spec, repo=None, order=defineorder):
    """Create a matcher for a single revision spec

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    # thin convenience wrapper: a single spec is a one-element matchany()
    return matchany(ui, [spec], repo=repo, order=order)
2233
2234
def matchany(ui, specs, repo=None, order=defineorder):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    if not specs:
        # nothing to match: always produce an empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    # resolving symbols against the repo is optional
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        # combine several specs as 'or' of the parsed subtrees
        subtrees = tuple(revsetlang.parse(s, lookup) for s in specs)
        tree = ('or', ('list',) + subtrees)

    if ui:
        tree = revsetlang.expandaliases(ui, tree)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree, order)
    tree = revsetlang.optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
2263
2264
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # normalize plain iterables into a smartset before evaluation
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2275
2276
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    table = registrarobj._table
    for name in table:
        func = table[name]
        # register the predicate, and record whether it is safe to run
        # on behalf of untrusted users
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
2283
2284
# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now