##// END OF EJS Templates
dagop: move blockancestors() and blockdescendants() from context...
Yuya Nishihara -
r32904:582080a4 default
parent child Browse files
Show More
@@ -1,2389 +1,2306
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 subrepo,
41 subrepo,
42 util,
42 util,
43 )
43 )
44
44
45 propertycache = util.propertycache
45 propertycache = util.propertycache
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged; the
        # subclasses' __init__ must therefore tolerate being re-run on an
        # already-initialized object (see changectx.__init__).
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        # Defaults; subclasses overwrite these during their __init__.
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        # Short hex form of the node; on Python 3, str() must return a
        # native (unicode) string, so decode the ascii hex digest.
        r = short(self.node())
        if pycompat.ispy3:
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Strict type equality: a changectx never compares equal to a
        # workingctx even at the same revision. AttributeError covers
        # non-context 'other' objects lacking _rev.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership means "path tracked in this context's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # ctx['path'] yields a filectx for that path (filectx is provided
        # by subclasses).
        return self.filectx(key)

    def __iter__(self):
        # Iterates over tracked file paths.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        's' is a prior scmutil.status whose deleted/unknown/ignored lists are
        folded into the result; modified/added/removed/clean are recomputed
        from the manifest diff.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        # diff maps fn -> ((node1, flag1), (node2, flag2)), or None when the
        # entry is clean (only produced when clean=listclean).
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                # Already reported as deleted; don't double-report.
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # Working-directory side: node2 is a wdir placeholder, so we
                # must compare actual file contents.
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate mapping for this context (lazy).
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for a subrepo path in .hgsubstate.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # Human-readable phase name ('public', 'draft', 'secret').
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above 'public' may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # _parents holds one entry for single-parent changesets; synthesize
        # a null context so callers always get a usable second parent.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, preferring whichever manifest
        # representation is already cached on this instance: full manifest,
        # then manifest delta, then a targeted find() on the manifestlog.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files simply have no flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # Build a matcher rooted at the repo using the non-filesystem
        # auditor, bound to this context for fileset evaluation.
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        # Note the argument order: ctx2 is the base, self the target.
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
387
387
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    if not filtername.startswith('visible'):
        # Generic message for any other repoview filter subset.
        msg = _("filtered revision '%s' (not in '%s' subset)") % (changeid,
                                                                  filtername)
        return error.FilteredRepoLookupError(msg)
    # The common case: the revision is hidden by a 'visible*' filter, so we
    # can offer the --hidden escape hatch as a hint.
    msg = _("hidden revision '%s'") % changeid
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
400
400
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The cascade below tries progressively more expensive/ambiguous
        # interpretations of changeid; each successful branch sets _node and
        # _rev and returns. Filtered-access errors are re-raised so the outer
        # handler can turn them into a user-facing _filterederror.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # Looks like a binary node id.
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # Try a decimal revision number (negative counts from tip).
            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    # Reject things like '010' or '1.0' that int() accepts
                    # but are not canonical revision spellings.
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # Full 40-char hex node id.
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: unambiguous hex prefix match.
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Hexify binary-looking ids so the error message is readable.
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # Hash by revision when available; fall back to identity so partially
        # constructed contexts remain hashable.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # A context is falsy only for the null revision.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision (lazy, cached).
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        # Deliberately NOT a propertycache: the manifestlog does its own
        # caching, and callers may want a fresh ctx — TODO confirm.
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            # Single-parent changeset: omit the null second parent; p2() on
            # basectx synthesizes it on demand.
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        # Legacy tuple form of the changelog entry.
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        # Branch names are stored encoded in extra; convert to local encoding.
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
579 def phase(self):
579 def phase(self):
580 return self._repo._phasecache.phase(self._repo, self._rev)
580 return self._repo._phasecache.phase(self._repo, self._rev)
581 def hidden(self):
581 def hidden(self):
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
583
583
584 def children(self):
584 def children(self):
585 """return contexts for each child changeset"""
585 """return contexts for each child changeset"""
586 c = self._repo.changelog.children(self._node)
586 c = self._repo.changelog.children(self._node)
587 return [changectx(self._repo, x) for x in c]
587 return [changectx(self._repo, x) for x in c]
588
588
589 def ancestors(self):
589 def ancestors(self):
590 for a in self._repo.changelog.ancestors([self._rev]):
590 for a in self._repo.changelog.ancestors([self._rev]):
591 yield changectx(self._repo, a)
591 yield changectx(self._repo, a)
592
592
593 def descendants(self):
593 def descendants(self):
594 for d in self._repo.changelog.descendants([self._rev]):
594 for d in self._repo.changelog.descendants([self._rev]):
595 yield changectx(self._repo, d)
595 yield changectx(self._repo, d)
596
596
597 def filectx(self, path, fileid=None, filelog=None):
597 def filectx(self, path, fileid=None, filelog=None):
598 """get a file context from this changeset"""
598 """get a file context from this changeset"""
599 if fileid is None:
599 if fileid is None:
600 fileid = self.filenode(path)
600 fileid = self.filenode(path)
601 return filectx(self._repo, path, fileid=fileid,
601 return filectx(self._repo, path, fileid=fileid,
602 changectx=self, filelog=filelog)
602 changectx=self, filelog=filelog)
603
603
604 def ancestor(self, c2, warn=False):
604 def ancestor(self, c2, warn=False):
605 """return the "best" ancestor context of self and c2
605 """return the "best" ancestor context of self and c2
606
606
607 If there are multiple candidates, it will show a message and check
607 If there are multiple candidates, it will show a message and check
608 merge.preferancestor configuration before falling back to the
608 merge.preferancestor configuration before falling back to the
609 revlog ancestor."""
609 revlog ancestor."""
610 # deal with workingctxs
610 # deal with workingctxs
611 n2 = c2._node
611 n2 = c2._node
612 if n2 is None:
612 if n2 is None:
613 n2 = c2._parents[0]._node
613 n2 = c2._parents[0]._node
614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
615 if not cahs:
615 if not cahs:
616 anc = nullid
616 anc = nullid
617 elif len(cahs) == 1:
617 elif len(cahs) == 1:
618 anc = cahs[0]
618 anc = cahs[0]
619 else:
619 else:
620 # experimental config: merge.preferancestor
620 # experimental config: merge.preferancestor
621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
622 try:
622 try:
623 ctx = changectx(self._repo, r)
623 ctx = changectx(self._repo, r)
624 except error.RepoLookupError:
624 except error.RepoLookupError:
625 continue
625 continue
626 anc = ctx.node()
626 anc = ctx.node()
627 if anc in cahs:
627 if anc in cahs:
628 break
628 break
629 else:
629 else:
630 anc = self._repo.changelog.ancestor(self._node, n2)
630 anc = self._repo.changelog.ancestor(self._node, n2)
631 if warn:
631 if warn:
632 self._repo.ui.status(
632 self._repo.ui.status(
633 (_("note: using %s as ancestor of %s and %s\n") %
633 (_("note: using %s as ancestor of %s and %s\n") %
634 (short(anc), short(self._node), short(n2))) +
634 (short(anc), short(self._node), short(n2))) +
635 ''.join(_(" alternatively, use --config "
635 ''.join(_(" alternatively, use --config "
636 "merge.preferancestor=%s\n") %
636 "merge.preferancestor=%s\n") %
637 short(n) for n in sorted(cahs) if n != anc))
637 short(n) for n in sorted(cahs) if n != anc))
638 return changectx(self._repo, anc)
638 return changectx(self._repo, anc)
639
639
640 def descendant(self, other):
640 def descendant(self, other):
641 """True if other is descendant of this changeset"""
641 """True if other is descendant of this changeset"""
642 return self._repo.changelog.descendant(self._rev, other._rev)
642 return self._repo.changelog.descendant(self._rev, other._rev)
643
643
644 def walk(self, match):
644 def walk(self, match):
645 '''Generates matching file names.'''
645 '''Generates matching file names.'''
646
646
647 # Wrap match.bad method to have message with nodeid
647 # Wrap match.bad method to have message with nodeid
648 def bad(fn, msg):
648 def bad(fn, msg):
649 # The manifest doesn't know about subrepos, so don't complain about
649 # The manifest doesn't know about subrepos, so don't complain about
650 # paths into valid subrepos.
650 # paths into valid subrepos.
651 if any(fn == s or fn.startswith(s + '/')
651 if any(fn == s or fn.startswith(s + '/')
652 for s in self.substate):
652 for s in self.substate):
653 return
653 return
654 match.bad(fn, _('no such file in rev %s') % self)
654 match.bad(fn, _('no such file in rev %s') % self)
655
655
656 m = matchmod.badmatch(match, bad)
656 m = matchmod.badmatch(match, bad)
657 return self._manifest.walk(m)
657 return self._manifest.walk(m)
658
658
659 def matches(self, match):
659 def matches(self, match):
660 return self.walk(match)
660 return self.walk(match)
661
661
662 class basefilectx(object):
662 class basefilectx(object):
663 """A filecontext object represents the common logic for its children:
663 """A filecontext object represents the common logic for its children:
664 filectx: read-only access to a filerevision that is already present
664 filectx: read-only access to a filerevision that is already present
665 in the repo,
665 in the repo,
666 workingfilectx: a filecontext that represents files from the working
666 workingfilectx: a filecontext that represents files from the working
667 directory,
667 directory,
668 memfilectx: a filecontext that represents files in-memory,
668 memfilectx: a filecontext that represents files in-memory,
669 overlayfilectx: duplicate another filecontext with some fields overridden.
669 overlayfilectx: duplicate another filecontext with some fields overridden.
670 """
670 """
671 @propertycache
671 @propertycache
672 def _filelog(self):
672 def _filelog(self):
673 return self._repo.file(self._path)
673 return self._repo.file(self._path)
674
674
675 @propertycache
675 @propertycache
676 def _changeid(self):
676 def _changeid(self):
677 if r'_changeid' in self.__dict__:
677 if r'_changeid' in self.__dict__:
678 return self._changeid
678 return self._changeid
679 elif r'_changectx' in self.__dict__:
679 elif r'_changectx' in self.__dict__:
680 return self._changectx.rev()
680 return self._changectx.rev()
681 elif r'_descendantrev' in self.__dict__:
681 elif r'_descendantrev' in self.__dict__:
682 # this file context was created from a revision with a known
682 # this file context was created from a revision with a known
683 # descendant, we can (lazily) correct for linkrev aliases
683 # descendant, we can (lazily) correct for linkrev aliases
684 return self._adjustlinkrev(self._descendantrev)
684 return self._adjustlinkrev(self._descendantrev)
685 else:
685 else:
686 return self._filelog.linkrev(self._filerev)
686 return self._filelog.linkrev(self._filerev)
687
687
688 @propertycache
688 @propertycache
689 def _filenode(self):
689 def _filenode(self):
690 if r'_fileid' in self.__dict__:
690 if r'_fileid' in self.__dict__:
691 return self._filelog.lookup(self._fileid)
691 return self._filelog.lookup(self._fileid)
692 else:
692 else:
693 return self._changectx.filenode(self._path)
693 return self._changectx.filenode(self._path)
694
694
695 @propertycache
695 @propertycache
696 def _filerev(self):
696 def _filerev(self):
697 return self._filelog.rev(self._filenode)
697 return self._filelog.rev(self._filenode)
698
698
699 @propertycache
699 @propertycache
700 def _repopath(self):
700 def _repopath(self):
701 return self._path
701 return self._path
702
702
703 def __nonzero__(self):
703 def __nonzero__(self):
704 try:
704 try:
705 self._filenode
705 self._filenode
706 return True
706 return True
707 except error.LookupError:
707 except error.LookupError:
708 # file is missing
708 # file is missing
709 return False
709 return False
710
710
711 __bool__ = __nonzero__
711 __bool__ = __nonzero__
712
712
713 def __str__(self):
713 def __str__(self):
714 try:
714 try:
715 return "%s@%s" % (self.path(), self._changectx)
715 return "%s@%s" % (self.path(), self._changectx)
716 except error.LookupError:
716 except error.LookupError:
717 return "%s@???" % self.path()
717 return "%s@???" % self.path()
718
718
719 def __repr__(self):
719 def __repr__(self):
720 return "<%s %s>" % (type(self).__name__, str(self))
720 return "<%s %s>" % (type(self).__name__, str(self))
721
721
722 def __hash__(self):
722 def __hash__(self):
723 try:
723 try:
724 return hash((self._path, self._filenode))
724 return hash((self._path, self._filenode))
725 except AttributeError:
725 except AttributeError:
726 return id(self)
726 return id(self)
727
727
728 def __eq__(self, other):
728 def __eq__(self, other):
729 try:
729 try:
730 return (type(self) == type(other) and self._path == other._path
730 return (type(self) == type(other) and self._path == other._path
731 and self._filenode == other._filenode)
731 and self._filenode == other._filenode)
732 except AttributeError:
732 except AttributeError:
733 return False
733 return False
734
734
735 def __ne__(self, other):
735 def __ne__(self, other):
736 return not (self == other)
736 return not (self == other)
737
737
738 def filerev(self):
738 def filerev(self):
739 return self._filerev
739 return self._filerev
740 def filenode(self):
740 def filenode(self):
741 return self._filenode
741 return self._filenode
742 @propertycache
742 @propertycache
743 def _flags(self):
743 def _flags(self):
744 return self._changectx.flags(self._path)
744 return self._changectx.flags(self._path)
745 def flags(self):
745 def flags(self):
746 return self._flags
746 return self._flags
747 def filelog(self):
747 def filelog(self):
748 return self._filelog
748 return self._filelog
749 def rev(self):
749 def rev(self):
750 return self._changeid
750 return self._changeid
751 def linkrev(self):
751 def linkrev(self):
752 return self._filelog.linkrev(self._filerev)
752 return self._filelog.linkrev(self._filerev)
753 def node(self):
753 def node(self):
754 return self._changectx.node()
754 return self._changectx.node()
755 def hex(self):
755 def hex(self):
756 return self._changectx.hex()
756 return self._changectx.hex()
757 def user(self):
757 def user(self):
758 return self._changectx.user()
758 return self._changectx.user()
759 def date(self):
759 def date(self):
760 return self._changectx.date()
760 return self._changectx.date()
761 def files(self):
761 def files(self):
762 return self._changectx.files()
762 return self._changectx.files()
763 def description(self):
763 def description(self):
764 return self._changectx.description()
764 return self._changectx.description()
765 def branch(self):
765 def branch(self):
766 return self._changectx.branch()
766 return self._changectx.branch()
767 def extra(self):
767 def extra(self):
768 return self._changectx.extra()
768 return self._changectx.extra()
769 def phase(self):
769 def phase(self):
770 return self._changectx.phase()
770 return self._changectx.phase()
771 def phasestr(self):
771 def phasestr(self):
772 return self._changectx.phasestr()
772 return self._changectx.phasestr()
773 def manifest(self):
773 def manifest(self):
774 return self._changectx.manifest()
774 return self._changectx.manifest()
775 def changectx(self):
775 def changectx(self):
776 return self._changectx
776 return self._changectx
777 def renamed(self):
777 def renamed(self):
778 return self._copied
778 return self._copied
779 def repo(self):
779 def repo(self):
780 return self._repo
780 return self._repo
781 def size(self):
781 def size(self):
782 return len(self.data())
782 return len(self.data())
783
783
784 def path(self):
784 def path(self):
785 return self._path
785 return self._path
786
786
787 def isbinary(self):
787 def isbinary(self):
788 try:
788 try:
789 return util.binary(self.data())
789 return util.binary(self.data())
790 except IOError:
790 except IOError:
791 return False
791 return False
792 def isexec(self):
792 def isexec(self):
793 return 'x' in self.flags()
793 return 'x' in self.flags()
794 def islink(self):
794 def islink(self):
795 return 'l' in self.flags()
795 return 'l' in self.flags()
796
796
797 def isabsent(self):
797 def isabsent(self):
798 """whether this filectx represents a file not in self._changectx
798 """whether this filectx represents a file not in self._changectx
799
799
800 This is mainly for merge code to detect change/delete conflicts. This is
800 This is mainly for merge code to detect change/delete conflicts. This is
801 expected to be True for all subclasses of basectx."""
801 expected to be True for all subclasses of basectx."""
802 return False
802 return False
803
803
804 _customcmp = False
804 _customcmp = False
805 def cmp(self, fctx):
805 def cmp(self, fctx):
806 """compare with other file context
806 """compare with other file context
807
807
808 returns True if different than fctx.
808 returns True if different than fctx.
809 """
809 """
810 if fctx._customcmp:
810 if fctx._customcmp:
811 return fctx.cmp(self)
811 return fctx.cmp(self)
812
812
813 if (fctx._filenode is None
813 if (fctx._filenode is None
814 and (self._repo._encodefilterpats
814 and (self._repo._encodefilterpats
815 # if file data starts with '\1\n', empty metadata block is
815 # if file data starts with '\1\n', empty metadata block is
816 # prepended, which adds 4 bytes to filelog.size().
816 # prepended, which adds 4 bytes to filelog.size().
817 or self.size() - 4 == fctx.size())
817 or self.size() - 4 == fctx.size())
818 or self.size() == fctx.size()):
818 or self.size() == fctx.size()):
819 return self._filelog.cmp(self._filenode, fctx.data())
819 return self._filelog.cmp(self._filenode, fctx.data())
820
820
821 return True
821 return True
822
822
823 def _adjustlinkrev(self, srcrev, inclusive=False):
823 def _adjustlinkrev(self, srcrev, inclusive=False):
824 """return the first ancestor of <srcrev> introducing <fnode>
824 """return the first ancestor of <srcrev> introducing <fnode>
825
825
826 If the linkrev of the file revision does not point to an ancestor of
826 If the linkrev of the file revision does not point to an ancestor of
827 srcrev, we'll walk down the ancestors until we find one introducing
827 srcrev, we'll walk down the ancestors until we find one introducing
828 this file revision.
828 this file revision.
829
829
830 :srcrev: the changeset revision we search ancestors from
830 :srcrev: the changeset revision we search ancestors from
831 :inclusive: if true, the src revision will also be checked
831 :inclusive: if true, the src revision will also be checked
832 """
832 """
833 repo = self._repo
833 repo = self._repo
834 cl = repo.unfiltered().changelog
834 cl = repo.unfiltered().changelog
835 mfl = repo.manifestlog
835 mfl = repo.manifestlog
836 # fetch the linkrev
836 # fetch the linkrev
837 lkr = self.linkrev()
837 lkr = self.linkrev()
838 # hack to reuse ancestor computation when searching for renames
838 # hack to reuse ancestor computation when searching for renames
839 memberanc = getattr(self, '_ancestrycontext', None)
839 memberanc = getattr(self, '_ancestrycontext', None)
840 iteranc = None
840 iteranc = None
841 if srcrev is None:
841 if srcrev is None:
842 # wctx case, used by workingfilectx during mergecopy
842 # wctx case, used by workingfilectx during mergecopy
843 revs = [p.rev() for p in self._repo[None].parents()]
843 revs = [p.rev() for p in self._repo[None].parents()]
844 inclusive = True # we skipped the real (revless) source
844 inclusive = True # we skipped the real (revless) source
845 else:
845 else:
846 revs = [srcrev]
846 revs = [srcrev]
847 if memberanc is None:
847 if memberanc is None:
848 memberanc = iteranc = cl.ancestors(revs, lkr,
848 memberanc = iteranc = cl.ancestors(revs, lkr,
849 inclusive=inclusive)
849 inclusive=inclusive)
850 # check if this linkrev is an ancestor of srcrev
850 # check if this linkrev is an ancestor of srcrev
851 if lkr not in memberanc:
851 if lkr not in memberanc:
852 if iteranc is None:
852 if iteranc is None:
853 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
853 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
854 fnode = self._filenode
854 fnode = self._filenode
855 path = self._path
855 path = self._path
856 for a in iteranc:
856 for a in iteranc:
857 ac = cl.read(a) # get changeset data (we avoid object creation)
857 ac = cl.read(a) # get changeset data (we avoid object creation)
858 if path in ac[3]: # checking the 'files' field.
858 if path in ac[3]: # checking the 'files' field.
859 # The file has been touched, check if the content is
859 # The file has been touched, check if the content is
860 # similar to the one we search for.
860 # similar to the one we search for.
861 if fnode == mfl[ac[0]].readfast().get(path):
861 if fnode == mfl[ac[0]].readfast().get(path):
862 return a
862 return a
863 # In theory, we should never get out of that loop without a result.
863 # In theory, we should never get out of that loop without a result.
864 # But if manifest uses a buggy file revision (not children of the
864 # But if manifest uses a buggy file revision (not children of the
865 # one it replaces) we could. Such a buggy situation will likely
865 # one it replaces) we could. Such a buggy situation will likely
866 # result is crash somewhere else at to some point.
866 # result is crash somewhere else at to some point.
867 return lkr
867 return lkr
868
868
869 def introrev(self):
869 def introrev(self):
870 """return the rev of the changeset which introduced this file revision
870 """return the rev of the changeset which introduced this file revision
871
871
872 This method is different from linkrev because it take into account the
872 This method is different from linkrev because it take into account the
873 changeset the filectx was created from. It ensures the returned
873 changeset the filectx was created from. It ensures the returned
874 revision is one of its ancestors. This prevents bugs from
874 revision is one of its ancestors. This prevents bugs from
875 'linkrev-shadowing' when a file revision is used by multiple
875 'linkrev-shadowing' when a file revision is used by multiple
876 changesets.
876 changesets.
877 """
877 """
878 lkr = self.linkrev()
878 lkr = self.linkrev()
879 attrs = vars(self)
879 attrs = vars(self)
880 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
880 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
881 if noctx or self.rev() == lkr:
881 if noctx or self.rev() == lkr:
882 return self.linkrev()
882 return self.linkrev()
883 return self._adjustlinkrev(self.rev(), inclusive=True)
883 return self._adjustlinkrev(self.rev(), inclusive=True)
884
884
885 def _parentfilectx(self, path, fileid, filelog):
885 def _parentfilectx(self, path, fileid, filelog):
886 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
886 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
887 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
887 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
888 if '_changeid' in vars(self) or '_changectx' in vars(self):
888 if '_changeid' in vars(self) or '_changectx' in vars(self):
889 # If self is associated with a changeset (probably explicitly
889 # If self is associated with a changeset (probably explicitly
890 # fed), ensure the created filectx is associated with a
890 # fed), ensure the created filectx is associated with a
891 # changeset that is an ancestor of self.changectx.
891 # changeset that is an ancestor of self.changectx.
892 # This lets us later use _adjustlinkrev to get a correct link.
892 # This lets us later use _adjustlinkrev to get a correct link.
893 fctx._descendantrev = self.rev()
893 fctx._descendantrev = self.rev()
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 elif '_descendantrev' in vars(self):
895 elif '_descendantrev' in vars(self):
896 # Otherwise propagate _descendantrev if we have one associated.
896 # Otherwise propagate _descendantrev if we have one associated.
897 fctx._descendantrev = self._descendantrev
897 fctx._descendantrev = self._descendantrev
898 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
898 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
899 return fctx
899 return fctx
900
900
901 def parents(self):
901 def parents(self):
902 _path = self._path
902 _path = self._path
903 fl = self._filelog
903 fl = self._filelog
904 parents = self._filelog.parents(self._filenode)
904 parents = self._filelog.parents(self._filenode)
905 pl = [(_path, node, fl) for node in parents if node != nullid]
905 pl = [(_path, node, fl) for node in parents if node != nullid]
906
906
907 r = fl.renamed(self._filenode)
907 r = fl.renamed(self._filenode)
908 if r:
908 if r:
909 # - In the simple rename case, both parent are nullid, pl is empty.
909 # - In the simple rename case, both parent are nullid, pl is empty.
910 # - In case of merge, only one of the parent is null id and should
910 # - In case of merge, only one of the parent is null id and should
911 # be replaced with the rename information. This parent is -always-
911 # be replaced with the rename information. This parent is -always-
912 # the first one.
912 # the first one.
913 #
913 #
914 # As null id have always been filtered out in the previous list
914 # As null id have always been filtered out in the previous list
915 # comprehension, inserting to 0 will always result in "replacing
915 # comprehension, inserting to 0 will always result in "replacing
916 # first nullid parent with rename information.
916 # first nullid parent with rename information.
917 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
917 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
918
918
919 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
919 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
920
920
921 def p1(self):
921 def p1(self):
922 return self.parents()[0]
922 return self.parents()[0]
923
923
924 def p2(self):
924 def p2(self):
925 p = self.parents()
925 p = self.parents()
926 if len(p) == 2:
926 if len(p) == 2:
927 return p[1]
927 return p[1]
928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
929
929
930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
931 diffopts=None):
931 diffopts=None):
932 '''returns a list of tuples of ((ctx, number), line) for each line
932 '''returns a list of tuples of ((ctx, number), line) for each line
933 in the file, where ctx is the filectx of the node where
933 in the file, where ctx is the filectx of the node where
934 that line was last changed; if linenumber parameter is true, number is
934 that line was last changed; if linenumber parameter is true, number is
935 the line number at the first appearance in the managed file, otherwise,
935 the line number at the first appearance in the managed file, otherwise,
936 number has a fixed value of False.
936 number has a fixed value of False.
937 '''
937 '''
938
938
939 def lines(text):
939 def lines(text):
940 if text.endswith("\n"):
940 if text.endswith("\n"):
941 return text.count("\n")
941 return text.count("\n")
942 return text.count("\n") + int(bool(text))
942 return text.count("\n") + int(bool(text))
943
943
944 if linenumber:
944 if linenumber:
945 def decorate(text, rev):
945 def decorate(text, rev):
946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
947 else:
947 else:
948 def decorate(text, rev):
948 def decorate(text, rev):
949 return ([(rev, False)] * lines(text), text)
949 return ([(rev, False)] * lines(text), text)
950
950
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952
952
953 def parents(f):
953 def parents(f):
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 # isn't an ancestor of the srcrev.
957 # isn't an ancestor of the srcrev.
958 f._changeid
958 f._changeid
959 pl = f.parents()
959 pl = f.parents()
960
960
961 # Don't return renamed parents if we aren't following.
961 # Don't return renamed parents if we aren't following.
962 if not follow:
962 if not follow:
963 pl = [p for p in pl if p.path() == f.path()]
963 pl = [p for p in pl if p.path() == f.path()]
964
964
965 # renamed filectx won't have a filelog yet, so set it
965 # renamed filectx won't have a filelog yet, so set it
966 # from the cache to save time
966 # from the cache to save time
967 for p in pl:
967 for p in pl:
968 if not '_filelog' in p.__dict__:
968 if not '_filelog' in p.__dict__:
969 p._filelog = getlog(p.path())
969 p._filelog = getlog(p.path())
970
970
971 return pl
971 return pl
972
972
973 # use linkrev to find the first changeset where self appeared
973 # use linkrev to find the first changeset where self appeared
974 base = self
974 base = self
975 introrev = self.introrev()
975 introrev = self.introrev()
976 if self.rev() != introrev:
976 if self.rev() != introrev:
977 base = self.filectx(self.filenode(), changeid=introrev)
977 base = self.filectx(self.filenode(), changeid=introrev)
978 if getattr(base, '_ancestrycontext', None) is None:
978 if getattr(base, '_ancestrycontext', None) is None:
979 cl = self._repo.changelog
979 cl = self._repo.changelog
980 if introrev is None:
980 if introrev is None:
981 # wctx is not inclusive, but works because _ancestrycontext
981 # wctx is not inclusive, but works because _ancestrycontext
982 # is used to test filelog revisions
982 # is used to test filelog revisions
983 ac = cl.ancestors([p.rev() for p in base.parents()],
983 ac = cl.ancestors([p.rev() for p in base.parents()],
984 inclusive=True)
984 inclusive=True)
985 else:
985 else:
986 ac = cl.ancestors([introrev], inclusive=True)
986 ac = cl.ancestors([introrev], inclusive=True)
987 base._ancestrycontext = ac
987 base._ancestrycontext = ac
988
988
989 # This algorithm would prefer to be recursive, but Python is a
989 # This algorithm would prefer to be recursive, but Python is a
990 # bit recursion-hostile. Instead we do an iterative
990 # bit recursion-hostile. Instead we do an iterative
991 # depth-first search.
991 # depth-first search.
992
992
993 # 1st DFS pre-calculates pcache and needed
993 # 1st DFS pre-calculates pcache and needed
994 visit = [base]
994 visit = [base]
995 pcache = {}
995 pcache = {}
996 needed = {base: 1}
996 needed = {base: 1}
997 while visit:
997 while visit:
998 f = visit.pop()
998 f = visit.pop()
999 if f in pcache:
999 if f in pcache:
1000 continue
1000 continue
1001 pl = parents(f)
1001 pl = parents(f)
1002 pcache[f] = pl
1002 pcache[f] = pl
1003 for p in pl:
1003 for p in pl:
1004 needed[p] = needed.get(p, 0) + 1
1004 needed[p] = needed.get(p, 0) + 1
1005 if p not in pcache:
1005 if p not in pcache:
1006 visit.append(p)
1006 visit.append(p)
1007
1007
1008 # 2nd DFS does the actual annotate
1008 # 2nd DFS does the actual annotate
1009 visit[:] = [base]
1009 visit[:] = [base]
1010 hist = {}
1010 hist = {}
1011 while visit:
1011 while visit:
1012 f = visit[-1]
1012 f = visit[-1]
1013 if f in hist:
1013 if f in hist:
1014 visit.pop()
1014 visit.pop()
1015 continue
1015 continue
1016
1016
1017 ready = True
1017 ready = True
1018 pl = pcache[f]
1018 pl = pcache[f]
1019 for p in pl:
1019 for p in pl:
1020 if p not in hist:
1020 if p not in hist:
1021 ready = False
1021 ready = False
1022 visit.append(p)
1022 visit.append(p)
1023 if ready:
1023 if ready:
1024 visit.pop()
1024 visit.pop()
1025 curr = decorate(f.data(), f)
1025 curr = decorate(f.data(), f)
1026 skipchild = False
1026 skipchild = False
1027 if skiprevs is not None:
1027 if skiprevs is not None:
1028 skipchild = f._changeid in skiprevs
1028 skipchild = f._changeid in skiprevs
1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1030 diffopts)
1030 diffopts)
1031 for p in pl:
1031 for p in pl:
1032 if needed[p] == 1:
1032 if needed[p] == 1:
1033 del hist[p]
1033 del hist[p]
1034 del needed[p]
1034 del needed[p]
1035 else:
1035 else:
1036 needed[p] -= 1
1036 needed[p] -= 1
1037
1037
1038 hist[f] = curr
1038 hist[f] = curr
1039 del pcache[f]
1039 del pcache[f]
1040
1040
1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1042
1042
1043 def ancestors(self, followfirst=False):
1043 def ancestors(self, followfirst=False):
1044 visit = {}
1044 visit = {}
1045 c = self
1045 c = self
1046 if followfirst:
1046 if followfirst:
1047 cut = 1
1047 cut = 1
1048 else:
1048 else:
1049 cut = None
1049 cut = None
1050
1050
1051 while True:
1051 while True:
1052 for parent in c.parents()[:cut]:
1052 for parent in c.parents()[:cut]:
1053 visit[(parent.linkrev(), parent.filenode())] = parent
1053 visit[(parent.linkrev(), parent.filenode())] = parent
1054 if not visit:
1054 if not visit:
1055 break
1055 break
1056 c = visit.pop(max(visit))
1056 c = visit.pop(max(visit))
1057 yield c
1057 yield c
1058
1058
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # For each parent, compute the diff blocks between that parent's text
    # and the child's text. Each annotate datum is a (lines, text) pair.
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                # Equal block: copy the parent's line annotations over the
                # corresponding child lines.
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # Parent side is at least as long: map each still-unblamed
                    # child line 1:1 onto a parent line (clamped to a2 - 1).
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    # Parent side is shorter; defer to the second pass, which
                    # may repeat the parent's last line.
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1168
1168
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision must be given;
        # the remaining attributes are derived lazily via propertycache.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # Lazily resolve the changeset context from self._changeid.
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Revision data with revlog flag processing (e.g. censorship)
        # bypassed.
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        # File content at this revision; censored nodes either abort or,
        # with censor.policy=ignore, read as empty.
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # Size as recorded by the filelog for this file revision.
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # The filelog records a rename, but this changeset is not the one
        # that introduced the file revision. Only report the copy if neither
        # changeset parent already contains this exact file revision.
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # Parent does not have the file at all; keep looking.
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1274
1274
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    haschanges = any(blocktype == '!' for _, blocktype in inrangeblocks)
    return haschanges, linerange1
1284
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Each yielded item is a `(fctx, (fromline, toline))` pair giving the
    ancestor file context and the line range the block maps to there.
    With `followfirst`, only first parents are followed.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    # Start from the revision that actually introduced this file revision.
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # Pending contexts keyed by (linkrev, filenode); popping max(visit)
    # walks highest linkrevs first.
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            # Some parent-to-c diff touched the block: c modified it.
            yield c, linerange2
1319
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Items are `(fctx, (fromline, toline))` pairs, like blockancestors().
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # Map of already-processed file revisions to their (fctx, linerange).
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
1357
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # Not committed yet: no revision number or node is assigned.
        self._rev = None
        self._node = None
        self._text = text
        # Only set the attributes that were explicitly provided; otherwise
        # the propertycache fallbacks below compute them on demand.
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __str__(self):
        # Render as "<first parent>+" to mark an uncommitted context.
        return str(self._parents[0]) + r"+"

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    def __nonzero__(self):
        # A committable context is always truthy (unlike the null changectx).
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Default status: full repository status vs. the working directory.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic commit date.
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        # Uncommitted contexts have no recorded subrepo revisions.
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # All files touched by this context (modified + added + removed).
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # Uncommitted contexts carry no tags.
        return []

    def bookmarks(self):
        # Inherit the bookmarks of all parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # The commit-to-be is at least draft, and no lower than any parent.
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Prefer the cached manifest when one has already been built.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # Yield the parents first, then all their changelog ancestors.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        # Base implementation: a bare committable context is never dirty;
        # workingctx overrides this with a real check.
        return False
1555
1472
1556 class workingctx(committablectx):
1473 class workingctx(committablectx):
1557 """A workingctx object makes access to data related to
1474 """A workingctx object makes access to data related to
1558 the current working directory convenient.
1475 the current working directory convenient.
1559 date - any valid date string or (unixtime, offset), or None.
1476 date - any valid date string or (unixtime, offset), or None.
1560 user - username string, or None.
1477 user - username string, or None.
1561 extra - a dictionary of extra values, or None.
1478 extra - a dictionary of extra values, or None.
1562 changes - a list of file lists as returned by localrepo.status()
1479 changes - a list of file lists as returned by localrepo.status()
1563 or None to use the repository status.
1480 or None to use the repository status.
1564 """
1481 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # Pure delegation: all commit-metadata handling lives in
        # committablectx.__init__.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1568
1485
1569 def __iter__(self):
1486 def __iter__(self):
1570 d = self._repo.dirstate
1487 d = self._repo.dirstate
1571 for f in d:
1488 for f in d:
1572 if d[f] != 'r':
1489 if d[f] != 'r':
1573 yield f
1490 yield f
1574
1491
1575 def __contains__(self, key):
1492 def __contains__(self, key):
1576 return self._repo.dirstate[key] not in "?r"
1493 return self._repo.dirstate[key] not in "?r"
1577
1494
    def hex(self):
        # The working directory has no real node; report the canonical
        # fake working-directory id. (`hex` here is the module-level
        # node.hex, not this method.)
        return hex(wdirid)
1580
1497
1581 @propertycache
1498 @propertycache
1582 def _parents(self):
1499 def _parents(self):
1583 p = self._repo.dirstate.parents()
1500 p = self._repo.dirstate.parents()
1584 if p[1] == nullid:
1501 if p[1] == nullid:
1585 p = p[:-1]
1502 p = p[:-1]
1586 return [changectx(self._repo, x) for x in p]
1503 return [changectx(self._repo, x) for x in p]
1587
1504
1588 def filectx(self, path, filelog=None):
1505 def filectx(self, path, filelog=None):
1589 """get a file context from the working directory"""
1506 """get a file context from the working directory"""
1590 return workingfilectx(self._repo, path, workingctx=self,
1507 return workingfilectx(self._repo, path, workingctx=self,
1591 filelog=filelog)
1508 filelog=filelog)
1592
1509
1593 def dirty(self, missing=False, merge=True, branch=True):
1510 def dirty(self, missing=False, merge=True, branch=True):
1594 "check whether a working directory is modified"
1511 "check whether a working directory is modified"
1595 # check subrepos first
1512 # check subrepos first
1596 for s in sorted(self.substate):
1513 for s in sorted(self.substate):
1597 if self.sub(s).dirty():
1514 if self.sub(s).dirty():
1598 return True
1515 return True
1599 # check current working dir
1516 # check current working dir
1600 return ((merge and self.p2()) or
1517 return ((merge and self.p2()) or
1601 (branch and self.branch() != self.p1().branch()) or
1518 (branch and self.branch() != self.p1().branch()) or
1602 self.modified() or self.added() or self.removed() or
1519 self.modified() or self.added() or self.removed() or
1603 (missing and self.deleted()))
1520 (missing and self.deleted()))
1604
1521
1605 def add(self, list, prefix=""):
1522 def add(self, list, prefix=""):
1606 join = lambda f: os.path.join(prefix, f)
1523 join = lambda f: os.path.join(prefix, f)
1607 with self._repo.wlock():
1524 with self._repo.wlock():
1608 ui, ds = self._repo.ui, self._repo.dirstate
1525 ui, ds = self._repo.ui, self._repo.dirstate
1609 rejected = []
1526 rejected = []
1610 lstat = self._repo.wvfs.lstat
1527 lstat = self._repo.wvfs.lstat
1611 for f in list:
1528 for f in list:
1612 scmutil.checkportable(ui, join(f))
1529 scmutil.checkportable(ui, join(f))
1613 try:
1530 try:
1614 st = lstat(f)
1531 st = lstat(f)
1615 except OSError:
1532 except OSError:
1616 ui.warn(_("%s does not exist!\n") % join(f))
1533 ui.warn(_("%s does not exist!\n") % join(f))
1617 rejected.append(f)
1534 rejected.append(f)
1618 continue
1535 continue
1619 if st.st_size > 10000000:
1536 if st.st_size > 10000000:
1620 ui.warn(_("%s: up to %d MB of RAM may be required "
1537 ui.warn(_("%s: up to %d MB of RAM may be required "
1621 "to manage this file\n"
1538 "to manage this file\n"
1622 "(use 'hg revert %s' to cancel the "
1539 "(use 'hg revert %s' to cancel the "
1623 "pending addition)\n")
1540 "pending addition)\n")
1624 % (f, 3 * st.st_size // 1000000, join(f)))
1541 % (f, 3 * st.st_size // 1000000, join(f)))
1625 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1542 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1626 ui.warn(_("%s not added: only files and symlinks "
1543 ui.warn(_("%s not added: only files and symlinks "
1627 "supported currently\n") % join(f))
1544 "supported currently\n") % join(f))
1628 rejected.append(f)
1545 rejected.append(f)
1629 elif ds[f] in 'amn':
1546 elif ds[f] in 'amn':
1630 ui.warn(_("%s already tracked!\n") % join(f))
1547 ui.warn(_("%s already tracked!\n") % join(f))
1631 elif ds[f] == 'r':
1548 elif ds[f] == 'r':
1632 ds.normallookup(f)
1549 ds.normallookup(f)
1633 else:
1550 else:
1634 ds.add(f)
1551 ds.add(f)
1635 return rejected
1552 return rejected
1636
1553
1637 def forget(self, files, prefix=""):
1554 def forget(self, files, prefix=""):
1638 join = lambda f: os.path.join(prefix, f)
1555 join = lambda f: os.path.join(prefix, f)
1639 with self._repo.wlock():
1556 with self._repo.wlock():
1640 rejected = []
1557 rejected = []
1641 for f in files:
1558 for f in files:
1642 if f not in self._repo.dirstate:
1559 if f not in self._repo.dirstate:
1643 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1560 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1644 rejected.append(f)
1561 rejected.append(f)
1645 elif self._repo.dirstate[f] != 'a':
1562 elif self._repo.dirstate[f] != 'a':
1646 self._repo.dirstate.remove(f)
1563 self._repo.dirstate.remove(f)
1647 else:
1564 else:
1648 self._repo.dirstate.drop(f)
1565 self._repo.dirstate.drop(f)
1649 return rejected
1566 return rejected
1650
1567
1651 def undelete(self, list):
1568 def undelete(self, list):
1652 pctxs = self.parents()
1569 pctxs = self.parents()
1653 with self._repo.wlock():
1570 with self._repo.wlock():
1654 for f in list:
1571 for f in list:
1655 if self._repo.dirstate[f] != 'r':
1572 if self._repo.dirstate[f] != 'r':
1656 self._repo.ui.warn(_("%s not removed!\n") % f)
1573 self._repo.ui.warn(_("%s not removed!\n") % f)
1657 else:
1574 else:
1658 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1575 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1659 t = fctx.data()
1576 t = fctx.data()
1660 self._repo.wwrite(f, t, fctx.flags())
1577 self._repo.wwrite(f, t, fctx.flags())
1661 self._repo.dirstate.normal(f)
1578 self._repo.dirstate.normal(f)
1662
1579
1663 def copy(self, source, dest):
1580 def copy(self, source, dest):
1664 try:
1581 try:
1665 st = self._repo.wvfs.lstat(dest)
1582 st = self._repo.wvfs.lstat(dest)
1666 except OSError as err:
1583 except OSError as err:
1667 if err.errno != errno.ENOENT:
1584 if err.errno != errno.ENOENT:
1668 raise
1585 raise
1669 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1586 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1670 return
1587 return
1671 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1588 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1672 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1589 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1673 "symbolic link\n") % dest)
1590 "symbolic link\n") % dest)
1674 else:
1591 else:
1675 with self._repo.wlock():
1592 with self._repo.wlock():
1676 if self._repo.dirstate[dest] in '?':
1593 if self._repo.dirstate[dest] in '?':
1677 self._repo.dirstate.add(dest)
1594 self._repo.dirstate.add(dest)
1678 elif self._repo.dirstate[dest] in 'r':
1595 elif self._repo.dirstate[dest] in 'r':
1679 self._repo.dirstate.normallookup(dest)
1596 self._repo.dirstate.normallookup(dest)
1680 self._repo.dirstate.copy(source, dest)
1597 self._repo.dirstate.copy(source, dest)
1681
1598
1682 def match(self, pats=None, include=None, exclude=None, default='glob',
1599 def match(self, pats=None, include=None, exclude=None, default='glob',
1683 listsubrepos=False, badfn=None):
1600 listsubrepos=False, badfn=None):
1684 r = self._repo
1601 r = self._repo
1685
1602
1686 # Only a case insensitive filesystem needs magic to translate user input
1603 # Only a case insensitive filesystem needs magic to translate user input
1687 # to actual case in the filesystem.
1604 # to actual case in the filesystem.
1688 icasefs = not util.fscasesensitive(r.root)
1605 icasefs = not util.fscasesensitive(r.root)
1689 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1606 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1690 default, auditor=r.auditor, ctx=self,
1607 default, auditor=r.auditor, ctx=self,
1691 listsubrepos=listsubrepos, badfn=badfn,
1608 listsubrepos=listsubrepos, badfn=badfn,
1692 icasefs=icasefs)
1609 icasefs=icasefs)
1693
1610
1694 def _filtersuspectsymlink(self, files):
1611 def _filtersuspectsymlink(self, files):
1695 if not files or self._repo.dirstate._checklink:
1612 if not files or self._repo.dirstate._checklink:
1696 return files
1613 return files
1697
1614
1698 # Symlink placeholders may get non-symlink-like contents
1615 # Symlink placeholders may get non-symlink-like contents
1699 # via user error or dereferencing by NFS or Samba servers,
1616 # via user error or dereferencing by NFS or Samba servers,
1700 # so we filter out any placeholders that don't look like a
1617 # so we filter out any placeholders that don't look like a
1701 # symlink
1618 # symlink
1702 sane = []
1619 sane = []
1703 for f in files:
1620 for f in files:
1704 if self.flags(f) == 'l':
1621 if self.flags(f) == 'l':
1705 d = self[f].data()
1622 d = self[f].data()
1706 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1623 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1707 self._repo.ui.debug('ignoring suspect symlink placeholder'
1624 self._repo.ui.debug('ignoring suspect symlink placeholder'
1708 ' "%s"\n' % f)
1625 ' "%s"\n' % f)
1709 continue
1626 continue
1710 sane.append(f)
1627 sane.append(f)
1711 return sane
1628 return sane
1712
1629
1713 def _checklookup(self, files):
1630 def _checklookup(self, files):
1714 # check for any possibly clean files
1631 # check for any possibly clean files
1715 if not files:
1632 if not files:
1716 return [], [], []
1633 return [], [], []
1717
1634
1718 modified = []
1635 modified = []
1719 deleted = []
1636 deleted = []
1720 fixup = []
1637 fixup = []
1721 pctx = self._parents[0]
1638 pctx = self._parents[0]
1722 # do a full compare of any files that might have changed
1639 # do a full compare of any files that might have changed
1723 for f in sorted(files):
1640 for f in sorted(files):
1724 try:
1641 try:
1725 # This will return True for a file that got replaced by a
1642 # This will return True for a file that got replaced by a
1726 # directory in the interim, but fixing that is pretty hard.
1643 # directory in the interim, but fixing that is pretty hard.
1727 if (f not in pctx or self.flags(f) != pctx.flags(f)
1644 if (f not in pctx or self.flags(f) != pctx.flags(f)
1728 or pctx[f].cmp(self[f])):
1645 or pctx[f].cmp(self[f])):
1729 modified.append(f)
1646 modified.append(f)
1730 else:
1647 else:
1731 fixup.append(f)
1648 fixup.append(f)
1732 except (IOError, OSError):
1649 except (IOError, OSError):
1733 # A file become inaccessible in between? Mark it as deleted,
1650 # A file become inaccessible in between? Mark it as deleted,
1734 # matching dirstate behavior (issue5584).
1651 # matching dirstate behavior (issue5584).
1735 # The dirstate has more complex behavior around whether a
1652 # The dirstate has more complex behavior around whether a
1736 # missing file matches a directory, etc, but we don't need to
1653 # missing file matches a directory, etc, but we don't need to
1737 # bother with that: if f has made it to this point, we're sure
1654 # bother with that: if f has made it to this point, we're sure
1738 # it's in the dirstate.
1655 # it's in the dirstate.
1739 deleted.append(f)
1656 deleted.append(f)
1740
1657
1741 return modified, deleted, fixup
1658 return modified, deleted, fixup
1742
1659
1743 def _poststatusfixup(self, status, fixup):
1660 def _poststatusfixup(self, status, fixup):
1744 """update dirstate for files that are actually clean"""
1661 """update dirstate for files that are actually clean"""
1745 poststatus = self._repo.postdsstatus()
1662 poststatus = self._repo.postdsstatus()
1746 if fixup or poststatus:
1663 if fixup or poststatus:
1747 try:
1664 try:
1748 oldid = self._repo.dirstate.identity()
1665 oldid = self._repo.dirstate.identity()
1749
1666
1750 # updating the dirstate is optional
1667 # updating the dirstate is optional
1751 # so we don't wait on the lock
1668 # so we don't wait on the lock
1752 # wlock can invalidate the dirstate, so cache normal _after_
1669 # wlock can invalidate the dirstate, so cache normal _after_
1753 # taking the lock
1670 # taking the lock
1754 with self._repo.wlock(False):
1671 with self._repo.wlock(False):
1755 if self._repo.dirstate.identity() == oldid:
1672 if self._repo.dirstate.identity() == oldid:
1756 if fixup:
1673 if fixup:
1757 normal = self._repo.dirstate.normal
1674 normal = self._repo.dirstate.normal
1758 for f in fixup:
1675 for f in fixup:
1759 normal(f)
1676 normal(f)
1760 # write changes out explicitly, because nesting
1677 # write changes out explicitly, because nesting
1761 # wlock at runtime may prevent 'wlock.release()'
1678 # wlock at runtime may prevent 'wlock.release()'
1762 # after this block from doing so for subsequent
1679 # after this block from doing so for subsequent
1763 # changing files
1680 # changing files
1764 tr = self._repo.currenttransaction()
1681 tr = self._repo.currenttransaction()
1765 self._repo.dirstate.write(tr)
1682 self._repo.dirstate.write(tr)
1766
1683
1767 if poststatus:
1684 if poststatus:
1768 for ps in poststatus:
1685 for ps in poststatus:
1769 ps(self, status)
1686 ps(self, status)
1770 else:
1687 else:
1771 # in this case, writing changes out breaks
1688 # in this case, writing changes out breaks
1772 # consistency, because .hg/dirstate was
1689 # consistency, because .hg/dirstate was
1773 # already changed simultaneously after last
1690 # already changed simultaneously after last
1774 # caching (see also issue5584 for detail)
1691 # caching (see also issue5584 for detail)
1775 self._repo.ui.debug('skip updating dirstate: '
1692 self._repo.ui.debug('skip updating dirstate: '
1776 'identity mismatch\n')
1693 'identity mismatch\n')
1777 except error.LockError:
1694 except error.LockError:
1778 pass
1695 pass
1779 finally:
1696 finally:
1780 # Even if the wlock couldn't be grabbed, clear out the list.
1697 # Even if the wlock couldn't be grabbed, clear out the list.
1781 self._repo.clearpostdsstatus()
1698 self._repo.clearpostdsstatus()
1782
1699
1783 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1700 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1784 unknown=False):
1701 unknown=False):
1785 '''Gets the status from the dirstate -- internal use only.'''
1702 '''Gets the status from the dirstate -- internal use only.'''
1786 listignored, listclean, listunknown = ignored, clean, unknown
1703 listignored, listclean, listunknown = ignored, clean, unknown
1787 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1704 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1788 subrepos = []
1705 subrepos = []
1789 if '.hgsub' in self:
1706 if '.hgsub' in self:
1790 subrepos = sorted(self.substate)
1707 subrepos = sorted(self.substate)
1791 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1708 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1792 listclean, listunknown)
1709 listclean, listunknown)
1793
1710
1794 # check for any possibly clean files
1711 # check for any possibly clean files
1795 fixup = []
1712 fixup = []
1796 if cmp:
1713 if cmp:
1797 modified2, deleted2, fixup = self._checklookup(cmp)
1714 modified2, deleted2, fixup = self._checklookup(cmp)
1798 s.modified.extend(modified2)
1715 s.modified.extend(modified2)
1799 s.deleted.extend(deleted2)
1716 s.deleted.extend(deleted2)
1800
1717
1801 if fixup and listclean:
1718 if fixup and listclean:
1802 s.clean.extend(fixup)
1719 s.clean.extend(fixup)
1803
1720
1804 self._poststatusfixup(s, fixup)
1721 self._poststatusfixup(s, fixup)
1805
1722
1806 if match.always():
1723 if match.always():
1807 # cache for performance
1724 # cache for performance
1808 if s.unknown or s.ignored or s.clean:
1725 if s.unknown or s.ignored or s.clean:
1809 # "_status" is cached with list*=False in the normal route
1726 # "_status" is cached with list*=False in the normal route
1810 self._status = scmutil.status(s.modified, s.added, s.removed,
1727 self._status = scmutil.status(s.modified, s.added, s.removed,
1811 s.deleted, [], [], [])
1728 s.deleted, [], [], [])
1812 else:
1729 else:
1813 self._status = s
1730 self._status = s
1814
1731
1815 return s
1732 return s
1816
1733
1817 @propertycache
1734 @propertycache
1818 def _manifest(self):
1735 def _manifest(self):
1819 """generate a manifest corresponding to the values in self._status
1736 """generate a manifest corresponding to the values in self._status
1820
1737
1821 This reuse the file nodeid from parent, but we use special node
1738 This reuse the file nodeid from parent, but we use special node
1822 identifiers for added and modified files. This is used by manifests
1739 identifiers for added and modified files. This is used by manifests
1823 merge to see that files are different and by update logic to avoid
1740 merge to see that files are different and by update logic to avoid
1824 deleting newly added files.
1741 deleting newly added files.
1825 """
1742 """
1826 return self._buildstatusmanifest(self._status)
1743 return self._buildstatusmanifest(self._status)
1827
1744
1828 def _buildstatusmanifest(self, status):
1745 def _buildstatusmanifest(self, status):
1829 """Builds a manifest that includes the given status results."""
1746 """Builds a manifest that includes the given status results."""
1830 parents = self.parents()
1747 parents = self.parents()
1831
1748
1832 man = parents[0].manifest().copy()
1749 man = parents[0].manifest().copy()
1833
1750
1834 ff = self._flagfunc
1751 ff = self._flagfunc
1835 for i, l in ((addednodeid, status.added),
1752 for i, l in ((addednodeid, status.added),
1836 (modifiednodeid, status.modified)):
1753 (modifiednodeid, status.modified)):
1837 for f in l:
1754 for f in l:
1838 man[f] = i
1755 man[f] = i
1839 try:
1756 try:
1840 man.setflag(f, ff(f))
1757 man.setflag(f, ff(f))
1841 except OSError:
1758 except OSError:
1842 pass
1759 pass
1843
1760
1844 for f in status.deleted + status.removed:
1761 for f in status.deleted + status.removed:
1845 if f in man:
1762 if f in man:
1846 del man[f]
1763 del man[f]
1847
1764
1848 return man
1765 return man
1849
1766
1850 def _buildstatus(self, other, s, match, listignored, listclean,
1767 def _buildstatus(self, other, s, match, listignored, listclean,
1851 listunknown):
1768 listunknown):
1852 """build a status with respect to another context
1769 """build a status with respect to another context
1853
1770
1854 This includes logic for maintaining the fast path of status when
1771 This includes logic for maintaining the fast path of status when
1855 comparing the working directory against its parent, which is to skip
1772 comparing the working directory against its parent, which is to skip
1856 building a new manifest if self (working directory) is not comparing
1773 building a new manifest if self (working directory) is not comparing
1857 against its parent (repo['.']).
1774 against its parent (repo['.']).
1858 """
1775 """
1859 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1776 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1860 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1777 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1861 # might have accidentally ended up with the entire contents of the file
1778 # might have accidentally ended up with the entire contents of the file
1862 # they are supposed to be linking to.
1779 # they are supposed to be linking to.
1863 s.modified[:] = self._filtersuspectsymlink(s.modified)
1780 s.modified[:] = self._filtersuspectsymlink(s.modified)
1864 if other != self._repo['.']:
1781 if other != self._repo['.']:
1865 s = super(workingctx, self)._buildstatus(other, s, match,
1782 s = super(workingctx, self)._buildstatus(other, s, match,
1866 listignored, listclean,
1783 listignored, listclean,
1867 listunknown)
1784 listunknown)
1868 return s
1785 return s
1869
1786
1870 def _matchstatus(self, other, match):
1787 def _matchstatus(self, other, match):
1871 """override the match method with a filter for directory patterns
1788 """override the match method with a filter for directory patterns
1872
1789
1873 We use inheritance to customize the match.bad method only in cases of
1790 We use inheritance to customize the match.bad method only in cases of
1874 workingctx since it belongs only to the working directory when
1791 workingctx since it belongs only to the working directory when
1875 comparing against the parent changeset.
1792 comparing against the parent changeset.
1876
1793
1877 If we aren't comparing against the working directory's parent, then we
1794 If we aren't comparing against the working directory's parent, then we
1878 just use the default match object sent to us.
1795 just use the default match object sent to us.
1879 """
1796 """
1880 superself = super(workingctx, self)
1797 superself = super(workingctx, self)
1881 match = superself._matchstatus(other, match)
1798 match = superself._matchstatus(other, match)
1882 if other != self._repo['.']:
1799 if other != self._repo['.']:
1883 def bad(f, msg):
1800 def bad(f, msg):
1884 # 'f' may be a directory pattern from 'match.files()',
1801 # 'f' may be a directory pattern from 'match.files()',
1885 # so 'f not in ctx1' is not enough
1802 # so 'f not in ctx1' is not enough
1886 if f not in other and not other.hasdir(f):
1803 if f not in other and not other.hasdir(f):
1887 self._repo.ui.warn('%s: %s\n' %
1804 self._repo.ui.warn('%s: %s\n' %
1888 (self._repo.dirstate.pathto(f), msg))
1805 (self._repo.dirstate.pathto(f), msg))
1889 match.bad = bad
1806 match.bad = bad
1890 return match
1807 return match
1891
1808
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # Not yet committed, so no changeset/filelog revision is bound.
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # An uncommitted file context always "exists".
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        # A rename contributes the copy source as the first parent;
        # otherwise use this path in the first changeset parent.
        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Drop parents where the file does not exist (null node).
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # Nothing can descend from an uncommitted file revision.
        return []
1938
1855
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source path, source filenode) if copied, else None."""
        src = self._repo.dirstate.copied(self._path)
        if not src:
            return None
        return src, self._changectx._parents[0]._manifest.get(src, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return (mtime, tzoffset), falling back to the changectx date
        when the file has disappeared from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1984
1901
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # Deliberately skip workingctx.__init__ so the explicit 'changes'
        # are used instead of the live repository status.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2022
1939
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a miss raises KeyError; compute and memoize on success only.
        try:
            return cache[path]
        except KeyError:
            fctx = cache[path] = func(repo, memctx, path)
            return fctx

    return getfilectx
2038
1955
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields a (source, node) pair, but memfilectx only
        # records the source path, so keep just the first element.
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx
2057
1974
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # The patch removed this file; None tells memctx it is absent.
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx
2073
1990
2074 class memctx(committablectx):
1991 class memctx(committablectx):
2075 """Use memctx to perform in-memory commits via localrepo.commitctx().
1992 """Use memctx to perform in-memory commits via localrepo.commitctx().
2076
1993
2077 Revision information is supplied at initialization time while
1994 Revision information is supplied at initialization time while
2078 related files data and is made available through a callback
1995 related files data and is made available through a callback
2079 mechanism. 'repo' is the current localrepo, 'parents' is a
1996 mechanism. 'repo' is the current localrepo, 'parents' is a
2080 sequence of two parent revisions identifiers (pass None for every
1997 sequence of two parent revisions identifiers (pass None for every
2081 missing parent), 'text' is the commit message and 'files' lists
1998 missing parent), 'text' is the commit message and 'files' lists
2082 names of files touched by the revision (normalized and relative to
1999 names of files touched by the revision (normalized and relative to
2083 repository root).
2000 repository root).
2084
2001
2085 filectxfn(repo, memctx, path) is a callable receiving the
2002 filectxfn(repo, memctx, path) is a callable receiving the
2086 repository, the current memctx object and the normalized path of
2003 repository, the current memctx object and the normalized path of
2087 requested file, relative to repository root. It is fired by the
2004 requested file, relative to repository root. It is fired by the
2088 commit function for every file in 'files', but calls order is
2005 commit function for every file in 'files', but calls order is
2089 undefined. If the file is available in the revision being
2006 undefined. If the file is available in the revision being
2090 committed (updated or added), filectxfn returns a memfilectx
2007 committed (updated or added), filectxfn returns a memfilectx
2091 object. If the file was removed, filectxfn return None for recent
2008 object. If the file was removed, filectxfn return None for recent
2092 Mercurial. Moved files are represented by marking the source file
2009 Mercurial. Moved files are represented by marking the source file
2093 removed and the new file added with copy information (see
2010 removed and the new file added with copy information (see
2094 memfilectx).
2011 memfilectx).
2095
2012
2096 user receives the committer name and defaults to current
2013 user receives the committer name and defaults to current
2097 repository username, date is the commit date in any format
2014 repository username, date is the commit date in any format
2098 supported by util.parsedate() and defaults to current date, extra
2015 supported by util.parsedate() and defaults to current date, extra
2099 is a dictionary of metadata or is left empty.
2016 is a dictionary of metadata or is left empty.
2100 """
2017 """
2101
2018
2102 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2019 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2103 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2020 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2104 # this field to determine what to do in filectxfn.
2021 # this field to determine what to do in filectxfn.
2105 _returnnoneformissingfiles = True
2022 _returnnoneformissingfiles = True
2106
2023
2107 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2024 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2108 date=None, extra=None, branch=None, editor=False):
2025 date=None, extra=None, branch=None, editor=False):
2109 super(memctx, self).__init__(repo, text, user, date, extra)
2026 super(memctx, self).__init__(repo, text, user, date, extra)
2110 self._rev = None
2027 self._rev = None
2111 self._node = None
2028 self._node = None
2112 parents = [(p or nullid) for p in parents]
2029 parents = [(p or nullid) for p in parents]
2113 p1, p2 = parents
2030 p1, p2 = parents
2114 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2031 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2115 files = sorted(set(files))
2032 files = sorted(set(files))
2116 self._files = files
2033 self._files = files
2117 if branch is not None:
2034 if branch is not None:
2118 self._extra['branch'] = encoding.fromlocal(branch)
2035 self._extra['branch'] = encoding.fromlocal(branch)
2119 self.substate = {}
2036 self.substate = {}
2120
2037
2121 if isinstance(filectxfn, patch.filestore):
2038 if isinstance(filectxfn, patch.filestore):
2122 filectxfn = memfilefrompatch(filectxfn)
2039 filectxfn = memfilefrompatch(filectxfn)
2123 elif not callable(filectxfn):
2040 elif not callable(filectxfn):
2124 # if store is not callable, wrap it in a function
2041 # if store is not callable, wrap it in a function
2125 filectxfn = memfilefromctx(filectxfn)
2042 filectxfn = memfilefromctx(filectxfn)
2126
2043
2127 # memoizing increases performance for e.g. vcs convert scenarios.
2044 # memoizing increases performance for e.g. vcs convert scenarios.
2128 self._filectxfn = makecachingfilectxfn(filectxfn)
2045 self._filectxfn = makecachingfilectxfn(filectxfn)
2129
2046
2130 if editor:
2047 if editor:
2131 self._text = editor(self._repo, self, [])
2048 self._text = editor(self._repo, self, [])
2132 self._repo.savecommitmessage(self._text)
2049 self._repo.savecommitmessage(self._text)
2133
2050
2134 def filectx(self, path, filelog=None):
2051 def filectx(self, path, filelog=None):
2135 """get a file context from the working directory
2052 """get a file context from the working directory
2136
2053
2137 Returns None if file doesn't exist and should be removed."""
2054 Returns None if file doesn't exist and should be removed."""
2138 return self._filectxfn(self._repo, self, path)
2055 return self._filectxfn(self._repo, self, path)
2139
2056
2140 def commit(self):
2057 def commit(self):
2141 """commit context to the repo"""
2058 """commit context to the repo"""
2142 return self._repo.commitctx(self)
2059 return self._repo.commitctx(self)
2143
2060
2144 @propertycache
2061 @propertycache
2145 def _manifest(self):
2062 def _manifest(self):
2146 """generate a manifest based on the return values of filectxfn"""
2063 """generate a manifest based on the return values of filectxfn"""
2147
2064
2148 # keep this simple for now; just worry about p1
2065 # keep this simple for now; just worry about p1
2149 pctx = self._parents[0]
2066 pctx = self._parents[0]
2150 man = pctx.manifest().copy()
2067 man = pctx.manifest().copy()
2151
2068
2152 for f in self._status.modified:
2069 for f in self._status.modified:
2153 p1node = nullid
2070 p1node = nullid
2154 p2node = nullid
2071 p2node = nullid
2155 p = pctx[f].parents() # if file isn't in pctx, check p2?
2072 p = pctx[f].parents() # if file isn't in pctx, check p2?
2156 if len(p) > 0:
2073 if len(p) > 0:
2157 p1node = p[0].filenode()
2074 p1node = p[0].filenode()
2158 if len(p) > 1:
2075 if len(p) > 1:
2159 p2node = p[1].filenode()
2076 p2node = p[1].filenode()
2160 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2077 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2161
2078
2162 for f in self._status.added:
2079 for f in self._status.added:
2163 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2080 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2164
2081
2165 for f in self._status.removed:
2082 for f in self._status.removed:
2166 if f in man:
2083 if f in man:
2167 del man[f]
2084 del man[f]
2168
2085
2169 return man
2086 return man
2170
2087
2171 @propertycache
2088 @propertycache
2172 def _status(self):
2089 def _status(self):
2173 """Calculate exact status from ``files`` specified at construction
2090 """Calculate exact status from ``files`` specified at construction
2174 """
2091 """
2175 man1 = self.p1().manifest()
2092 man1 = self.p1().manifest()
2176 p2 = self._parents[1]
2093 p2 = self._parents[1]
2177 # "1 < len(self._parents)" can't be used for checking
2094 # "1 < len(self._parents)" can't be used for checking
2178 # existence of the 2nd parent, because "memctx._parents" is
2095 # existence of the 2nd parent, because "memctx._parents" is
2179 # explicitly initialized by the list, of which length is 2.
2096 # explicitly initialized by the list, of which length is 2.
2180 if p2.node() != nullid:
2097 if p2.node() != nullid:
2181 man2 = p2.manifest()
2098 man2 = p2.manifest()
2182 managing = lambda f: f in man1 or f in man2
2099 managing = lambda f: f in man1 or f in man2
2183 else:
2100 else:
2184 managing = lambda f: f in man1
2101 managing = lambda f: f in man1
2185
2102
2186 modified, added, removed = [], [], []
2103 modified, added, removed = [], [], []
2187 for f in self._files:
2104 for f in self._files:
2188 if not managing(f):
2105 if not managing(f):
2189 added.append(f)
2106 added.append(f)
2190 elif self[f]:
2107 elif self[f]:
2191 modified.append(f)
2108 modified.append(f)
2192 else:
2109 else:
2193 removed.append(f)
2110 removed.append(f)
2194
2111
2195 return scmutil.status(modified, added, removed, [], [], [], [])
2112 return scmutil.status(modified, added, removed, [], [], [], [])
2196
2113
2197 class memfilectx(committablefilectx):
2114 class memfilectx(committablefilectx):
2198 """memfilectx represents an in-memory file to commit.
2115 """memfilectx represents an in-memory file to commit.
2199
2116
2200 See memctx and committablefilectx for more details.
2117 See memctx and committablefilectx for more details.
2201 """
2118 """
2202 def __init__(self, repo, path, data, islink=False,
2119 def __init__(self, repo, path, data, islink=False,
2203 isexec=False, copied=None, memctx=None):
2120 isexec=False, copied=None, memctx=None):
2204 """
2121 """
2205 path is the normalized file path relative to repository root.
2122 path is the normalized file path relative to repository root.
2206 data is the file content as a string.
2123 data is the file content as a string.
2207 islink is True if the file is a symbolic link.
2124 islink is True if the file is a symbolic link.
2208 isexec is True if the file is executable.
2125 isexec is True if the file is executable.
2209 copied is the source file path if current file was copied in the
2126 copied is the source file path if current file was copied in the
2210 revision being committed, or None."""
2127 revision being committed, or None."""
2211 super(memfilectx, self).__init__(repo, path, None, memctx)
2128 super(memfilectx, self).__init__(repo, path, None, memctx)
2212 self._data = data
2129 self._data = data
2213 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2130 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2214 self._copied = None
2131 self._copied = None
2215 if copied:
2132 if copied:
2216 self._copied = (copied, nullid)
2133 self._copied = (copied, nullid)
2217
2134
2218 def data(self):
2135 def data(self):
2219 return self._data
2136 return self._data
2220
2137
2221 def remove(self, ignoremissing=False):
2138 def remove(self, ignoremissing=False):
2222 """wraps unlink for a repo's working directory"""
2139 """wraps unlink for a repo's working directory"""
2223 # need to figure out what to do here
2140 # need to figure out what to do here
2224 del self._changectx[self._path]
2141 del self._changectx[self._path]
2225
2142
2226 def write(self, data, flags):
2143 def write(self, data, flags):
2227 """wraps repo.wwrite"""
2144 """wraps repo.wwrite"""
2228 self._data = data
2145 self._data = data
2229
2146
2230 class overlayfilectx(committablefilectx):
2147 class overlayfilectx(committablefilectx):
2231 """Like memfilectx but take an original filectx and optional parameters to
2148 """Like memfilectx but take an original filectx and optional parameters to
2232 override parts of it. This is useful when fctx.data() is expensive (i.e.
2149 override parts of it. This is useful when fctx.data() is expensive (i.e.
2233 flag processor is expensive) and raw data, flags, and filenode could be
2150 flag processor is expensive) and raw data, flags, and filenode could be
2234 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2151 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2235 """
2152 """
2236
2153
2237 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2154 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2238 copied=None, ctx=None):
2155 copied=None, ctx=None):
2239 """originalfctx: filecontext to duplicate
2156 """originalfctx: filecontext to duplicate
2240
2157
2241 datafunc: None or a function to override data (file content). It is a
2158 datafunc: None or a function to override data (file content). It is a
2242 function to be lazy. path, flags, copied, ctx: None or overridden value
2159 function to be lazy. path, flags, copied, ctx: None or overridden value
2243
2160
2244 copied could be (path, rev), or False. copied could also be just path,
2161 copied could be (path, rev), or False. copied could also be just path,
2245 and will be converted to (path, nullid). This simplifies some callers.
2162 and will be converted to (path, nullid). This simplifies some callers.
2246 """
2163 """
2247
2164
2248 if path is None:
2165 if path is None:
2249 path = originalfctx.path()
2166 path = originalfctx.path()
2250 if ctx is None:
2167 if ctx is None:
2251 ctx = originalfctx.changectx()
2168 ctx = originalfctx.changectx()
2252 ctxmatch = lambda: True
2169 ctxmatch = lambda: True
2253 else:
2170 else:
2254 ctxmatch = lambda: ctx == originalfctx.changectx()
2171 ctxmatch = lambda: ctx == originalfctx.changectx()
2255
2172
2256 repo = originalfctx.repo()
2173 repo = originalfctx.repo()
2257 flog = originalfctx.filelog()
2174 flog = originalfctx.filelog()
2258 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2175 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2259
2176
2260 if copied is None:
2177 if copied is None:
2261 copied = originalfctx.renamed()
2178 copied = originalfctx.renamed()
2262 copiedmatch = lambda: True
2179 copiedmatch = lambda: True
2263 else:
2180 else:
2264 if copied and not isinstance(copied, tuple):
2181 if copied and not isinstance(copied, tuple):
2265 # repo._filecommit will recalculate copyrev so nullid is okay
2182 # repo._filecommit will recalculate copyrev so nullid is okay
2266 copied = (copied, nullid)
2183 copied = (copied, nullid)
2267 copiedmatch = lambda: copied == originalfctx.renamed()
2184 copiedmatch = lambda: copied == originalfctx.renamed()
2268
2185
2269 # When data, copied (could affect data), ctx (could affect filelog
2186 # When data, copied (could affect data), ctx (could affect filelog
2270 # parents) are not overridden, rawdata, rawflags, and filenode may be
2187 # parents) are not overridden, rawdata, rawflags, and filenode may be
2271 # reused (repo._filecommit should double check filelog parents).
2188 # reused (repo._filecommit should double check filelog parents).
2272 #
2189 #
2273 # path, flags are not hashed in filelog (but in manifestlog) so they do
2190 # path, flags are not hashed in filelog (but in manifestlog) so they do
2274 # not affect reusable here.
2191 # not affect reusable here.
2275 #
2192 #
2276 # If ctx or copied is overridden to a same value with originalfctx,
2193 # If ctx or copied is overridden to a same value with originalfctx,
2277 # still consider it's reusable. originalfctx.renamed() may be a bit
2194 # still consider it's reusable. originalfctx.renamed() may be a bit
2278 # expensive so it's not called unless necessary. Assuming datafunc is
2195 # expensive so it's not called unless necessary. Assuming datafunc is
2279 # always expensive, do not call it for this "reusable" test.
2196 # always expensive, do not call it for this "reusable" test.
2280 reusable = datafunc is None and ctxmatch() and copiedmatch()
2197 reusable = datafunc is None and ctxmatch() and copiedmatch()
2281
2198
2282 if datafunc is None:
2199 if datafunc is None:
2283 datafunc = originalfctx.data
2200 datafunc = originalfctx.data
2284 if flags is None:
2201 if flags is None:
2285 flags = originalfctx.flags()
2202 flags = originalfctx.flags()
2286
2203
2287 self._datafunc = datafunc
2204 self._datafunc = datafunc
2288 self._flags = flags
2205 self._flags = flags
2289 self._copied = copied
2206 self._copied = copied
2290
2207
2291 if reusable:
2208 if reusable:
2292 # copy extra fields from originalfctx
2209 # copy extra fields from originalfctx
2293 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2210 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2294 for attr in attrs:
2211 for attr in attrs:
2295 if util.safehasattr(originalfctx, attr):
2212 if util.safehasattr(originalfctx, attr):
2296 setattr(self, attr, getattr(originalfctx, attr))
2213 setattr(self, attr, getattr(originalfctx, attr))
2297
2214
2298 def data(self):
2215 def data(self):
2299 return self._datafunc()
2216 return self._datafunc()
2300
2217
2301 class metadataonlyctx(committablectx):
2218 class metadataonlyctx(committablectx):
2302 """Like memctx but it's reusing the manifest of different commit.
2219 """Like memctx but it's reusing the manifest of different commit.
2303 Intended to be used by lightweight operations that are creating
2220 Intended to be used by lightweight operations that are creating
2304 metadata-only changes.
2221 metadata-only changes.
2305
2222
2306 Revision information is supplied at initialization time. 'repo' is the
2223 Revision information is supplied at initialization time. 'repo' is the
2307 current localrepo, 'ctx' is original revision which manifest we're reuisng
2224 current localrepo, 'ctx' is original revision which manifest we're reuisng
2308 'parents' is a sequence of two parent revisions identifiers (pass None for
2225 'parents' is a sequence of two parent revisions identifiers (pass None for
2309 every missing parent), 'text' is the commit.
2226 every missing parent), 'text' is the commit.
2310
2227
2311 user receives the committer name and defaults to current repository
2228 user receives the committer name and defaults to current repository
2312 username, date is the commit date in any format supported by
2229 username, date is the commit date in any format supported by
2313 util.parsedate() and defaults to current date, extra is a dictionary of
2230 util.parsedate() and defaults to current date, extra is a dictionary of
2314 metadata or is left empty.
2231 metadata or is left empty.
2315 """
2232 """
2316 def __new__(cls, repo, originalctx, *args, **kwargs):
2233 def __new__(cls, repo, originalctx, *args, **kwargs):
2317 return super(metadataonlyctx, cls).__new__(cls, repo)
2234 return super(metadataonlyctx, cls).__new__(cls, repo)
2318
2235
2319 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2236 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2320 extra=None, editor=False):
2237 extra=None, editor=False):
2321 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2238 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2322 self._rev = None
2239 self._rev = None
2323 self._node = None
2240 self._node = None
2324 self._originalctx = originalctx
2241 self._originalctx = originalctx
2325 self._manifestnode = originalctx.manifestnode()
2242 self._manifestnode = originalctx.manifestnode()
2326 parents = [(p or nullid) for p in parents]
2243 parents = [(p or nullid) for p in parents]
2327 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2244 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2328
2245
2329 # sanity check to ensure that the reused manifest parents are
2246 # sanity check to ensure that the reused manifest parents are
2330 # manifests of our commit parents
2247 # manifests of our commit parents
2331 mp1, mp2 = self.manifestctx().parents
2248 mp1, mp2 = self.manifestctx().parents
2332 if p1 != nullid and p1.manifestnode() != mp1:
2249 if p1 != nullid and p1.manifestnode() != mp1:
2333 raise RuntimeError('can\'t reuse the manifest: '
2250 raise RuntimeError('can\'t reuse the manifest: '
2334 'its p1 doesn\'t match the new ctx p1')
2251 'its p1 doesn\'t match the new ctx p1')
2335 if p2 != nullid and p2.manifestnode() != mp2:
2252 if p2 != nullid and p2.manifestnode() != mp2:
2336 raise RuntimeError('can\'t reuse the manifest: '
2253 raise RuntimeError('can\'t reuse the manifest: '
2337 'its p2 doesn\'t match the new ctx p2')
2254 'its p2 doesn\'t match the new ctx p2')
2338
2255
2339 self._files = originalctx.files()
2256 self._files = originalctx.files()
2340 self.substate = {}
2257 self.substate = {}
2341
2258
2342 if editor:
2259 if editor:
2343 self._text = editor(self._repo, self, [])
2260 self._text = editor(self._repo, self, [])
2344 self._repo.savecommitmessage(self._text)
2261 self._repo.savecommitmessage(self._text)
2345
2262
2346 def manifestnode(self):
2263 def manifestnode(self):
2347 return self._manifestnode
2264 return self._manifestnode
2348
2265
2349 @property
2266 @property
2350 def _manifestctx(self):
2267 def _manifestctx(self):
2351 return self._repo.manifestlog[self._manifestnode]
2268 return self._repo.manifestlog[self._manifestnode]
2352
2269
2353 def filectx(self, path, filelog=None):
2270 def filectx(self, path, filelog=None):
2354 return self._originalctx.filectx(path, filelog=filelog)
2271 return self._originalctx.filectx(path, filelog=filelog)
2355
2272
2356 def commit(self):
2273 def commit(self):
2357 """commit context to the repo"""
2274 """commit context to the repo"""
2358 return self._repo.commitctx(self)
2275 return self._repo.commitctx(self)
2359
2276
2360 @property
2277 @property
2361 def _manifest(self):
2278 def _manifest(self):
2362 return self._originalctx.manifest()
2279 return self._originalctx.manifest()
2363
2280
2364 @propertycache
2281 @propertycache
2365 def _status(self):
2282 def _status(self):
2366 """Calculate exact status from ``files`` specified in the ``origctx``
2283 """Calculate exact status from ``files`` specified in the ``origctx``
2367 and parents manifests.
2284 and parents manifests.
2368 """
2285 """
2369 man1 = self.p1().manifest()
2286 man1 = self.p1().manifest()
2370 p2 = self._parents[1]
2287 p2 = self._parents[1]
2371 # "1 < len(self._parents)" can't be used for checking
2288 # "1 < len(self._parents)" can't be used for checking
2372 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2289 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2373 # explicitly initialized by the list, of which length is 2.
2290 # explicitly initialized by the list, of which length is 2.
2374 if p2.node() != nullid:
2291 if p2.node() != nullid:
2375 man2 = p2.manifest()
2292 man2 = p2.manifest()
2376 managing = lambda f: f in man1 or f in man2
2293 managing = lambda f: f in man1 or f in man2
2377 else:
2294 else:
2378 managing = lambda f: f in man1
2295 managing = lambda f: f in man1
2379
2296
2380 modified, added, removed = [], [], []
2297 modified, added, removed = [], [], []
2381 for f in self._files:
2298 for f in self._files:
2382 if not managing(f):
2299 if not managing(f):
2383 added.append(f)
2300 added.append(f)
2384 elif self[f]:
2301 elif self[f]:
2385 modified.append(f)
2302 modified.append(f)
2386 else:
2303 else:
2387 removed.append(f)
2304 removed.append(f)
2388
2305
2389 return scmutil.status(modified, added, removed, [], [], [], [])
2306 return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,339 +1,424
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 mdiff,
14 node,
15 node,
16 patch,
15 smartset,
17 smartset,
16 )
18 )
17
19
18 baseset = smartset.baseset
20 baseset = smartset.baseset
19 generatorset = smartset.generatorset
21 generatorset = smartset.generatorset
20
22
21 def revancestors(repo, revs, followfirst):
23 def revancestors(repo, revs, followfirst):
22 """Like revlog.ancestors(), but supports followfirst."""
24 """Like revlog.ancestors(), but supports followfirst."""
23 if followfirst:
25 if followfirst:
24 cut = 1
26 cut = 1
25 else:
27 else:
26 cut = None
28 cut = None
27 cl = repo.changelog
29 cl = repo.changelog
28
30
29 def iterate():
31 def iterate():
30 revs.sort(reverse=True)
32 revs.sort(reverse=True)
31 irevs = iter(revs)
33 irevs = iter(revs)
32 h = []
34 h = []
33
35
34 inputrev = next(irevs, None)
36 inputrev = next(irevs, None)
35 if inputrev is not None:
37 if inputrev is not None:
36 heapq.heappush(h, -inputrev)
38 heapq.heappush(h, -inputrev)
37
39
38 seen = set()
40 seen = set()
39 while h:
41 while h:
40 current = -heapq.heappop(h)
42 current = -heapq.heappop(h)
41 if current == inputrev:
43 if current == inputrev:
42 inputrev = next(irevs, None)
44 inputrev = next(irevs, None)
43 if inputrev is not None:
45 if inputrev is not None:
44 heapq.heappush(h, -inputrev)
46 heapq.heappush(h, -inputrev)
45 if current not in seen:
47 if current not in seen:
46 seen.add(current)
48 seen.add(current)
47 yield current
49 yield current
48 try:
50 try:
49 for parent in cl.parentrevs(current)[:cut]:
51 for parent in cl.parentrevs(current)[:cut]:
50 if parent != node.nullrev:
52 if parent != node.nullrev:
51 heapq.heappush(h, -parent)
53 heapq.heappush(h, -parent)
52 except error.WdirUnsupported:
54 except error.WdirUnsupported:
53 for parent in repo[current].parents()[:cut]:
55 for parent in repo[current].parents()[:cut]:
54 if parent.rev() != node.nullrev:
56 if parent.rev() != node.nullrev:
55 heapq.heappush(h, -parent.rev())
57 heapq.heappush(h, -parent.rev())
56
58
57 return generatorset(iterate(), iterasc=False)
59 return generatorset(iterate(), iterasc=False)
58
60
59 def revdescendants(repo, revs, followfirst):
61 def revdescendants(repo, revs, followfirst):
60 """Like revlog.descendants() but supports followfirst."""
62 """Like revlog.descendants() but supports followfirst."""
61 if followfirst:
63 if followfirst:
62 cut = 1
64 cut = 1
63 else:
65 else:
64 cut = None
66 cut = None
65
67
66 def iterate():
68 def iterate():
67 cl = repo.changelog
69 cl = repo.changelog
68 # XXX this should be 'parentset.min()' assuming 'parentset' is a
70 # XXX this should be 'parentset.min()' assuming 'parentset' is a
69 # smartset (and if it is not, it should.)
71 # smartset (and if it is not, it should.)
70 first = min(revs)
72 first = min(revs)
71 nullrev = node.nullrev
73 nullrev = node.nullrev
72 if first == nullrev:
74 if first == nullrev:
73 # Are there nodes with a null first parent and a non-null
75 # Are there nodes with a null first parent and a non-null
74 # second one? Maybe. Do we care? Probably not.
76 # second one? Maybe. Do we care? Probably not.
75 for i in cl:
77 for i in cl:
76 yield i
78 yield i
77 else:
79 else:
78 seen = set(revs)
80 seen = set(revs)
79 for i in cl.revs(first + 1):
81 for i in cl.revs(first + 1):
80 for x in cl.parentrevs(i)[:cut]:
82 for x in cl.parentrevs(i)[:cut]:
81 if x != nullrev and x in seen:
83 if x != nullrev and x in seen:
82 seen.add(i)
84 seen.add(i)
83 yield i
85 yield i
84 break
86 break
85
87
86 return generatorset(iterate(), iterasc=True)
88 return generatorset(iterate(), iterasc=True)
87
89
88 def _reachablerootspure(repo, minroot, roots, heads, includepath):
90 def _reachablerootspure(repo, minroot, roots, heads, includepath):
89 """return (heads(::<roots> and ::<heads>))
91 """return (heads(::<roots> and ::<heads>))
90
92
91 If includepath is True, return (<roots>::<heads>)."""
93 If includepath is True, return (<roots>::<heads>)."""
92 if not roots:
94 if not roots:
93 return []
95 return []
94 parentrevs = repo.changelog.parentrevs
96 parentrevs = repo.changelog.parentrevs
95 roots = set(roots)
97 roots = set(roots)
96 visit = list(heads)
98 visit = list(heads)
97 reachable = set()
99 reachable = set()
98 seen = {}
100 seen = {}
99 # prefetch all the things! (because python is slow)
101 # prefetch all the things! (because python is slow)
100 reached = reachable.add
102 reached = reachable.add
101 dovisit = visit.append
103 dovisit = visit.append
102 nextvisit = visit.pop
104 nextvisit = visit.pop
103 # open-code the post-order traversal due to the tiny size of
105 # open-code the post-order traversal due to the tiny size of
104 # sys.getrecursionlimit()
106 # sys.getrecursionlimit()
105 while visit:
107 while visit:
106 rev = nextvisit()
108 rev = nextvisit()
107 if rev in roots:
109 if rev in roots:
108 reached(rev)
110 reached(rev)
109 if not includepath:
111 if not includepath:
110 continue
112 continue
111 parents = parentrevs(rev)
113 parents = parentrevs(rev)
112 seen[rev] = parents
114 seen[rev] = parents
113 for parent in parents:
115 for parent in parents:
114 if parent >= minroot and parent not in seen:
116 if parent >= minroot and parent not in seen:
115 dovisit(parent)
117 dovisit(parent)
116 if not reachable:
118 if not reachable:
117 return baseset()
119 return baseset()
118 if not includepath:
120 if not includepath:
119 return reachable
121 return reachable
120 for rev in sorted(seen):
122 for rev in sorted(seen):
121 for parent in seen[rev]:
123 for parent in seen[rev]:
122 if parent in reachable:
124 if parent in reachable:
123 reached(rev)
125 reached(rev)
124 return reachable
126 return reachable
125
127
126 def reachableroots(repo, roots, heads, includepath=False):
128 def reachableroots(repo, roots, heads, includepath=False):
127 """return (heads(::<roots> and ::<heads>))
129 """return (heads(::<roots> and ::<heads>))
128
130
129 If includepath is True, return (<roots>::<heads>)."""
131 If includepath is True, return (<roots>::<heads>)."""
130 if not roots:
132 if not roots:
131 return baseset()
133 return baseset()
132 minroot = roots.min()
134 minroot = roots.min()
133 roots = list(roots)
135 roots = list(roots)
134 heads = list(heads)
136 heads = list(heads)
135 try:
137 try:
136 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
138 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
137 except AttributeError:
139 except AttributeError:
138 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
140 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
139 revs = baseset(revs)
141 revs = baseset(revs)
140 revs.sort()
142 revs.sort()
141 return revs
143 return revs
142
144
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # A block typed '!' is a changed hunk; any such block within the range
    # means the diff touches linerange2.
    changed = False
    for _block, stype in inrangeblocks:
        if stype == '!':
            changed = True
            break
    return changed, linerange1
154
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # Map (linkrev, filenode) -> (fctx, linerange). Popping the max key
    # walks the graph from the most recent revision backwards.
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        parents = c.parents()
        if followfirst:
            parents = parents[:1]
        if not parents:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in parents:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            if inrangep:
                inrange = True
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in
                # this branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead of possibly the topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
189
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' itself when it has changes in range with
    # respect to its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev, or a parent on another (unvisited) branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            if inrangep:
                inrange = True
            # If revision 'i' has been seen (it's a merge), we assume that
            # its line range is the same independently of which parents was
            # used to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
227
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to
    minimize the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle: revisions are
    # kept in memory until we are ready to emit a whole branch that
    # immediately "merges" into an already-emitted one, which reduces the
    # number of parallel branches and their interleaving.
    #
    # During iteration revisions fall into two groups:
    # A) revisions already emitted
    # B) revisions in "retention", stored as separate subgroups
    #
    # For each REV we apply the following logic:
    #
    # 1) if REV is a parent of (A), we will emit it. If a retention
    #    subgroup ((B) above) is blocked on REV being available, we emit
    #    all the revisions out of that subgroup first.
    #
    # 2) else, if a subgroup in (B) awaits REV, we add REV to it and the
    #    subgroup is now awaiting REV.parents().
    #
    # 3) finally, if no such subgroup exists, we create a new one for REV.
    #
    # The algorithm is bootstrapped by emitting the tipmost revision
    # (which places it in group (A)).

    revs.sort(reverse=True)

    # Parents of revisions already emitted. The graph generator is already
    # aware of them, so revisions referencing them need not be delayed.
    #
    # Pre-filling this set (via 'firstbranch') prioritizes one branch:
    # every other branch then waits until it is ready to be emitted.
    unblocked = set(firstbranch)

    # Subgroups waiting to be displayed, each one defined as:
    #
    #   (revs: list of revisions waiting to be displayed,
    #    blocked: set of revisions that cannot be displayed before 'revs')
    #
    # 'blocked' holds the parents of any revision in 'revs' that is not
    # itself contained in the group. The core idea is to delay emission as
    # long as possible: a group's revisions are only flushed when we are
    # about to display one of those parents.
    #
    # This implementation is smart until it encounters a merge: it emits
    # revisions as soon as any parent is about to be emitted and can grow
    # an arbitrary 'blocked' set, so it retains new branches properly but
    # gives up on special ordering for the ancestors of merges.
    #
    # groups[0] is special: its revision list stays empty and its
    # 'blocked' set contains the parents of already-emitted revisions.
    # Pre-seeding that set selects which branch is emitted first.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # The heap pops its smallest element, so negate revisions to get
        # the highest one first.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # Iterate on pending revisions until 'currentrev' itself has been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek the subgroups blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # Gather every subgroup blocked on the same revision into
                # one. We always keep the oldest subgroup first (merging on
                # a common blocking ancestor keeps the algorithm simple;
                # keeping the longest set first could shorten parallel
                # lines in graph drawings, but is not done here).
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # Delete all merged subgroups (except the one we kept),
                # starting from the last one for performance and sanity.
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head: create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # Add the current revision to its subgroup. This happens after
            # the subgroup merging because every element that relied on
            # this revision must precede it; also extend the 'blocked' set
            # with the revision's parents.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
                gr[1].remove(rev)
                parents = [p for p in parentsfunc(rev) if p > node.nullrev]
                gr[1].update(parents)
                for p in parents:
                    if p not in pendingset:
                        pendingset.add(p)
                        heappush(pendingheap, -p)

            # Look for a subgroup to display.
            #
            # When 'unblocked' is empty (if clause), we were not waiting
            # for any revision: either no priority was given on the first
            # iteration, or a whole disconnected part of the graph was
            # emitted (a root was reached). We then arbitrarily pick the
            # oldest known subgroup; the heuristic could be better.
            #
            # Otherwise (elif clause), a subgroup blocked on a revision we
            # just emitted can safely be emitted as well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # Update the set of awaited revisions with the subgroup's.
                unblocked |= gr[1]
                # Output all revisions in the subgroup, then delete it —
                # unless it is groups[0], which is merely emptied.
                for r in gr[0]:
                    yield r
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Flush subgroups still waiting for revisions we are never going to
    # iterate over.
    for g in groups:
        for r in g[0]:
            yield r
@@ -1,1383 +1,1383
1 #
1 #
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import cgi
10 import cgi
11 import copy
11 import copy
12 import mimetypes
12 import mimetypes
13 import os
13 import os
14 import re
14 import re
15
15
16 from ..i18n import _
16 from ..i18n import _
17 from ..node import hex, short
17 from ..node import hex, short
18
18
19 from .common import (
19 from .common import (
20 ErrorResponse,
20 ErrorResponse,
21 HTTP_FORBIDDEN,
21 HTTP_FORBIDDEN,
22 HTTP_NOT_FOUND,
22 HTTP_NOT_FOUND,
23 HTTP_OK,
23 HTTP_OK,
24 get_contact,
24 get_contact,
25 paritygen,
25 paritygen,
26 staticfile,
26 staticfile,
27 )
27 )
28
28
29 from .. import (
29 from .. import (
30 archival,
30 archival,
31 context,
31 dagop,
32 encoding,
32 encoding,
33 error,
33 error,
34 graphmod,
34 graphmod,
35 revset,
35 revset,
36 revsetlang,
36 revsetlang,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 templatefilters,
39 templatefilters,
40 templater,
40 templater,
41 util,
41 util,
42 )
42 )
43
43
44 from . import (
44 from . import (
45 webutil,
45 webutil,
46 )
46 )
47
47
# Registry of hgweb command handlers: the @webcommand decorator below
# appends each command name to __all__ and maps it to its handler function
# in 'commands'.
__all__ = []
commands = {}
50
50
class webcommand(object):
    """Decorator used to register a web command handler.

    The decorator takes as its positional arguments the name/path the
    command should be accessible under.

    Usage:

    @webcommand('mycommand')
    def mycommand(web, req, tmpl):
        pass
    """

    def __init__(self, name):
        # URL path component the handler will be exposed under.
        self.name = name

    def __call__(self, func):
        # Record the handler in the module-level registry and export it
        # via __all__, then hand the function back unchanged.
        __all__.append(self.name)
        commands[self.name] = func
        return func
71
71
@webcommand('log')
def log(web, req, tmpl):
    """
    /log[/{revision}[/{path}]]
    --------------------------

    Show repository or file history.

    For URLs of the form ``/log/{revision}``, a list of changesets starting at
    the specified changeset identifier is shown. If ``{revision}`` is not
    defined, the default is ``tip``. This form is equivalent to the
    ``changelog`` handler.

    For URLs of the form ``/log/{revision}/{file}``, the history for a specific
    file will be shown. This form is equivalent to the ``filelog`` handler.
    """

    # Dispatch on the presence of a non-empty 'file' form parameter.
    filearg = req.form.get('file')
    if filearg and filearg[0]:
        return filelog(web, req, tmpl)
    return changelog(web, req, tmpl)
93
93
@webcommand('rawfile')
def rawfile(web, req, tmpl):
    # Honor the 'web.guessmime' knob when choosing the content type.
    guessmime = web.configbool('web', 'guessmime', False)

    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        # No file requested: serve the manifest (directory listing) view.
        content = manifest(web, req, tmpl)
        req.respond(HTTP_OK, web.ctype)
        return content

    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError as inst:
        # 'path' may name a directory; retry as a manifest and only
        # surface the original lookup error if that fails as well.
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    if guessmime:
        mt = mimetypes.guess_type(path)[0]
        if mt is None:
            # No extension-based guess: fall back on content sniffing.
            mt = 'application/binary' if util.binary(text) else 'text/plain'
    else:
        mt = 'application/binary'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    req.respond(HTTP_OK, mt, path, body=text)
    return []
129
129
def _filerevision(web, req, tmpl, fctx):
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)
    ishead = fctx.filerev() in fctx.filelog().headrevs()

    if util.binary(text):
        # Binary payloads are replaced with a short placeholder string.
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        # One template mapping per line, numbered from 1.
        for num, content in enumerate(text.splitlines(True), 1):
            yield {"line": content,
                   "lineid": "l%d" % num,
                   "linenumber": "% 6d" % num,
                   "parity": next(parity)}

    return tmpl("filerevision",
                file=f,
                path=webutil.up(f),
                text=lines(),
                symrev=webutil.symrevorshortnode(req, fctx),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f),
                ishead=int(ishead),
                **webutil.commonentry(web.repo, fctx))
156
156
@webcommand('file')
def file(web, req, tmpl):
    """
    /file/{revision}[/{path}]
    -------------------------

    Show information about a directory or file in the repository.

    Info about the ``path`` given as a URL parameter will be rendered.

    If ``path`` is a directory, information about the entries in that
    directory will be rendered. This form is equivalent to the ``manifest``
    handler.

    If ``path`` is a file, information about that file will be shown via
    the ``filerevision`` template.

    If ``path`` is not defined, information about the root directory will
    be rendered.
    """
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        return manifest(web, req, tmpl)
    try:
        # Both the lookup and the rendering may raise LookupError (the
        # path may actually name a directory).
        fctx = webutil.filectx(web.repo, req)
        return _filerevision(web, req, tmpl, fctx)
    except error.LookupError as inst:
        # Retry as a manifest listing; re-raise the original error only
        # when that fails too.
        try:
            return manifest(web, req, tmpl)
        except ErrorResponse:
            raise inst
187
187
188 def _search(web, req, tmpl):
188 def _search(web, req, tmpl):
189 MODE_REVISION = 'rev'
189 MODE_REVISION = 'rev'
190 MODE_KEYWORD = 'keyword'
190 MODE_KEYWORD = 'keyword'
191 MODE_REVSET = 'revset'
191 MODE_REVSET = 'revset'
192
192
193 def revsearch(ctx):
193 def revsearch(ctx):
194 yield ctx
194 yield ctx
195
195
196 def keywordsearch(query):
196 def keywordsearch(query):
197 lower = encoding.lower
197 lower = encoding.lower
198 qw = lower(query).split()
198 qw = lower(query).split()
199
199
200 def revgen():
200 def revgen():
201 cl = web.repo.changelog
201 cl = web.repo.changelog
202 for i in xrange(len(web.repo) - 1, 0, -100):
202 for i in xrange(len(web.repo) - 1, 0, -100):
203 l = []
203 l = []
204 for j in cl.revs(max(0, i - 99), i):
204 for j in cl.revs(max(0, i - 99), i):
205 ctx = web.repo[j]
205 ctx = web.repo[j]
206 l.append(ctx)
206 l.append(ctx)
207 l.reverse()
207 l.reverse()
208 for e in l:
208 for e in l:
209 yield e
209 yield e
210
210
211 for ctx in revgen():
211 for ctx in revgen():
212 miss = 0
212 miss = 0
213 for q in qw:
213 for q in qw:
214 if not (q in lower(ctx.user()) or
214 if not (q in lower(ctx.user()) or
215 q in lower(ctx.description()) or
215 q in lower(ctx.description()) or
216 q in lower(" ".join(ctx.files()))):
216 q in lower(" ".join(ctx.files()))):
217 miss = 1
217 miss = 1
218 break
218 break
219 if miss:
219 if miss:
220 continue
220 continue
221
221
222 yield ctx
222 yield ctx
223
223
224 def revsetsearch(revs):
224 def revsetsearch(revs):
225 for r in revs:
225 for r in revs:
226 yield web.repo[r]
226 yield web.repo[r]
227
227
228 searchfuncs = {
228 searchfuncs = {
229 MODE_REVISION: (revsearch, 'exact revision search'),
229 MODE_REVISION: (revsearch, 'exact revision search'),
230 MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
230 MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
231 MODE_REVSET: (revsetsearch, 'revset expression search'),
231 MODE_REVSET: (revsetsearch, 'revset expression search'),
232 }
232 }
233
233
234 def getsearchmode(query):
234 def getsearchmode(query):
235 try:
235 try:
236 ctx = web.repo[query]
236 ctx = web.repo[query]
237 except (error.RepoError, error.LookupError):
237 except (error.RepoError, error.LookupError):
238 # query is not an exact revision pointer, need to
238 # query is not an exact revision pointer, need to
239 # decide if it's a revset expression or keywords
239 # decide if it's a revset expression or keywords
240 pass
240 pass
241 else:
241 else:
242 return MODE_REVISION, ctx
242 return MODE_REVISION, ctx
243
243
244 revdef = 'reverse(%s)' % query
244 revdef = 'reverse(%s)' % query
245 try:
245 try:
246 tree = revsetlang.parse(revdef)
246 tree = revsetlang.parse(revdef)
247 except error.ParseError:
247 except error.ParseError:
248 # can't parse to a revset tree
248 # can't parse to a revset tree
249 return MODE_KEYWORD, query
249 return MODE_KEYWORD, query
250
250
251 if revsetlang.depth(tree) <= 2:
251 if revsetlang.depth(tree) <= 2:
252 # no revset syntax used
252 # no revset syntax used
253 return MODE_KEYWORD, query
253 return MODE_KEYWORD, query
254
254
255 if any((token, (value or '')[:3]) == ('string', 're:')
255 if any((token, (value or '')[:3]) == ('string', 're:')
256 for token, value, pos in revsetlang.tokenize(revdef)):
256 for token, value, pos in revsetlang.tokenize(revdef)):
257 return MODE_KEYWORD, query
257 return MODE_KEYWORD, query
258
258
259 funcsused = revsetlang.funcsused(tree)
259 funcsused = revsetlang.funcsused(tree)
260 if not funcsused.issubset(revset.safesymbols):
260 if not funcsused.issubset(revset.safesymbols):
261 return MODE_KEYWORD, query
261 return MODE_KEYWORD, query
262
262
263 mfunc = revset.match(web.repo.ui, revdef)
263 mfunc = revset.match(web.repo.ui, revdef)
264 try:
264 try:
265 revs = mfunc(web.repo)
265 revs = mfunc(web.repo)
266 return MODE_REVSET, revs
266 return MODE_REVSET, revs
267 # ParseError: wrongly placed tokens, wrongs arguments, etc
267 # ParseError: wrongly placed tokens, wrongs arguments, etc
268 # RepoLookupError: no such revision, e.g. in 'revision:'
268 # RepoLookupError: no such revision, e.g. in 'revision:'
269 # Abort: bookmark/tag not exists
269 # Abort: bookmark/tag not exists
270 # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
270 # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
271 except (error.ParseError, error.RepoLookupError, error.Abort,
271 except (error.ParseError, error.RepoLookupError, error.Abort,
272 LookupError):
272 LookupError):
273 return MODE_KEYWORD, query
273 return MODE_KEYWORD, query
274
274
275 def changelist(**map):
275 def changelist(**map):
276 count = 0
276 count = 0
277
277
278 for ctx in searchfunc[0](funcarg):
278 for ctx in searchfunc[0](funcarg):
279 count += 1
279 count += 1
280 n = ctx.node()
280 n = ctx.node()
281 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
281 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
282 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
282 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
283
283
284 yield tmpl('searchentry',
284 yield tmpl('searchentry',
285 parity=next(parity),
285 parity=next(parity),
286 changelogtag=showtags,
286 changelogtag=showtags,
287 files=files,
287 files=files,
288 **webutil.commonentry(web.repo, ctx))
288 **webutil.commonentry(web.repo, ctx))
289
289
290 if count >= revcount:
290 if count >= revcount:
291 break
291 break
292
292
293 query = req.form['rev'][0]
293 query = req.form['rev'][0]
294 revcount = web.maxchanges
294 revcount = web.maxchanges
295 if 'revcount' in req.form:
295 if 'revcount' in req.form:
296 try:
296 try:
297 revcount = int(req.form.get('revcount', [revcount])[0])
297 revcount = int(req.form.get('revcount', [revcount])[0])
298 revcount = max(revcount, 1)
298 revcount = max(revcount, 1)
299 tmpl.defaults['sessionvars']['revcount'] = revcount
299 tmpl.defaults['sessionvars']['revcount'] = revcount
300 except ValueError:
300 except ValueError:
301 pass
301 pass
302
302
303 lessvars = copy.copy(tmpl.defaults['sessionvars'])
303 lessvars = copy.copy(tmpl.defaults['sessionvars'])
304 lessvars['revcount'] = max(revcount / 2, 1)
304 lessvars['revcount'] = max(revcount / 2, 1)
305 lessvars['rev'] = query
305 lessvars['rev'] = query
306 morevars = copy.copy(tmpl.defaults['sessionvars'])
306 morevars = copy.copy(tmpl.defaults['sessionvars'])
307 morevars['revcount'] = revcount * 2
307 morevars['revcount'] = revcount * 2
308 morevars['rev'] = query
308 morevars['rev'] = query
309
309
310 mode, funcarg = getsearchmode(query)
310 mode, funcarg = getsearchmode(query)
311
311
312 if 'forcekw' in req.form:
312 if 'forcekw' in req.form:
313 showforcekw = ''
313 showforcekw = ''
314 showunforcekw = searchfuncs[mode][1]
314 showunforcekw = searchfuncs[mode][1]
315 mode = MODE_KEYWORD
315 mode = MODE_KEYWORD
316 funcarg = query
316 funcarg = query
317 else:
317 else:
318 if mode != MODE_KEYWORD:
318 if mode != MODE_KEYWORD:
319 showforcekw = searchfuncs[MODE_KEYWORD][1]
319 showforcekw = searchfuncs[MODE_KEYWORD][1]
320 else:
320 else:
321 showforcekw = ''
321 showforcekw = ''
322 showunforcekw = ''
322 showunforcekw = ''
323
323
324 searchfunc = searchfuncs[mode]
324 searchfunc = searchfuncs[mode]
325
325
326 tip = web.repo['tip']
326 tip = web.repo['tip']
327 parity = paritygen(web.stripecount)
327 parity = paritygen(web.stripecount)
328
328
329 return tmpl('search', query=query, node=tip.hex(), symrev='tip',
329 return tmpl('search', query=query, node=tip.hex(), symrev='tip',
330 entries=changelist, archives=web.archivelist("tip"),
330 entries=changelist, archives=web.archivelist("tip"),
331 morevars=morevars, lessvars=lessvars,
331 morevars=morevars, lessvars=lessvars,
332 modedesc=searchfunc[1],
332 modedesc=searchfunc[1],
333 showforcekw=showforcekw, showunforcekw=showunforcekw)
333 showforcekw=showforcekw, showunforcekw=showunforcekw)
334
334
@webcommand('changelog')
def changelog(web, req, tmpl, shortlog=False):
    """
    /changelog[/{revision}]
    -----------------------

    Show information about multiple changesets.

    If the optional ``revision`` URL argument is absent, information about
    all changesets starting at ``tip`` will be rendered. If the ``revision``
    argument is present, changesets will be shown starting from the specified
    revision.

    If ``revision`` is absent, the ``rev`` query string argument may be
    defined. This will perform a search for changesets.

    The argument for ``rev`` can be a single revision, a revision set,
    or a literal keyword to search for in changeset data (equivalent to
    :hg:`log -k`).

    The ``revcount`` query string argument defines the maximum numbers of
    changesets to render.

    For non-searches, the ``changelog`` template will be rendered.
    """

    query = ''
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    elif 'rev' in req.form:
        # a 'rev' query argument means keyword/revset search, not browsing
        return _search(web, req, tmpl)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'

    def changelist():
        # walk backwards from 'pos'; yield one extra entry so the caller
        # can tell whether a "next" page exists
        revs = []
        if pos != -1:
            revs = web.repo.changelog.revs(pos, 0)
        curcount = 0
        for rev in revs:
            curcount += 1
            if curcount > revcount + 1:
                break

            entry = webutil.changelistentry(web, web.repo[rev], tmpl)
            entry['parity'] = next(parity)
            yield entry

    if shortlog:
        revcount = web.maxshortchanges
    else:
        revcount = web.maxchanges

    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    # floor division: keep revcount an int under Python 3 true division
    lessvars['revcount'] = max(revcount // 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = len(web.repo)
    pos = ctx.rev()
    parity = paritygen(web.stripecount)

    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    entries = list(changelist())
    latestentry = entries[:1]
    if len(entries) > revcount:
        # the extra entry only proves a next page exists; do not render it
        nextentry = entries[-1:]
        entries = entries[:-1]
    else:
        nextentry = []

    return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
                node=ctx.hex(), rev=pos, symrev=symrev, changesets=count,
                entries=entries,
                latestentry=latestentry, nextentry=nextentry,
                archives=web.archivelist("tip"), revcount=revcount,
                morevars=morevars, lessvars=lessvars, query=query)
423
423
@webcommand('shortlog')
def shortlog(web, req, tmpl):
    """
    /shortlog
    ---------

    Show basic information about a set of changesets.

    This accepts the same parameters as the ``changelog`` handler. The only
    difference is the ``shortlog`` template will be rendered instead of the
    ``changelog`` template.
    """
    # pure delegation: the changelog handler does all the work and picks
    # the 'shortlog' template when the flag is set
    return changelog(web, req, tmpl, shortlog=True)
437
437
@webcommand('changeset')
def changeset(web, req, tmpl):
    """
    /changeset[/{revision}]
    -----------------------

    Show information about a single changeset.

    A URL path argument is the changeset identifier to show. See ``hg help
    revisions`` for possible values. If not defined, the ``tip`` changeset
    will be shown.

    The ``changeset`` template is rendered. Contents of the ``changesettag``,
    ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
    templates related to diffs may all be used to produce the output.
    """
    ctx = webutil.changectx(web.repo, req)
    entry = webutil.changesetentry(web, req, tmpl, ctx)
    return tmpl('changeset', **entry)

rev = webcommand('rev')(changeset)
459
459
def decodepath(path):
    """Hook for mapping a path in the repository to a path in the
    working copy.

    Extensions (e.g., largefiles) can override this to remap files in
    the virtual file system presented by the manifest command below."""
    # the default mapping is the identity
    return path
467
467
@webcommand('manifest')
def manifest(web, req, tmpl):
    """
    /manifest[/{revision}[/{path}]]
    -------------------------------

    Show information about a directory.

    If the URL path arguments are omitted, information about the root
    directory for the ``tip`` changeset will be shown.

    Because this handler can only show information for directories, it
    is recommended to use the ``file`` handler instead, as it can handle both
    directories and files.

    The ``manifest`` template will be rendered for this handler.
    """
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            files[remain] = full
        else:
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        for f in sorted(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": next(parity),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        for d in sorted(dirs):

            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                # collapse single-entry directory chains into "a/b/c";
                # h.items()[0] is not subscriptable on a Python 3 dict
                # view, so take the sole item via next(iter(...)), which
                # behaves identically on Python 2
                k, v = next(iter(h.items()))
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": next(parity),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return tmpl("manifest",
                symrev=symrev,
                path=abspath,
                up=webutil.up(abspath),
                upparity=next(parity),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                **webutil.commonentry(web.repo, ctx))
566
566
@webcommand('tags')
def tags(web, req, tmpl):
    """
    /tags
    -----

    Show information about tags.

    No arguments are accepted.

    The ``tags`` template is rendered.
    """
    taglist = list(reversed(web.repo.tagslist()))
    parity = paritygen(web.stripecount)

    def entries(notip, latestonly, **map):
        # optionally filter out the synthetic 'tip' tag, optionally keep
        # only the newest entry
        selection = taglist
        if notip:
            selection = [(name, node) for name, node in taglist
                         if name != "tip"]
        if latestonly:
            selection = selection[:1]
        for name, node in selection:
            yield {"parity": next(parity),
                   "tag": name,
                   "date": web.repo[node].date(),
                   "node": hex(node)}

    return tmpl("tags",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(False, False, **x),
                entriesnotip=lambda **x: entries(True, False, **x),
                latestentry=lambda **x: entries(True, True, **x))
599
599
@webcommand('bookmarks')
def bookmarks(web, req, tmpl):
    """
    /bookmarks
    ----------

    Show information about bookmarks.

    No arguments are accepted.

    The ``bookmarks`` template is rendered.
    """
    # only bookmarks whose target changeset is visible in this repo view
    marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
    bykey = lambda b: (web.repo[b[1]].rev(), b[0])
    marks = sorted(marks, key=bykey, reverse=True)
    parity = paritygen(web.stripecount)

    def entries(latestonly, **map):
        if latestonly:
            visible = marks[:1]
        else:
            visible = marks
        for name, node in visible:
            yield {"parity": next(parity),
                   "bookmark": name,
                   "date": web.repo[node].date(),
                   "node": hex(node)}

    if marks:
        latestrev = marks[0][1]
    else:
        latestrev = -1

    return tmpl("bookmarks",
                node=hex(web.repo.changelog.tip()),
                lastchange=[{"date": web.repo[latestrev].date()}],
                entries=lambda **x: entries(latestonly=False, **x),
                latestentry=lambda **x: entries(latestonly=True, **x))
637
637
@webcommand('branches')
def branches(web, req, tmpl):
    """
    /branches
    ---------

    Show information about branches.

    All known branches are contained in the output, even closed branches.

    No arguments are accepted.

    The ``branches`` template is rendered.
    """
    tipnode = hex(web.repo.changelog.tip())
    allentries = webutil.branchentries(web.repo, web.stripecount)
    latestentry = webutil.branchentries(web.repo, web.stripecount, 1)
    return tmpl('branches', node=tipnode,
                entries=allentries, latestentry=latestentry)
656
656
@webcommand('summary')
def summary(web, req, tmpl):
    """
    /summary
    --------

    Show a summary of repository state.

    Information about the latest changesets, bookmarks, tags, and branches
    is captured by this handler.

    The ``summary`` template is rendered.
    """
    taglist = reversed(web.repo.tagslist())

    def tagentries(**map):
        parity = paritygen(web.stripecount)
        shown = 0
        for name, node in taglist:
            if name == "tip": # skip tip
                continue

            shown += 1
            if shown > 10: # limit to 10 tags
                break

            yield tmpl("tagentry",
                       parity=next(parity),
                       tag=name,
                       node=hex(node),
                       date=web.repo[node].date())

    def bookmarkentries(**map):
        parity = paritygen(web.stripecount)
        marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
        bykey = lambda b: (web.repo[b[1]].rev(), b[0])
        for name, node in sorted(marks, key=bykey, reverse=True)[:10]:
            # limit to 10 bookmarks
            yield {'parity': next(parity),
                   'bookmark': name,
                   'date': web.repo[node].date(),
                   'node': hex(node)}

    def changelist(**map):
        parity = paritygen(web.stripecount, offset=start - end)
        collected = [] # build a list in forward order for efficiency
        revs = []
        if start < end:
            revs = web.repo.changelog.revs(start, end - 1)
        for rev in revs:
            ctx = web.repo[rev]
            collected.append(tmpl(
                'shortlogentry',
                parity=next(parity),
                **webutil.commonentry(web.repo, ctx)))

        for entry in reversed(collected):
            yield entry

    tip = web.repo['tip']
    count = len(web.repo)
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    return tmpl("summary",
                desc=web.config("web", "description", "unknown"),
                owner=get_contact(web.config) or "unknown",
                lastchange=tip.date(),
                tags=tagentries,
                bookmarks=bookmarkentries,
                branches=webutil.branchentries(web.repo, web.stripecount, 10),
                shortlog=changelist,
                node=tip.hex(),
                symrev='tip',
                archives=web.archivelist("tip"),
                labels=web.configlist('web', 'labels'))
734
734
@webcommand('filediff')
def filediff(web, req, tmpl):
    """
    /diff/{revision}/{path}
    -----------------------

    Show how a file changed in a particular commit.

    The ``filediff`` template is rendered.

    This handler is registered under both the ``/diff`` and ``/filediff``
    paths. ``/diff`` is used in modern code.
    """
    fctx = None
    ctx = None
    try:
        fctx = webutil.filectx(web.repo, req)
    except LookupError:
        # the file may not exist in the requested revision (e.g. it was
        # removed there); fall back to the changeset, but only if the
        # changeset actually touched that file
        ctx = webutil.changectx(web.repo, req)
        path = webutil.cleanpath(web.repo, req.form['file'][0])
        if path not in ctx.files():
            raise

    if fctx is not None:
        path = fctx.path()
        ctx = fctx.changectx()
    basectx = ctx.p1()

    style = web.config('web', 'style', 'paper')
    if 'style' in req.form:
        style = req.form['style'][0]

    diffs = webutil.diffs(web, tmpl, ctx, basectx, [path], style)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = []
    return tmpl("filediff",
                file=path,
                symrev=webutil.symrevorshortnode(req, ctx),
                rename=rename,
                diff=diffs,
                **webutil.commonentry(web.repo, ctx))

diff = webcommand('diff')(filediff)
781
781
@webcommand('comparison')
def comparison(web, req, tmpl):
    """
    /comparison/{revision}/{path}
    -----------------------------

    Show a comparison between the old and new versions of a file from changes
    made on a particular revision.

    This is similar to the ``diff`` handler. However, this form features
    a split or side-by-side diff rather than a unified diff.

    The ``context`` query string argument can be used to control the lines of
    context in the diff.

    The ``filecomparison`` template is rendered.
    """
    ctx = webutil.changectx(web.repo, req)
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, req.form['file'][0])

    def parsecontext(v):
        # 'full' requests unlimited context, encoded as -1
        if v == 'full':
            return -1
        return int(v)

    if 'context' in req.form:
        context = parsecontext(req.form['context'][0])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        # binary contents are summarized as a single placeholder line
        if f.isbinary():
            mt = mimetypes.guess_type(f.path())[0]
            if not mt:
                mt = 'application/octet-stream'
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    fctx = None
    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = ctx.node()
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path in parent:
            leftlines = filelines(parent[path])
        else:
            # file was added in this changeset: nothing on the left
            leftlines = ()
    else:
        # file was removed in this changeset: nothing on the right
        rightlines = ()
        leftlines = filelines(ctx.parents()[0][path])

    comparison = webutil.compare(tmpl, context, leftlines, rightlines)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = []
    return tmpl('filecomparison',
                file=path,
                symrev=webutil.symrevorshortnode(req, ctx),
                rename=rename,
                leftrev=leftrev,
                leftnode=hex(leftnode),
                rightrev=rightrev,
                rightnode=hex(rightnode),
                comparison=comparison,
                **webutil.commonentry(web.repo, ctx))
854
854
855 @webcommand('annotate')
855 @webcommand('annotate')
856 def annotate(web, req, tmpl):
856 def annotate(web, req, tmpl):
857 """
857 """
858 /annotate/{revision}/{path}
858 /annotate/{revision}/{path}
859 ---------------------------
859 ---------------------------
860
860
861 Show changeset information for each line in a file.
861 Show changeset information for each line in a file.
862
862
863 The ``fileannotate`` template is rendered.
863 The ``fileannotate`` template is rendered.
864 """
864 """
865 fctx = webutil.filectx(web.repo, req)
865 fctx = webutil.filectx(web.repo, req)
866 f = fctx.path()
866 f = fctx.path()
867 parity = paritygen(web.stripecount)
867 parity = paritygen(web.stripecount)
868
868
869 # parents() is called once per line and several lines likely belong to
869 # parents() is called once per line and several lines likely belong to
870 # same revision. So it is worth caching.
870 # same revision. So it is worth caching.
871 # TODO there are still redundant operations within basefilectx.parents()
871 # TODO there are still redundant operations within basefilectx.parents()
872 # and from the fctx.annotate() call itself that could be cached.
872 # and from the fctx.annotate() call itself that could be cached.
873 parentscache = {}
873 parentscache = {}
874 def parents(f):
874 def parents(f):
875 rev = f.rev()
875 rev = f.rev()
876 if rev not in parentscache:
876 if rev not in parentscache:
877 parentscache[rev] = []
877 parentscache[rev] = []
878 for p in f.parents():
878 for p in f.parents():
879 entry = {
879 entry = {
880 'node': p.hex(),
880 'node': p.hex(),
881 'rev': p.rev(),
881 'rev': p.rev(),
882 }
882 }
883 parentscache[rev].append(entry)
883 parentscache[rev].append(entry)
884
884
885 for p in parentscache[rev]:
885 for p in parentscache[rev]:
886 yield p
886 yield p
887
887
888 def annotate(**map):
888 def annotate(**map):
889 if fctx.isbinary():
889 if fctx.isbinary():
890 mt = (mimetypes.guess_type(fctx.path())[0]
890 mt = (mimetypes.guess_type(fctx.path())[0]
891 or 'application/octet-stream')
891 or 'application/octet-stream')
892 lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)]
892 lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)]
893 else:
893 else:
894 lines = webutil.annotate(fctx, web.repo.ui)
894 lines = webutil.annotate(fctx, web.repo.ui)
895
895
896 previousrev = None
896 previousrev = None
897 blockparitygen = paritygen(1)
897 blockparitygen = paritygen(1)
898 for lineno, ((f, targetline), l) in enumerate(lines):
898 for lineno, ((f, targetline), l) in enumerate(lines):
899 rev = f.rev()
899 rev = f.rev()
900 if rev != previousrev:
900 if rev != previousrev:
901 blockhead = True
901 blockhead = True
902 blockparity = next(blockparitygen)
902 blockparity = next(blockparitygen)
903 else:
903 else:
904 blockhead = None
904 blockhead = None
905 previousrev = rev
905 previousrev = rev
906 yield {"parity": next(parity),
906 yield {"parity": next(parity),
907 "node": f.hex(),
907 "node": f.hex(),
908 "rev": rev,
908 "rev": rev,
909 "author": f.user(),
909 "author": f.user(),
910 "parents": parents(f),
910 "parents": parents(f),
911 "desc": f.description(),
911 "desc": f.description(),
912 "extra": f.extra(),
912 "extra": f.extra(),
913 "file": f.path(),
913 "file": f.path(),
914 "blockhead": blockhead,
914 "blockhead": blockhead,
915 "blockparity": blockparity,
915 "blockparity": blockparity,
916 "targetline": targetline,
916 "targetline": targetline,
917 "line": l,
917 "line": l,
918 "lineno": lineno + 1,
918 "lineno": lineno + 1,
919 "lineid": "l%d" % (lineno + 1),
919 "lineid": "l%d" % (lineno + 1),
920 "linenumber": "% 6d" % (lineno + 1),
920 "linenumber": "% 6d" % (lineno + 1),
921 "revdate": f.date()}
921 "revdate": f.date()}
922
922
923 return tmpl("fileannotate",
923 return tmpl("fileannotate",
924 file=f,
924 file=f,
925 annotate=annotate,
925 annotate=annotate,
926 path=webutil.up(f),
926 path=webutil.up(f),
927 symrev=webutil.symrevorshortnode(req, fctx),
927 symrev=webutil.symrevorshortnode(req, fctx),
928 rename=webutil.renamelink(fctx),
928 rename=webutil.renamelink(fctx),
929 permissions=fctx.manifest().flags(f),
929 permissions=fctx.manifest().flags(f),
930 **webutil.commonentry(web.repo, fctx))
930 **webutil.commonentry(web.repo, fctx))
931
931
932 @webcommand('filelog')
932 @webcommand('filelog')
933 def filelog(web, req, tmpl):
933 def filelog(web, req, tmpl):
934 """
934 """
935 /filelog/{revision}/{path}
935 /filelog/{revision}/{path}
936 --------------------------
936 --------------------------
937
937
938 Show information about the history of a file in the repository.
938 Show information about the history of a file in the repository.
939
939
940 The ``revcount`` query string argument can be defined to control the
940 The ``revcount`` query string argument can be defined to control the
941 maximum number of entries to show.
941 maximum number of entries to show.
942
942
943 The ``filelog`` template will be rendered.
943 The ``filelog`` template will be rendered.
944 """
944 """
945
945
946 try:
946 try:
947 fctx = webutil.filectx(web.repo, req)
947 fctx = webutil.filectx(web.repo, req)
948 f = fctx.path()
948 f = fctx.path()
949 fl = fctx.filelog()
949 fl = fctx.filelog()
950 except error.LookupError:
950 except error.LookupError:
951 f = webutil.cleanpath(web.repo, req.form['file'][0])
951 f = webutil.cleanpath(web.repo, req.form['file'][0])
952 fl = web.repo.file(f)
952 fl = web.repo.file(f)
953 numrevs = len(fl)
953 numrevs = len(fl)
954 if not numrevs: # file doesn't exist at all
954 if not numrevs: # file doesn't exist at all
955 raise
955 raise
956 rev = webutil.changectx(web.repo, req).rev()
956 rev = webutil.changectx(web.repo, req).rev()
957 first = fl.linkrev(0)
957 first = fl.linkrev(0)
958 if rev < first: # current rev is from before file existed
958 if rev < first: # current rev is from before file existed
959 raise
959 raise
960 frev = numrevs - 1
960 frev = numrevs - 1
961 while fl.linkrev(frev) > rev:
961 while fl.linkrev(frev) > rev:
962 frev -= 1
962 frev -= 1
963 fctx = web.repo.filectx(f, fl.linkrev(frev))
963 fctx = web.repo.filectx(f, fl.linkrev(frev))
964
964
965 revcount = web.maxshortchanges
965 revcount = web.maxshortchanges
966 if 'revcount' in req.form:
966 if 'revcount' in req.form:
967 try:
967 try:
968 revcount = int(req.form.get('revcount', [revcount])[0])
968 revcount = int(req.form.get('revcount', [revcount])[0])
969 revcount = max(revcount, 1)
969 revcount = max(revcount, 1)
970 tmpl.defaults['sessionvars']['revcount'] = revcount
970 tmpl.defaults['sessionvars']['revcount'] = revcount
971 except ValueError:
971 except ValueError:
972 pass
972 pass
973
973
974 lrange = webutil.linerange(req)
974 lrange = webutil.linerange(req)
975
975
976 lessvars = copy.copy(tmpl.defaults['sessionvars'])
976 lessvars = copy.copy(tmpl.defaults['sessionvars'])
977 lessvars['revcount'] = max(revcount / 2, 1)
977 lessvars['revcount'] = max(revcount / 2, 1)
978 morevars = copy.copy(tmpl.defaults['sessionvars'])
978 morevars = copy.copy(tmpl.defaults['sessionvars'])
979 morevars['revcount'] = revcount * 2
979 morevars['revcount'] = revcount * 2
980
980
981 patch = 'patch' in req.form
981 patch = 'patch' in req.form
982 if patch:
982 if patch:
983 lessvars['patch'] = morevars['patch'] = req.form['patch'][0]
983 lessvars['patch'] = morevars['patch'] = req.form['patch'][0]
984 descend = 'descend' in req.form
984 descend = 'descend' in req.form
985 if descend:
985 if descend:
986 lessvars['descend'] = morevars['descend'] = req.form['descend'][0]
986 lessvars['descend'] = morevars['descend'] = req.form['descend'][0]
987
987
988 count = fctx.filerev() + 1
988 count = fctx.filerev() + 1
989 start = max(0, count - revcount) # first rev on this page
989 start = max(0, count - revcount) # first rev on this page
990 end = min(count, start + revcount) # last rev on this page
990 end = min(count, start + revcount) # last rev on this page
991 parity = paritygen(web.stripecount, offset=start - end)
991 parity = paritygen(web.stripecount, offset=start - end)
992
992
993 repo = web.repo
993 repo = web.repo
994 revs = fctx.filelog().revs(start, end - 1)
994 revs = fctx.filelog().revs(start, end - 1)
995 entries = []
995 entries = []
996
996
997 diffstyle = web.config('web', 'style', 'paper')
997 diffstyle = web.config('web', 'style', 'paper')
998 if 'style' in req.form:
998 if 'style' in req.form:
999 diffstyle = req.form['style'][0]
999 diffstyle = req.form['style'][0]
1000
1000
1001 def diff(fctx, linerange=None):
1001 def diff(fctx, linerange=None):
1002 ctx = fctx.changectx()
1002 ctx = fctx.changectx()
1003 basectx = ctx.p1()
1003 basectx = ctx.p1()
1004 path = fctx.path()
1004 path = fctx.path()
1005 return webutil.diffs(web, tmpl, ctx, basectx, [path], diffstyle,
1005 return webutil.diffs(web, tmpl, ctx, basectx, [path], diffstyle,
1006 linerange=linerange,
1006 linerange=linerange,
1007 lineidprefix='%s-' % ctx.hex()[:12])
1007 lineidprefix='%s-' % ctx.hex()[:12])
1008
1008
1009 linerange = None
1009 linerange = None
1010 if lrange is not None:
1010 if lrange is not None:
1011 linerange = webutil.formatlinerange(*lrange)
1011 linerange = webutil.formatlinerange(*lrange)
1012 # deactivate numeric nav links when linerange is specified as this
1012 # deactivate numeric nav links when linerange is specified as this
1013 # would required a dedicated "revnav" class
1013 # would required a dedicated "revnav" class
1014 nav = None
1014 nav = None
1015 if descend:
1015 if descend:
1016 it = context.blockdescendants(fctx, *lrange)
1016 it = dagop.blockdescendants(fctx, *lrange)
1017 else:
1017 else:
1018 it = context.blockancestors(fctx, *lrange)
1018 it = dagop.blockancestors(fctx, *lrange)
1019 for i, (c, lr) in enumerate(it, 1):
1019 for i, (c, lr) in enumerate(it, 1):
1020 diffs = None
1020 diffs = None
1021 if patch:
1021 if patch:
1022 diffs = diff(c, linerange=lr)
1022 diffs = diff(c, linerange=lr)
1023 # follow renames accross filtered (not in range) revisions
1023 # follow renames accross filtered (not in range) revisions
1024 path = c.path()
1024 path = c.path()
1025 entries.append(dict(
1025 entries.append(dict(
1026 parity=next(parity),
1026 parity=next(parity),
1027 filerev=c.rev(),
1027 filerev=c.rev(),
1028 file=path,
1028 file=path,
1029 diff=diffs,
1029 diff=diffs,
1030 linerange=webutil.formatlinerange(*lr),
1030 linerange=webutil.formatlinerange(*lr),
1031 **webutil.commonentry(repo, c)))
1031 **webutil.commonentry(repo, c)))
1032 if i == revcount:
1032 if i == revcount:
1033 break
1033 break
1034 lessvars['linerange'] = webutil.formatlinerange(*lrange)
1034 lessvars['linerange'] = webutil.formatlinerange(*lrange)
1035 morevars['linerange'] = lessvars['linerange']
1035 morevars['linerange'] = lessvars['linerange']
1036 else:
1036 else:
1037 for i in revs:
1037 for i in revs:
1038 iterfctx = fctx.filectx(i)
1038 iterfctx = fctx.filectx(i)
1039 diffs = None
1039 diffs = None
1040 if patch:
1040 if patch:
1041 diffs = diff(iterfctx)
1041 diffs = diff(iterfctx)
1042 entries.append(dict(
1042 entries.append(dict(
1043 parity=next(parity),
1043 parity=next(parity),
1044 filerev=i,
1044 filerev=i,
1045 file=f,
1045 file=f,
1046 diff=diffs,
1046 diff=diffs,
1047 rename=webutil.renamelink(iterfctx),
1047 rename=webutil.renamelink(iterfctx),
1048 **webutil.commonentry(repo, iterfctx)))
1048 **webutil.commonentry(repo, iterfctx)))
1049 entries.reverse()
1049 entries.reverse()
1050 revnav = webutil.filerevnav(web.repo, fctx.path())
1050 revnav = webutil.filerevnav(web.repo, fctx.path())
1051 nav = revnav.gen(end - 1, revcount, count)
1051 nav = revnav.gen(end - 1, revcount, count)
1052
1052
1053 latestentry = entries[:1]
1053 latestentry = entries[:1]
1054
1054
1055 return tmpl("filelog",
1055 return tmpl("filelog",
1056 file=f,
1056 file=f,
1057 nav=nav,
1057 nav=nav,
1058 symrev=webutil.symrevorshortnode(req, fctx),
1058 symrev=webutil.symrevorshortnode(req, fctx),
1059 entries=entries,
1059 entries=entries,
1060 descend=descend,
1060 descend=descend,
1061 patch=patch,
1061 patch=patch,
1062 latestentry=latestentry,
1062 latestentry=latestentry,
1063 linerange=linerange,
1063 linerange=linerange,
1064 revcount=revcount,
1064 revcount=revcount,
1065 morevars=morevars,
1065 morevars=morevars,
1066 lessvars=lessvars,
1066 lessvars=lessvars,
1067 **webutil.commonentry(web.repo, fctx))
1067 **webutil.commonentry(web.repo, fctx))
1068
1068
1069 @webcommand('archive')
1069 @webcommand('archive')
1070 def archive(web, req, tmpl):
1070 def archive(web, req, tmpl):
1071 """
1071 """
1072 /archive/{revision}.{format}[/{path}]
1072 /archive/{revision}.{format}[/{path}]
1073 -------------------------------------
1073 -------------------------------------
1074
1074
1075 Obtain an archive of repository content.
1075 Obtain an archive of repository content.
1076
1076
1077 The content and type of the archive is defined by a URL path parameter.
1077 The content and type of the archive is defined by a URL path parameter.
1078 ``format`` is the file extension of the archive type to be generated. e.g.
1078 ``format`` is the file extension of the archive type to be generated. e.g.
1079 ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
1079 ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
1080 server configuration.
1080 server configuration.
1081
1081
1082 The optional ``path`` URL parameter controls content to include in the
1082 The optional ``path`` URL parameter controls content to include in the
1083 archive. If omitted, every file in the specified revision is present in the
1083 archive. If omitted, every file in the specified revision is present in the
1084 archive. If included, only the specified file or contents of the specified
1084 archive. If included, only the specified file or contents of the specified
1085 directory will be included in the archive.
1085 directory will be included in the archive.
1086
1086
1087 No template is used for this handler. Raw, binary content is generated.
1087 No template is used for this handler. Raw, binary content is generated.
1088 """
1088 """
1089
1089
1090 type_ = req.form.get('type', [None])[0]
1090 type_ = req.form.get('type', [None])[0]
1091 allowed = web.configlist("web", "allow_archive")
1091 allowed = web.configlist("web", "allow_archive")
1092 key = req.form['node'][0]
1092 key = req.form['node'][0]
1093
1093
1094 if type_ not in web.archivespecs:
1094 if type_ not in web.archivespecs:
1095 msg = 'Unsupported archive type: %s' % type_
1095 msg = 'Unsupported archive type: %s' % type_
1096 raise ErrorResponse(HTTP_NOT_FOUND, msg)
1096 raise ErrorResponse(HTTP_NOT_FOUND, msg)
1097
1097
1098 if not ((type_ in allowed or
1098 if not ((type_ in allowed or
1099 web.configbool("web", "allow" + type_, False))):
1099 web.configbool("web", "allow" + type_, False))):
1100 msg = 'Archive type not allowed: %s' % type_
1100 msg = 'Archive type not allowed: %s' % type_
1101 raise ErrorResponse(HTTP_FORBIDDEN, msg)
1101 raise ErrorResponse(HTTP_FORBIDDEN, msg)
1102
1102
1103 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
1103 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
1104 cnode = web.repo.lookup(key)
1104 cnode = web.repo.lookup(key)
1105 arch_version = key
1105 arch_version = key
1106 if cnode == key or key == 'tip':
1106 if cnode == key or key == 'tip':
1107 arch_version = short(cnode)
1107 arch_version = short(cnode)
1108 name = "%s-%s" % (reponame, arch_version)
1108 name = "%s-%s" % (reponame, arch_version)
1109
1109
1110 ctx = webutil.changectx(web.repo, req)
1110 ctx = webutil.changectx(web.repo, req)
1111 pats = []
1111 pats = []
1112 matchfn = scmutil.match(ctx, [])
1112 matchfn = scmutil.match(ctx, [])
1113 file = req.form.get('file', None)
1113 file = req.form.get('file', None)
1114 if file:
1114 if file:
1115 pats = ['path:' + file[0]]
1115 pats = ['path:' + file[0]]
1116 matchfn = scmutil.match(ctx, pats, default='path')
1116 matchfn = scmutil.match(ctx, pats, default='path')
1117 if pats:
1117 if pats:
1118 files = [f for f in ctx.manifest().keys() if matchfn(f)]
1118 files = [f for f in ctx.manifest().keys() if matchfn(f)]
1119 if not files:
1119 if not files:
1120 raise ErrorResponse(HTTP_NOT_FOUND,
1120 raise ErrorResponse(HTTP_NOT_FOUND,
1121 'file(s) not found: %s' % file[0])
1121 'file(s) not found: %s' % file[0])
1122
1122
1123 mimetype, artype, extension, encoding = web.archivespecs[type_]
1123 mimetype, artype, extension, encoding = web.archivespecs[type_]
1124 headers = [
1124 headers = [
1125 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
1125 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
1126 ]
1126 ]
1127 if encoding:
1127 if encoding:
1128 headers.append(('Content-Encoding', encoding))
1128 headers.append(('Content-Encoding', encoding))
1129 req.headers.extend(headers)
1129 req.headers.extend(headers)
1130 req.respond(HTTP_OK, mimetype)
1130 req.respond(HTTP_OK, mimetype)
1131
1131
1132 archival.archive(web.repo, req, cnode, artype, prefix=name,
1132 archival.archive(web.repo, req, cnode, artype, prefix=name,
1133 matchfn=matchfn,
1133 matchfn=matchfn,
1134 subrepos=web.configbool("web", "archivesubrepos"))
1134 subrepos=web.configbool("web", "archivesubrepos"))
1135 return []
1135 return []
1136
1136
1137
1137
1138 @webcommand('static')
1138 @webcommand('static')
1139 def static(web, req, tmpl):
1139 def static(web, req, tmpl):
1140 fname = req.form['file'][0]
1140 fname = req.form['file'][0]
1141 # a repo owner may set web.static in .hg/hgrc to get any file
1141 # a repo owner may set web.static in .hg/hgrc to get any file
1142 # readable by the user running the CGI script
1142 # readable by the user running the CGI script
1143 static = web.config("web", "static", None, untrusted=False)
1143 static = web.config("web", "static", None, untrusted=False)
1144 if not static:
1144 if not static:
1145 tp = web.templatepath or templater.templatepaths()
1145 tp = web.templatepath or templater.templatepaths()
1146 if isinstance(tp, str):
1146 if isinstance(tp, str):
1147 tp = [tp]
1147 tp = [tp]
1148 static = [os.path.join(p, 'static') for p in tp]
1148 static = [os.path.join(p, 'static') for p in tp]
1149 staticfile(static, fname, req)
1149 staticfile(static, fname, req)
1150 return []
1150 return []
1151
1151
1152 @webcommand('graph')
1152 @webcommand('graph')
1153 def graph(web, req, tmpl):
1153 def graph(web, req, tmpl):
1154 """
1154 """
1155 /graph[/{revision}]
1155 /graph[/{revision}]
1156 -------------------
1156 -------------------
1157
1157
1158 Show information about the graphical topology of the repository.
1158 Show information about the graphical topology of the repository.
1159
1159
1160 Information rendered by this handler can be used to create visual
1160 Information rendered by this handler can be used to create visual
1161 representations of repository topology.
1161 representations of repository topology.
1162
1162
1163 The ``revision`` URL parameter controls the starting changeset.
1163 The ``revision`` URL parameter controls the starting changeset.
1164
1164
1165 The ``revcount`` query string argument can define the number of changesets
1165 The ``revcount`` query string argument can define the number of changesets
1166 to show information for.
1166 to show information for.
1167
1167
1168 This handler will render the ``graph`` template.
1168 This handler will render the ``graph`` template.
1169 """
1169 """
1170
1170
1171 if 'node' in req.form:
1171 if 'node' in req.form:
1172 ctx = webutil.changectx(web.repo, req)
1172 ctx = webutil.changectx(web.repo, req)
1173 symrev = webutil.symrevorshortnode(req, ctx)
1173 symrev = webutil.symrevorshortnode(req, ctx)
1174 else:
1174 else:
1175 ctx = web.repo['tip']
1175 ctx = web.repo['tip']
1176 symrev = 'tip'
1176 symrev = 'tip'
1177 rev = ctx.rev()
1177 rev = ctx.rev()
1178
1178
1179 bg_height = 39
1179 bg_height = 39
1180 revcount = web.maxshortchanges
1180 revcount = web.maxshortchanges
1181 if 'revcount' in req.form:
1181 if 'revcount' in req.form:
1182 try:
1182 try:
1183 revcount = int(req.form.get('revcount', [revcount])[0])
1183 revcount = int(req.form.get('revcount', [revcount])[0])
1184 revcount = max(revcount, 1)
1184 revcount = max(revcount, 1)
1185 tmpl.defaults['sessionvars']['revcount'] = revcount
1185 tmpl.defaults['sessionvars']['revcount'] = revcount
1186 except ValueError:
1186 except ValueError:
1187 pass
1187 pass
1188
1188
1189 lessvars = copy.copy(tmpl.defaults['sessionvars'])
1189 lessvars = copy.copy(tmpl.defaults['sessionvars'])
1190 lessvars['revcount'] = max(revcount / 2, 1)
1190 lessvars['revcount'] = max(revcount / 2, 1)
1191 morevars = copy.copy(tmpl.defaults['sessionvars'])
1191 morevars = copy.copy(tmpl.defaults['sessionvars'])
1192 morevars['revcount'] = revcount * 2
1192 morevars['revcount'] = revcount * 2
1193
1193
1194 count = len(web.repo)
1194 count = len(web.repo)
1195 pos = rev
1195 pos = rev
1196
1196
1197 uprev = min(max(0, count - 1), rev + revcount)
1197 uprev = min(max(0, count - 1), rev + revcount)
1198 downrev = max(0, rev - revcount)
1198 downrev = max(0, rev - revcount)
1199 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
1199 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
1200
1200
1201 tree = []
1201 tree = []
1202 if pos != -1:
1202 if pos != -1:
1203 allrevs = web.repo.changelog.revs(pos, 0)
1203 allrevs = web.repo.changelog.revs(pos, 0)
1204 revs = []
1204 revs = []
1205 for i in allrevs:
1205 for i in allrevs:
1206 revs.append(i)
1206 revs.append(i)
1207 if len(revs) >= revcount:
1207 if len(revs) >= revcount:
1208 break
1208 break
1209
1209
1210 # We have to feed a baseset to dagwalker as it is expecting smartset
1210 # We have to feed a baseset to dagwalker as it is expecting smartset
1211 # object. This does not have a big impact on hgweb performance itself
1211 # object. This does not have a big impact on hgweb performance itself
1212 # since hgweb graphing code is not itself lazy yet.
1212 # since hgweb graphing code is not itself lazy yet.
1213 dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
1213 dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
1214 # As we said one line above... not lazy.
1214 # As we said one line above... not lazy.
1215 tree = list(graphmod.colored(dag, web.repo))
1215 tree = list(graphmod.colored(dag, web.repo))
1216
1216
1217 def getcolumns(tree):
1217 def getcolumns(tree):
1218 cols = 0
1218 cols = 0
1219 for (id, type, ctx, vtx, edges) in tree:
1219 for (id, type, ctx, vtx, edges) in tree:
1220 if type != graphmod.CHANGESET:
1220 if type != graphmod.CHANGESET:
1221 continue
1221 continue
1222 cols = max(cols, max([edge[0] for edge in edges] or [0]),
1222 cols = max(cols, max([edge[0] for edge in edges] or [0]),
1223 max([edge[1] for edge in edges] or [0]))
1223 max([edge[1] for edge in edges] or [0]))
1224 return cols
1224 return cols
1225
1225
1226 def graphdata(usetuples, encodestr):
1226 def graphdata(usetuples, encodestr):
1227 data = []
1227 data = []
1228
1228
1229 row = 0
1229 row = 0
1230 for (id, type, ctx, vtx, edges) in tree:
1230 for (id, type, ctx, vtx, edges) in tree:
1231 if type != graphmod.CHANGESET:
1231 if type != graphmod.CHANGESET:
1232 continue
1232 continue
1233 node = str(ctx)
1233 node = str(ctx)
1234 age = encodestr(templatefilters.age(ctx.date()))
1234 age = encodestr(templatefilters.age(ctx.date()))
1235 desc = templatefilters.firstline(encodestr(ctx.description()))
1235 desc = templatefilters.firstline(encodestr(ctx.description()))
1236 desc = cgi.escape(templatefilters.nonempty(desc))
1236 desc = cgi.escape(templatefilters.nonempty(desc))
1237 user = cgi.escape(templatefilters.person(encodestr(ctx.user())))
1237 user = cgi.escape(templatefilters.person(encodestr(ctx.user())))
1238 branch = cgi.escape(encodestr(ctx.branch()))
1238 branch = cgi.escape(encodestr(ctx.branch()))
1239 try:
1239 try:
1240 branchnode = web.repo.branchtip(branch)
1240 branchnode = web.repo.branchtip(branch)
1241 except error.RepoLookupError:
1241 except error.RepoLookupError:
1242 branchnode = None
1242 branchnode = None
1243 branch = branch, branchnode == ctx.node()
1243 branch = branch, branchnode == ctx.node()
1244
1244
1245 if usetuples:
1245 if usetuples:
1246 data.append((node, vtx, edges, desc, user, age, branch,
1246 data.append((node, vtx, edges, desc, user, age, branch,
1247 [cgi.escape(encodestr(x)) for x in ctx.tags()],
1247 [cgi.escape(encodestr(x)) for x in ctx.tags()],
1248 [cgi.escape(encodestr(x))
1248 [cgi.escape(encodestr(x))
1249 for x in ctx.bookmarks()]))
1249 for x in ctx.bookmarks()]))
1250 else:
1250 else:
1251 edgedata = [{'col': edge[0], 'nextcol': edge[1],
1251 edgedata = [{'col': edge[0], 'nextcol': edge[1],
1252 'color': (edge[2] - 1) % 6 + 1,
1252 'color': (edge[2] - 1) % 6 + 1,
1253 'width': edge[3], 'bcolor': edge[4]}
1253 'width': edge[3], 'bcolor': edge[4]}
1254 for edge in edges]
1254 for edge in edges]
1255
1255
1256 data.append(
1256 data.append(
1257 {'node': node,
1257 {'node': node,
1258 'col': vtx[0],
1258 'col': vtx[0],
1259 'color': (vtx[1] - 1) % 6 + 1,
1259 'color': (vtx[1] - 1) % 6 + 1,
1260 'edges': edgedata,
1260 'edges': edgedata,
1261 'row': row,
1261 'row': row,
1262 'nextrow': row + 1,
1262 'nextrow': row + 1,
1263 'desc': desc,
1263 'desc': desc,
1264 'user': user,
1264 'user': user,
1265 'age': age,
1265 'age': age,
1266 'bookmarks': webutil.nodebookmarksdict(
1266 'bookmarks': webutil.nodebookmarksdict(
1267 web.repo, ctx.node()),
1267 web.repo, ctx.node()),
1268 'branches': webutil.nodebranchdict(web.repo, ctx),
1268 'branches': webutil.nodebranchdict(web.repo, ctx),
1269 'inbranch': webutil.nodeinbranch(web.repo, ctx),
1269 'inbranch': webutil.nodeinbranch(web.repo, ctx),
1270 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
1270 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
1271
1271
1272 row += 1
1272 row += 1
1273
1273
1274 return data
1274 return data
1275
1275
1276 cols = getcolumns(tree)
1276 cols = getcolumns(tree)
1277 rows = len(tree)
1277 rows = len(tree)
1278 canvasheight = (rows + 1) * bg_height - 27
1278 canvasheight = (rows + 1) * bg_height - 27
1279
1279
1280 return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
1280 return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
1281 uprev=uprev,
1281 uprev=uprev,
1282 lessvars=lessvars, morevars=morevars, downrev=downrev,
1282 lessvars=lessvars, morevars=morevars, downrev=downrev,
1283 cols=cols, rows=rows,
1283 cols=cols, rows=rows,
1284 canvaswidth=(cols + 1) * bg_height,
1284 canvaswidth=(cols + 1) * bg_height,
1285 truecanvasheight=rows * bg_height,
1285 truecanvasheight=rows * bg_height,
1286 canvasheight=canvasheight, bg_height=bg_height,
1286 canvasheight=canvasheight, bg_height=bg_height,
1287 # {jsdata} will be passed to |json, so it must be in utf-8
1287 # {jsdata} will be passed to |json, so it must be in utf-8
1288 jsdata=lambda **x: graphdata(True, encoding.fromlocal),
1288 jsdata=lambda **x: graphdata(True, encoding.fromlocal),
1289 nodes=lambda **x: graphdata(False, str),
1289 nodes=lambda **x: graphdata(False, str),
1290 node=ctx.hex(), changenav=changenav)
1290 node=ctx.hex(), changenav=changenav)
1291
1291
1292 def _getdoc(e):
1292 def _getdoc(e):
1293 doc = e[0].__doc__
1293 doc = e[0].__doc__
1294 if doc:
1294 if doc:
1295 doc = _(doc).partition('\n')[0]
1295 doc = _(doc).partition('\n')[0]
1296 else:
1296 else:
1297 doc = _('(no help text available)')
1297 doc = _('(no help text available)')
1298 return doc
1298 return doc
1299
1299
1300 @webcommand('help')
1300 @webcommand('help')
1301 def help(web, req, tmpl):
1301 def help(web, req, tmpl):
1302 """
1302 """
1303 /help[/{topic}]
1303 /help[/{topic}]
1304 ---------------
1304 ---------------
1305
1305
1306 Render help documentation.
1306 Render help documentation.
1307
1307
1308 This web command is roughly equivalent to :hg:`help`. If a ``topic``
1308 This web command is roughly equivalent to :hg:`help`. If a ``topic``
1309 is defined, that help topic will be rendered. If not, an index of
1309 is defined, that help topic will be rendered. If not, an index of
1310 available help topics will be rendered.
1310 available help topics will be rendered.
1311
1311
1312 The ``help`` template will be rendered when requesting help for a topic.
1312 The ``help`` template will be rendered when requesting help for a topic.
1313 ``helptopics`` will be rendered for the index of help topics.
1313 ``helptopics`` will be rendered for the index of help topics.
1314 """
1314 """
1315 from .. import commands, help as helpmod # avoid cycle
1315 from .. import commands, help as helpmod # avoid cycle
1316
1316
1317 topicname = req.form.get('node', [None])[0]
1317 topicname = req.form.get('node', [None])[0]
1318 if not topicname:
1318 if not topicname:
1319 def topics(**map):
1319 def topics(**map):
1320 for entries, summary, _doc in helpmod.helptable:
1320 for entries, summary, _doc in helpmod.helptable:
1321 yield {'topic': entries[0], 'summary': summary}
1321 yield {'topic': entries[0], 'summary': summary}
1322
1322
1323 early, other = [], []
1323 early, other = [], []
1324 primary = lambda s: s.partition('|')[0]
1324 primary = lambda s: s.partition('|')[0]
1325 for c, e in commands.table.iteritems():
1325 for c, e in commands.table.iteritems():
1326 doc = _getdoc(e)
1326 doc = _getdoc(e)
1327 if 'DEPRECATED' in doc or c.startswith('debug'):
1327 if 'DEPRECATED' in doc or c.startswith('debug'):
1328 continue
1328 continue
1329 cmd = primary(c)
1329 cmd = primary(c)
1330 if cmd.startswith('^'):
1330 if cmd.startswith('^'):
1331 early.append((cmd[1:], doc))
1331 early.append((cmd[1:], doc))
1332 else:
1332 else:
1333 other.append((cmd, doc))
1333 other.append((cmd, doc))
1334
1334
1335 early.sort()
1335 early.sort()
1336 other.sort()
1336 other.sort()
1337
1337
1338 def earlycommands(**map):
1338 def earlycommands(**map):
1339 for c, doc in early:
1339 for c, doc in early:
1340 yield {'topic': c, 'summary': doc}
1340 yield {'topic': c, 'summary': doc}
1341
1341
1342 def othercommands(**map):
1342 def othercommands(**map):
1343 for c, doc in other:
1343 for c, doc in other:
1344 yield {'topic': c, 'summary': doc}
1344 yield {'topic': c, 'summary': doc}
1345
1345
1346 return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
1346 return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
1347 othercommands=othercommands, title='Index')
1347 othercommands=othercommands, title='Index')
1348
1348
1349 # Render an index of sub-topics.
1349 # Render an index of sub-topics.
1350 if topicname in helpmod.subtopics:
1350 if topicname in helpmod.subtopics:
1351 topics = []
1351 topics = []
1352 for entries, summary, _doc in helpmod.subtopics[topicname]:
1352 for entries, summary, _doc in helpmod.subtopics[topicname]:
1353 topics.append({
1353 topics.append({
1354 'topic': '%s.%s' % (topicname, entries[0]),
1354 'topic': '%s.%s' % (topicname, entries[0]),
1355 'basename': entries[0],
1355 'basename': entries[0],
1356 'summary': summary,
1356 'summary': summary,
1357 })
1357 })
1358
1358
1359 return tmpl('helptopics', topics=topics, title=topicname,
1359 return tmpl('helptopics', topics=topics, title=topicname,
1360 subindex=True)
1360 subindex=True)
1361
1361
1362 u = webutil.wsgiui.load()
1362 u = webutil.wsgiui.load()
1363 u.verbose = True
1363 u.verbose = True
1364
1364
1365 # Render a page from a sub-topic.
1365 # Render a page from a sub-topic.
1366 if '.' in topicname:
1366 if '.' in topicname:
1367 # TODO implement support for rendering sections, like
1367 # TODO implement support for rendering sections, like
1368 # `hg help` works.
1368 # `hg help` works.
1369 topic, subtopic = topicname.split('.', 1)
1369 topic, subtopic = topicname.split('.', 1)
1370 if topic not in helpmod.subtopics:
1370 if topic not in helpmod.subtopics:
1371 raise ErrorResponse(HTTP_NOT_FOUND)
1371 raise ErrorResponse(HTTP_NOT_FOUND)
1372 else:
1372 else:
1373 topic = topicname
1373 topic = topicname
1374 subtopic = None
1374 subtopic = None
1375
1375
1376 try:
1376 try:
1377 doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
1377 doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
1378 except error.UnknownCommand:
1378 except error.UnknownCommand:
1379 raise ErrorResponse(HTTP_NOT_FOUND)
1379 raise ErrorResponse(HTTP_NOT_FOUND)
1380 return tmpl('help', topic=topicname, doc=doc)
1380 return tmpl('help', topic=topicname, doc=doc)
1381
1381
1382 # tell hggettext to extract docstrings from these functions:
1382 # tell hggettext to extract docstrings from these functions:
1383 i18nfunctions = commands.values()
1383 i18nfunctions = commands.values()
@@ -1,2020 +1,2018
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 dagop,
14 dagop,
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 pathutil,
22 pathutil,
23 phases,
23 phases,
24 registrar,
24 registrar,
25 repoview,
25 repoview,
26 revsetlang,
26 revsetlang,
27 scmutil,
27 scmutil,
28 smartset,
28 smartset,
29 util,
29 util,
30 )
30 )
31
31
32 # helpers for processing parsed tree
32 # helpers for processing parsed tree
33 getsymbol = revsetlang.getsymbol
33 getsymbol = revsetlang.getsymbol
34 getstring = revsetlang.getstring
34 getstring = revsetlang.getstring
35 getinteger = revsetlang.getinteger
35 getinteger = revsetlang.getinteger
36 getboolean = revsetlang.getboolean
36 getboolean = revsetlang.getboolean
37 getlist = revsetlang.getlist
37 getlist = revsetlang.getlist
38 getrange = revsetlang.getrange
38 getrange = revsetlang.getrange
39 getargs = revsetlang.getargs
39 getargs = revsetlang.getargs
40 getargsdict = revsetlang.getargsdict
40 getargsdict = revsetlang.getargsdict
41
41
42 # constants used as an argument of match() and matchany()
42 # constants used as an argument of match() and matchany()
43 anyorder = revsetlang.anyorder
43 anyorder = revsetlang.anyorder
44 defineorder = revsetlang.defineorder
44 defineorder = revsetlang.defineorder
45 followorder = revsetlang.followorder
45 followorder = revsetlang.followorder
46
46
47 baseset = smartset.baseset
47 baseset = smartset.baseset
48 generatorset = smartset.generatorset
48 generatorset = smartset.generatorset
49 spanset = smartset.spanset
49 spanset = smartset.spanset
50 fullreposet = smartset.fullreposet
50 fullreposet = smartset.fullreposet
51
51
52 # helpers
52 # helpers
53
53
54 def getset(repo, subset, x):
54 def getset(repo, subset, x):
55 if not x:
55 if not x:
56 raise error.ParseError(_("missing argument"))
56 raise error.ParseError(_("missing argument"))
57 return methods[x[0]](repo, subset, *x[1:])
57 return methods[x[0]](repo, subset, *x[1:])
58
58
59 def _getrevsource(repo, r):
59 def _getrevsource(repo, r):
60 extra = repo[r].extra()
60 extra = repo[r].extra()
61 for label in ('source', 'transplant_source', 'rebase_source'):
61 for label in ('source', 'transplant_source', 'rebase_source'):
62 if label in extra:
62 if label in extra:
63 try:
63 try:
64 return repo[extra[label]].rev()
64 return repo[extra[label]].rev()
65 except error.RepoLookupError:
65 except error.RepoLookupError:
66 pass
66 pass
67 return None
67 return None
68
68
69 # operator methods
69 # operator methods
70
70
71 def stringset(repo, subset, x):
71 def stringset(repo, subset, x):
72 x = scmutil.intrev(repo[x])
72 x = scmutil.intrev(repo[x])
73 if (x in subset
73 if (x in subset
74 or x == node.nullrev and isinstance(subset, fullreposet)):
74 or x == node.nullrev and isinstance(subset, fullreposet)):
75 return baseset([x])
75 return baseset([x])
76 return baseset()
76 return baseset()
77
77
78 def rangeset(repo, subset, x, y, order):
78 def rangeset(repo, subset, x, y, order):
79 m = getset(repo, fullreposet(repo), x)
79 m = getset(repo, fullreposet(repo), x)
80 n = getset(repo, fullreposet(repo), y)
80 n = getset(repo, fullreposet(repo), y)
81
81
82 if not m or not n:
82 if not m or not n:
83 return baseset()
83 return baseset()
84 return _makerangeset(repo, subset, m.first(), n.last(), order)
84 return _makerangeset(repo, subset, m.first(), n.last(), order)
85
85
86 def rangeall(repo, subset, x, order):
86 def rangeall(repo, subset, x, order):
87 assert x is None
87 assert x is None
88 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
88 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
89
89
90 def rangepre(repo, subset, y, order):
90 def rangepre(repo, subset, y, order):
91 # ':y' can't be rewritten to '0:y' since '0' may be hidden
91 # ':y' can't be rewritten to '0:y' since '0' may be hidden
92 n = getset(repo, fullreposet(repo), y)
92 n = getset(repo, fullreposet(repo), y)
93 if not n:
93 if not n:
94 return baseset()
94 return baseset()
95 return _makerangeset(repo, subset, 0, n.last(), order)
95 return _makerangeset(repo, subset, 0, n.last(), order)
96
96
97 def rangepost(repo, subset, x, order):
97 def rangepost(repo, subset, x, order):
98 m = getset(repo, fullreposet(repo), x)
98 m = getset(repo, fullreposet(repo), x)
99 if not m:
99 if not m:
100 return baseset()
100 return baseset()
101 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
101 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
102
102
103 def _makerangeset(repo, subset, m, n, order):
103 def _makerangeset(repo, subset, m, n, order):
104 if m == n:
104 if m == n:
105 r = baseset([m])
105 r = baseset([m])
106 elif n == node.wdirrev:
106 elif n == node.wdirrev:
107 r = spanset(repo, m, len(repo)) + baseset([n])
107 r = spanset(repo, m, len(repo)) + baseset([n])
108 elif m == node.wdirrev:
108 elif m == node.wdirrev:
109 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
109 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
110 elif m < n:
110 elif m < n:
111 r = spanset(repo, m, n + 1)
111 r = spanset(repo, m, n + 1)
112 else:
112 else:
113 r = spanset(repo, m, n - 1)
113 r = spanset(repo, m, n - 1)
114
114
115 if order == defineorder:
115 if order == defineorder:
116 return r & subset
116 return r & subset
117 else:
117 else:
118 # carrying the sorting over when possible would be more efficient
118 # carrying the sorting over when possible would be more efficient
119 return subset & r
119 return subset & r
120
120
121 def dagrange(repo, subset, x, y, order):
121 def dagrange(repo, subset, x, y, order):
122 r = fullreposet(repo)
122 r = fullreposet(repo)
123 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
123 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
124 includepath=True)
124 includepath=True)
125 return subset & xs
125 return subset & xs
126
126
127 def andset(repo, subset, x, y, order):
127 def andset(repo, subset, x, y, order):
128 return getset(repo, getset(repo, subset, x), y)
128 return getset(repo, getset(repo, subset, x), y)
129
129
130 def differenceset(repo, subset, x, y, order):
130 def differenceset(repo, subset, x, y, order):
131 return getset(repo, subset, x) - getset(repo, subset, y)
131 return getset(repo, subset, x) - getset(repo, subset, y)
132
132
133 def _orsetlist(repo, subset, xs):
133 def _orsetlist(repo, subset, xs):
134 assert xs
134 assert xs
135 if len(xs) == 1:
135 if len(xs) == 1:
136 return getset(repo, subset, xs[0])
136 return getset(repo, subset, xs[0])
137 p = len(xs) // 2
137 p = len(xs) // 2
138 a = _orsetlist(repo, subset, xs[:p])
138 a = _orsetlist(repo, subset, xs[:p])
139 b = _orsetlist(repo, subset, xs[p:])
139 b = _orsetlist(repo, subset, xs[p:])
140 return a + b
140 return a + b
141
141
142 def orset(repo, subset, x, order):
142 def orset(repo, subset, x, order):
143 xs = getlist(x)
143 xs = getlist(x)
144 if order == followorder:
144 if order == followorder:
145 # slow path to take the subset order
145 # slow path to take the subset order
146 return subset & _orsetlist(repo, fullreposet(repo), xs)
146 return subset & _orsetlist(repo, fullreposet(repo), xs)
147 else:
147 else:
148 return _orsetlist(repo, subset, xs)
148 return _orsetlist(repo, subset, xs)
149
149
150 def notset(repo, subset, x, order):
150 def notset(repo, subset, x, order):
151 return subset - getset(repo, subset, x)
151 return subset - getset(repo, subset, x)
152
152
153 def listset(repo, subset, *xs):
153 def listset(repo, subset, *xs):
154 raise error.ParseError(_("can't use a list in this context"),
154 raise error.ParseError(_("can't use a list in this context"),
155 hint=_('see hg help "revsets.x or y"'))
155 hint=_('see hg help "revsets.x or y"'))
156
156
157 def keyvaluepair(repo, subset, k, v):
157 def keyvaluepair(repo, subset, k, v):
158 raise error.ParseError(_("can't use a key-value pair in this context"))
158 raise error.ParseError(_("can't use a key-value pair in this context"))
159
159
160 def func(repo, subset, a, b, order):
160 def func(repo, subset, a, b, order):
161 f = getsymbol(a)
161 f = getsymbol(a)
162 if f in symbols:
162 if f in symbols:
163 func = symbols[f]
163 func = symbols[f]
164 if getattr(func, '_takeorder', False):
164 if getattr(func, '_takeorder', False):
165 return func(repo, subset, b, order)
165 return func(repo, subset, b, order)
166 return func(repo, subset, b)
166 return func(repo, subset, b)
167
167
168 keep = lambda fn: getattr(fn, '__doc__', None) is not None
168 keep = lambda fn: getattr(fn, '__doc__', None) is not None
169
169
170 syms = [s for (s, fn) in symbols.items() if keep(fn)]
170 syms = [s for (s, fn) in symbols.items() if keep(fn)]
171 raise error.UnknownIdentifier(f, syms)
171 raise error.UnknownIdentifier(f, syms)
172
172
173 # functions
173 # functions
174
174
175 # symbols are callables like:
175 # symbols are callables like:
176 # fn(repo, subset, x)
176 # fn(repo, subset, x)
177 # with:
177 # with:
178 # repo - current repository instance
178 # repo - current repository instance
179 # subset - of revisions to be examined
179 # subset - of revisions to be examined
180 # x - argument in tree form
180 # x - argument in tree form
181 symbols = {}
181 symbols = {}
182
182
183 # symbols which can't be used for a DoS attack for any given input
183 # symbols which can't be used for a DoS attack for any given input
184 # (e.g. those which accept regexes as plain strings shouldn't be included)
184 # (e.g. those which accept regexes as plain strings shouldn't be included)
185 # functions that just return a lot of changesets (like all) don't count here
185 # functions that just return a lot of changesets (like all) don't count here
186 safesymbols = set()
186 safesymbols = set()
187
187
188 predicate = registrar.revsetpredicate()
188 predicate = registrar.revsetpredicate()
189
189
190 @predicate('_destupdate')
190 @predicate('_destupdate')
191 def _destupdate(repo, subset, x):
191 def _destupdate(repo, subset, x):
192 # experimental revset for update destination
192 # experimental revset for update destination
193 args = getargsdict(x, 'limit', 'clean')
193 args = getargsdict(x, 'limit', 'clean')
194 return subset & baseset([destutil.destupdate(repo, **args)[0]])
194 return subset & baseset([destutil.destupdate(repo, **args)[0]])
195
195
196 @predicate('_destmerge')
196 @predicate('_destmerge')
197 def _destmerge(repo, subset, x):
197 def _destmerge(repo, subset, x):
198 # experimental revset for merge destination
198 # experimental revset for merge destination
199 sourceset = None
199 sourceset = None
200 if x is not None:
200 if x is not None:
201 sourceset = getset(repo, fullreposet(repo), x)
201 sourceset = getset(repo, fullreposet(repo), x)
202 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
202 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
203
203
204 @predicate('adds(pattern)', safe=True)
204 @predicate('adds(pattern)', safe=True)
205 def adds(repo, subset, x):
205 def adds(repo, subset, x):
206 """Changesets that add a file matching pattern.
206 """Changesets that add a file matching pattern.
207
207
208 The pattern without explicit kind like ``glob:`` is expected to be
208 The pattern without explicit kind like ``glob:`` is expected to be
209 relative to the current directory and match against a file or a
209 relative to the current directory and match against a file or a
210 directory.
210 directory.
211 """
211 """
212 # i18n: "adds" is a keyword
212 # i18n: "adds" is a keyword
213 pat = getstring(x, _("adds requires a pattern"))
213 pat = getstring(x, _("adds requires a pattern"))
214 return checkstatus(repo, subset, pat, 1)
214 return checkstatus(repo, subset, pat, 1)
215
215
216 @predicate('ancestor(*changeset)', safe=True)
216 @predicate('ancestor(*changeset)', safe=True)
217 def ancestor(repo, subset, x):
217 def ancestor(repo, subset, x):
218 """A greatest common ancestor of the changesets.
218 """A greatest common ancestor of the changesets.
219
219
220 Accepts 0 or more changesets.
220 Accepts 0 or more changesets.
221 Will return empty list when passed no args.
221 Will return empty list when passed no args.
222 Greatest common ancestor of a single changeset is that changeset.
222 Greatest common ancestor of a single changeset is that changeset.
223 """
223 """
224 # i18n: "ancestor" is a keyword
224 # i18n: "ancestor" is a keyword
225 l = getlist(x)
225 l = getlist(x)
226 rl = fullreposet(repo)
226 rl = fullreposet(repo)
227 anc = None
227 anc = None
228
228
229 # (getset(repo, rl, i) for i in l) generates a list of lists
229 # (getset(repo, rl, i) for i in l) generates a list of lists
230 for revs in (getset(repo, rl, i) for i in l):
230 for revs in (getset(repo, rl, i) for i in l):
231 for r in revs:
231 for r in revs:
232 if anc is None:
232 if anc is None:
233 anc = repo[r]
233 anc = repo[r]
234 else:
234 else:
235 anc = anc.ancestor(repo[r])
235 anc = anc.ancestor(repo[r])
236
236
237 if anc is not None and anc.rev() in subset:
237 if anc is not None and anc.rev() in subset:
238 return baseset([anc.rev()])
238 return baseset([anc.rev()])
239 return baseset()
239 return baseset()
240
240
241 def _ancestors(repo, subset, x, followfirst=False):
241 def _ancestors(repo, subset, x, followfirst=False):
242 heads = getset(repo, fullreposet(repo), x)
242 heads = getset(repo, fullreposet(repo), x)
243 if not heads:
243 if not heads:
244 return baseset()
244 return baseset()
245 s = dagop.revancestors(repo, heads, followfirst)
245 s = dagop.revancestors(repo, heads, followfirst)
246 return subset & s
246 return subset & s
247
247
248 @predicate('ancestors(set)', safe=True)
248 @predicate('ancestors(set)', safe=True)
249 def ancestors(repo, subset, x):
249 def ancestors(repo, subset, x):
250 """Changesets that are ancestors of a changeset in set.
250 """Changesets that are ancestors of a changeset in set.
251 """
251 """
252 return _ancestors(repo, subset, x)
252 return _ancestors(repo, subset, x)
253
253
254 @predicate('_firstancestors', safe=True)
254 @predicate('_firstancestors', safe=True)
255 def _firstancestors(repo, subset, x):
255 def _firstancestors(repo, subset, x):
256 # ``_firstancestors(set)``
256 # ``_firstancestors(set)``
257 # Like ``ancestors(set)`` but follows only the first parents.
257 # Like ``ancestors(set)`` but follows only the first parents.
258 return _ancestors(repo, subset, x, followfirst=True)
258 return _ancestors(repo, subset, x, followfirst=True)
259
259
260 def _childrenspec(repo, subset, x, n, order):
260 def _childrenspec(repo, subset, x, n, order):
261 """Changesets that are the Nth child of a changeset
261 """Changesets that are the Nth child of a changeset
262 in set.
262 in set.
263 """
263 """
264 cs = set()
264 cs = set()
265 for r in getset(repo, fullreposet(repo), x):
265 for r in getset(repo, fullreposet(repo), x):
266 for i in range(n):
266 for i in range(n):
267 c = repo[r].children()
267 c = repo[r].children()
268 if len(c) == 0:
268 if len(c) == 0:
269 break
269 break
270 if len(c) > 1:
270 if len(c) > 1:
271 raise error.RepoLookupError(
271 raise error.RepoLookupError(
272 _("revision in set has more than one child"))
272 _("revision in set has more than one child"))
273 r = c[0].rev()
273 r = c[0].rev()
274 else:
274 else:
275 cs.add(r)
275 cs.add(r)
276 return subset & cs
276 return subset & cs
277
277
278 def ancestorspec(repo, subset, x, n, order):
278 def ancestorspec(repo, subset, x, n, order):
279 """``set~n``
279 """``set~n``
280 Changesets that are the Nth ancestor (first parents only) of a changeset
280 Changesets that are the Nth ancestor (first parents only) of a changeset
281 in set.
281 in set.
282 """
282 """
283 n = getinteger(n, _("~ expects a number"))
283 n = getinteger(n, _("~ expects a number"))
284 if n < 0:
284 if n < 0:
285 # children lookup
285 # children lookup
286 return _childrenspec(repo, subset, x, -n, order)
286 return _childrenspec(repo, subset, x, -n, order)
287 ps = set()
287 ps = set()
288 cl = repo.changelog
288 cl = repo.changelog
289 for r in getset(repo, fullreposet(repo), x):
289 for r in getset(repo, fullreposet(repo), x):
290 for i in range(n):
290 for i in range(n):
291 try:
291 try:
292 r = cl.parentrevs(r)[0]
292 r = cl.parentrevs(r)[0]
293 except error.WdirUnsupported:
293 except error.WdirUnsupported:
294 r = repo[r].parents()[0].rev()
294 r = repo[r].parents()[0].rev()
295 ps.add(r)
295 ps.add(r)
296 return subset & ps
296 return subset & ps
297
297
298 @predicate('author(string)', safe=True)
298 @predicate('author(string)', safe=True)
299 def author(repo, subset, x):
299 def author(repo, subset, x):
300 """Alias for ``user(string)``.
300 """Alias for ``user(string)``.
301 """
301 """
302 # i18n: "author" is a keyword
302 # i18n: "author" is a keyword
303 n = getstring(x, _("author requires a string"))
303 n = getstring(x, _("author requires a string"))
304 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
304 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
305 return subset.filter(lambda x: matcher(repo[x].user()),
305 return subset.filter(lambda x: matcher(repo[x].user()),
306 condrepr=('<user %r>', n))
306 condrepr=('<user %r>', n))
307
307
308 @predicate('bisect(string)', safe=True)
308 @predicate('bisect(string)', safe=True)
309 def bisect(repo, subset, x):
309 def bisect(repo, subset, x):
310 """Changesets marked in the specified bisect status:
310 """Changesets marked in the specified bisect status:
311
311
312 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
312 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
313 - ``goods``, ``bads`` : csets topologically good/bad
313 - ``goods``, ``bads`` : csets topologically good/bad
314 - ``range`` : csets taking part in the bisection
314 - ``range`` : csets taking part in the bisection
315 - ``pruned`` : csets that are goods, bads or skipped
315 - ``pruned`` : csets that are goods, bads or skipped
316 - ``untested`` : csets whose fate is yet unknown
316 - ``untested`` : csets whose fate is yet unknown
317 - ``ignored`` : csets ignored due to DAG topology
317 - ``ignored`` : csets ignored due to DAG topology
318 - ``current`` : the cset currently being bisected
318 - ``current`` : the cset currently being bisected
319 """
319 """
320 # i18n: "bisect" is a keyword
320 # i18n: "bisect" is a keyword
321 status = getstring(x, _("bisect requires a string")).lower()
321 status = getstring(x, _("bisect requires a string")).lower()
322 state = set(hbisect.get(repo, status))
322 state = set(hbisect.get(repo, status))
323 return subset & state
323 return subset & state
324
324
325 # Backward-compatibility
325 # Backward-compatibility
326 # - no help entry so that we do not advertise it any more
326 # - no help entry so that we do not advertise it any more
327 @predicate('bisected', safe=True)
327 @predicate('bisected', safe=True)
328 def bisected(repo, subset, x):
328 def bisected(repo, subset, x):
329 return bisect(repo, subset, x)
329 return bisect(repo, subset, x)
330
330
331 @predicate('bookmark([name])', safe=True)
331 @predicate('bookmark([name])', safe=True)
332 def bookmark(repo, subset, x):
332 def bookmark(repo, subset, x):
333 """The named bookmark or all bookmarks.
333 """The named bookmark or all bookmarks.
334
334
335 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
335 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
336 """
336 """
337 # i18n: "bookmark" is a keyword
337 # i18n: "bookmark" is a keyword
338 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
338 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
339 if args:
339 if args:
340 bm = getstring(args[0],
340 bm = getstring(args[0],
341 # i18n: "bookmark" is a keyword
341 # i18n: "bookmark" is a keyword
342 _('the argument to bookmark must be a string'))
342 _('the argument to bookmark must be a string'))
343 kind, pattern, matcher = util.stringmatcher(bm)
343 kind, pattern, matcher = util.stringmatcher(bm)
344 bms = set()
344 bms = set()
345 if kind == 'literal':
345 if kind == 'literal':
346 bmrev = repo._bookmarks.get(pattern, None)
346 bmrev = repo._bookmarks.get(pattern, None)
347 if not bmrev:
347 if not bmrev:
348 raise error.RepoLookupError(_("bookmark '%s' does not exist")
348 raise error.RepoLookupError(_("bookmark '%s' does not exist")
349 % pattern)
349 % pattern)
350 bms.add(repo[bmrev].rev())
350 bms.add(repo[bmrev].rev())
351 else:
351 else:
352 matchrevs = set()
352 matchrevs = set()
353 for name, bmrev in repo._bookmarks.iteritems():
353 for name, bmrev in repo._bookmarks.iteritems():
354 if matcher(name):
354 if matcher(name):
355 matchrevs.add(bmrev)
355 matchrevs.add(bmrev)
356 if not matchrevs:
356 if not matchrevs:
357 raise error.RepoLookupError(_("no bookmarks exist"
357 raise error.RepoLookupError(_("no bookmarks exist"
358 " that match '%s'") % pattern)
358 " that match '%s'") % pattern)
359 for bmrev in matchrevs:
359 for bmrev in matchrevs:
360 bms.add(repo[bmrev].rev())
360 bms.add(repo[bmrev].rev())
361 else:
361 else:
362 bms = {repo[r].rev() for r in repo._bookmarks.values()}
362 bms = {repo[r].rev() for r in repo._bookmarks.values()}
363 bms -= {node.nullrev}
363 bms -= {node.nullrev}
364 return subset & bms
364 return subset & bms
365
365
366 @predicate('branch(string or set)', safe=True)
366 @predicate('branch(string or set)', safe=True)
367 def branch(repo, subset, x):
367 def branch(repo, subset, x):
368 """
368 """
369 All changesets belonging to the given branch or the branches of the given
369 All changesets belonging to the given branch or the branches of the given
370 changesets.
370 changesets.
371
371
372 Pattern matching is supported for `string`. See
372 Pattern matching is supported for `string`. See
373 :hg:`help revisions.patterns`.
373 :hg:`help revisions.patterns`.
374 """
374 """
375 getbi = repo.revbranchcache().branchinfo
375 getbi = repo.revbranchcache().branchinfo
376 def getbranch(r):
376 def getbranch(r):
377 try:
377 try:
378 return getbi(r)[0]
378 return getbi(r)[0]
379 except error.WdirUnsupported:
379 except error.WdirUnsupported:
380 return repo[r].branch()
380 return repo[r].branch()
381
381
382 try:
382 try:
383 b = getstring(x, '')
383 b = getstring(x, '')
384 except error.ParseError:
384 except error.ParseError:
385 # not a string, but another revspec, e.g. tip()
385 # not a string, but another revspec, e.g. tip()
386 pass
386 pass
387 else:
387 else:
388 kind, pattern, matcher = util.stringmatcher(b)
388 kind, pattern, matcher = util.stringmatcher(b)
389 if kind == 'literal':
389 if kind == 'literal':
390 # note: falls through to the revspec case if no branch with
390 # note: falls through to the revspec case if no branch with
391 # this name exists and pattern kind is not specified explicitly
391 # this name exists and pattern kind is not specified explicitly
392 if pattern in repo.branchmap():
392 if pattern in repo.branchmap():
393 return subset.filter(lambda r: matcher(getbranch(r)),
393 return subset.filter(lambda r: matcher(getbranch(r)),
394 condrepr=('<branch %r>', b))
394 condrepr=('<branch %r>', b))
395 if b.startswith('literal:'):
395 if b.startswith('literal:'):
396 raise error.RepoLookupError(_("branch '%s' does not exist")
396 raise error.RepoLookupError(_("branch '%s' does not exist")
397 % pattern)
397 % pattern)
398 else:
398 else:
399 return subset.filter(lambda r: matcher(getbranch(r)),
399 return subset.filter(lambda r: matcher(getbranch(r)),
400 condrepr=('<branch %r>', b))
400 condrepr=('<branch %r>', b))
401
401
402 s = getset(repo, fullreposet(repo), x)
402 s = getset(repo, fullreposet(repo), x)
403 b = set()
403 b = set()
404 for r in s:
404 for r in s:
405 b.add(getbranch(r))
405 b.add(getbranch(r))
406 c = s.__contains__
406 c = s.__contains__
407 return subset.filter(lambda r: c(r) or getbranch(r) in b,
407 return subset.filter(lambda r: c(r) or getbranch(r) in b,
408 condrepr=lambda: '<branch %r>' % sorted(b))
408 condrepr=lambda: '<branch %r>' % sorted(b))
409
409
410 @predicate('bumped()', safe=True)
410 @predicate('bumped()', safe=True)
411 def bumped(repo, subset, x):
411 def bumped(repo, subset, x):
412 """Mutable changesets marked as successors of public changesets.
412 """Mutable changesets marked as successors of public changesets.
413
413
414 Only non-public and non-obsolete changesets can be `bumped`.
414 Only non-public and non-obsolete changesets can be `bumped`.
415 """
415 """
416 # i18n: "bumped" is a keyword
416 # i18n: "bumped" is a keyword
417 getargs(x, 0, 0, _("bumped takes no arguments"))
417 getargs(x, 0, 0, _("bumped takes no arguments"))
418 bumped = obsmod.getrevs(repo, 'bumped')
418 bumped = obsmod.getrevs(repo, 'bumped')
419 return subset & bumped
419 return subset & bumped
420
420
421 @predicate('bundle()', safe=True)
421 @predicate('bundle()', safe=True)
422 def bundle(repo, subset, x):
422 def bundle(repo, subset, x):
423 """Changesets in the bundle.
423 """Changesets in the bundle.
424
424
425 Bundle must be specified by the -R option."""
425 Bundle must be specified by the -R option."""
426
426
427 try:
427 try:
428 bundlerevs = repo.changelog.bundlerevs
428 bundlerevs = repo.changelog.bundlerevs
429 except AttributeError:
429 except AttributeError:
430 raise error.Abort(_("no bundle provided - specify with -R"))
430 raise error.Abort(_("no bundle provided - specify with -R"))
431 return subset & bundlerevs
431 return subset & bundlerevs
432
432
433 def checkstatus(repo, subset, pat, field):
433 def checkstatus(repo, subset, pat, field):
434 hasset = matchmod.patkind(pat) == 'set'
434 hasset = matchmod.patkind(pat) == 'set'
435
435
436 mcache = [None]
436 mcache = [None]
437 def matches(x):
437 def matches(x):
438 c = repo[x]
438 c = repo[x]
439 if not mcache[0] or hasset:
439 if not mcache[0] or hasset:
440 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
440 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
441 m = mcache[0]
441 m = mcache[0]
442 fname = None
442 fname = None
443 if not m.anypats() and len(m.files()) == 1:
443 if not m.anypats() and len(m.files()) == 1:
444 fname = m.files()[0]
444 fname = m.files()[0]
445 if fname is not None:
445 if fname is not None:
446 if fname not in c.files():
446 if fname not in c.files():
447 return False
447 return False
448 else:
448 else:
449 for f in c.files():
449 for f in c.files():
450 if m(f):
450 if m(f):
451 break
451 break
452 else:
452 else:
453 return False
453 return False
454 files = repo.status(c.p1().node(), c.node())[field]
454 files = repo.status(c.p1().node(), c.node())[field]
455 if fname is not None:
455 if fname is not None:
456 if fname in files:
456 if fname in files:
457 return True
457 return True
458 else:
458 else:
459 for f in files:
459 for f in files:
460 if m(f):
460 if m(f):
461 return True
461 return True
462
462
463 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
463 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
464
464
465 def _children(repo, subset, parentset):
465 def _children(repo, subset, parentset):
466 if not parentset:
466 if not parentset:
467 return baseset()
467 return baseset()
468 cs = set()
468 cs = set()
469 pr = repo.changelog.parentrevs
469 pr = repo.changelog.parentrevs
470 minrev = parentset.min()
470 minrev = parentset.min()
471 nullrev = node.nullrev
471 nullrev = node.nullrev
472 for r in subset:
472 for r in subset:
473 if r <= minrev:
473 if r <= minrev:
474 continue
474 continue
475 p1, p2 = pr(r)
475 p1, p2 = pr(r)
476 if p1 in parentset:
476 if p1 in parentset:
477 cs.add(r)
477 cs.add(r)
478 if p2 != nullrev and p2 in parentset:
478 if p2 != nullrev and p2 in parentset:
479 cs.add(r)
479 cs.add(r)
480 return baseset(cs)
480 return baseset(cs)
481
481
482 @predicate('children(set)', safe=True)
482 @predicate('children(set)', safe=True)
483 def children(repo, subset, x):
483 def children(repo, subset, x):
484 """Child changesets of changesets in set.
484 """Child changesets of changesets in set.
485 """
485 """
486 s = getset(repo, fullreposet(repo), x)
486 s = getset(repo, fullreposet(repo), x)
487 cs = _children(repo, subset, s)
487 cs = _children(repo, subset, s)
488 return subset & cs
488 return subset & cs
489
489
490 @predicate('closed()', safe=True)
490 @predicate('closed()', safe=True)
491 def closed(repo, subset, x):
491 def closed(repo, subset, x):
492 """Changeset is closed.
492 """Changeset is closed.
493 """
493 """
494 # i18n: "closed" is a keyword
494 # i18n: "closed" is a keyword
495 getargs(x, 0, 0, _("closed takes no arguments"))
495 getargs(x, 0, 0, _("closed takes no arguments"))
496 return subset.filter(lambda r: repo[r].closesbranch(),
496 return subset.filter(lambda r: repo[r].closesbranch(),
497 condrepr='<branch closed>')
497 condrepr='<branch closed>')
498
498
499 @predicate('contains(pattern)')
499 @predicate('contains(pattern)')
500 def contains(repo, subset, x):
500 def contains(repo, subset, x):
501 """The revision's manifest contains a file matching pattern (but might not
501 """The revision's manifest contains a file matching pattern (but might not
502 modify it). See :hg:`help patterns` for information about file patterns.
502 modify it). See :hg:`help patterns` for information about file patterns.
503
503
504 The pattern without explicit kind like ``glob:`` is expected to be
504 The pattern without explicit kind like ``glob:`` is expected to be
505 relative to the current directory and match against a file exactly
505 relative to the current directory and match against a file exactly
506 for efficiency.
506 for efficiency.
507 """
507 """
508 # i18n: "contains" is a keyword
508 # i18n: "contains" is a keyword
509 pat = getstring(x, _("contains requires a pattern"))
509 pat = getstring(x, _("contains requires a pattern"))
510
510
511 def matches(x):
511 def matches(x):
512 if not matchmod.patkind(pat):
512 if not matchmod.patkind(pat):
513 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
513 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
514 if pats in repo[x]:
514 if pats in repo[x]:
515 return True
515 return True
516 else:
516 else:
517 c = repo[x]
517 c = repo[x]
518 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
518 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
519 for f in c.manifest():
519 for f in c.manifest():
520 if m(f):
520 if m(f):
521 return True
521 return True
522 return False
522 return False
523
523
524 return subset.filter(matches, condrepr=('<contains %r>', pat))
524 return subset.filter(matches, condrepr=('<contains %r>', pat))
525
525
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # 'convert_revision' is recorded in extra by the convert extension;
        # a prefix match on it lets users pass abbreviated source hashes
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    # pass the predicate directly instead of wrapping it in a lambda
    return subset.filter(_matchvalue,
                         condrepr=('<converted %r>', rev))
548
548
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    datematcher = util.matchdate(ds)

    # keep only revisions whose commit timestamp falls in the interval
    def matches(r):
        return datematcher(repo[r].date()[0])

    return subset.filter(matches, condrepr=('<date %r>', ds))
558
558
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))

    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    # test each candidate's full commit message against the matcher
    def matches(r):
        return matcher(repo[r].description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))
573
573
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants() and _firstdescendants():
    # returns members of 'subset' that are roots of, or descend from, the
    # revisions selected by 'x'.
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = dagop.revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # subset has no defined order: fall back to intersecting, which
        # preserves subset's own iteration order
        result = subset & result
    return result
592
592
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # full-history variant: follow both parents
    return _descendants(repo, subset, x, followfirst=False)
598
598
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    followfirst = True
    return _descendants(repo, subset, x, followfirst)
604
604
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = []

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
649
649
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # obsolescence markers precompute the divergent set for us
    return subset & obsmod.getrevs(repo, 'divergent')
659
659
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the 'extinct' revcache is maintained by the obsolete module
    return subset & obsmod.getrevs(repo, 'extinct')
668
668
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        # when no value was given, mere presence of the label is a match
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    # pass the predicate directly instead of wrapping it in a lambda
    return subset.filter(_matchvalue,
                         condrepr=('<extra[%r] %r>', label, value))
698
698
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it once, no need to walk the working directory
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: filenode -> changelog rev already resolved by the scan below,
        # so a later filelog rev with the same node is answered from cache
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                # scanpos=None marks a scan in progress; it is restored to the
                # highest visited rev when a match is found, or the loop ends
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
764
764
@predicate('first(set, [n])', safe=True, takeorder=True)
def first(repo, subset, x, order):
    """An alias for limit().
    """
    # 'first' shares limit()'s implementation; 'order' is forwarded so the
    # shared code can honor the ordering requirement of the outer expression.
    return limit(repo, subset, x, order)
770
770
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow() and _followfirst(): ancestors of the
    # start revision, optionally restricted to the history of files matching
    # a pattern (following renames/copies via filelog ancestry).
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                        _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        # rev stays None when no startrev was given, which makes the matcher
        # operate on the working directory context
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        # no pattern: plain changelog ancestry of the start revision
        s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
800
800
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # delegates to _follow(), which also backs _followfirst()
    return _follow(repo, subset, x, 'follow')
809
809
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
816
816
@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    if not matchmod.patkind(pat):
        # plain path: canonicalize without walking the manifest
        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
        files = [f for f in repo[rev] if m(f)]
        if len(files) != 1:
            # i18n: "followlines" is a keyword
            raise error.ParseError(_("followlines expects exactly one file"))
        fname = files[0]

    # i18n: "followlines" is a keyword
    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    # normalize user-supplied 1-based inclusive bounds
    fromline, toline = util.processlinerange(fromline, toline)

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    # iterasc tells the smartset which direction the generator yields in,
    # enabling lazy intersection with 'subset'
    if descend:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs
879
877
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersect with the full span, which drops "null" if present in subset
    everything = spanset(repo)
    return subset & everything
887
885
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        ctx = repo[r]
        # scan changed files, the author and the commit message
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(gr.search(field) for field in fields)

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
908
906
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the working directory is not in the changelog; read its file
            # list from the context instead
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
972
970
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (plain pattern) argument
    arg = ('string', 'p:' + pat)
    return _matchfiles(repo, subset, arg)
985
983
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    torev = repo.changelog.rev
    headrevs = set()
    # collect the head revisions of every named branch
    for heads in repo.branchmap().itervalues():
        headrevs.update(torev(h) for h in heads)
    return subset & baseset(headrevs)
997
995
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # any member that is a parent of another member is not a head
    withchildren = parents(repo, subset, x)
    return members - withchildren
1005
1003
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1014
1012
1015 @predicate('keyword(string)', safe=True)
1013 @predicate('keyword(string)', safe=True)
1016 def keyword(repo, subset, x):
1014 def keyword(repo, subset, x):
1017 """Search commit message, user name, and names of changed files for
1015 """Search commit message, user name, and names of changed files for
1018 string. The match is case-insensitive.
1016 string. The match is case-insensitive.
1019
1017
1020 For a regular expression or case sensitive search of these fields, use
1018 For a regular expression or case sensitive search of these fields, use
1021 ``grep(regex)``.
1019 ``grep(regex)``.
1022 """
1020 """
1023 # i18n: "keyword" is a keyword
1021 # i18n: "keyword" is a keyword
1024 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1022 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1025
1023
1026 def matches(r):
1024 def matches(r):
1027 c = repo[r]
1025 c = repo[r]
1028 return any(kw in encoding.lower(t)
1026 return any(kw in encoding.lower(t)
1029 for t in c.files() + [c.user(), c.description()])
1027 for t in c.files() + [c.user(), c.description()])
1030
1028
1031 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1029 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1032
1030
1033 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True)
1031 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True)
1034 def limit(repo, subset, x, order):
1032 def limit(repo, subset, x, order):
1035 """First n members of set, defaulting to 1, starting from offset.
1033 """First n members of set, defaulting to 1, starting from offset.
1036 """
1034 """
1037 args = getargsdict(x, 'limit', 'set n offset')
1035 args = getargsdict(x, 'limit', 'set n offset')
1038 if 'set' not in args:
1036 if 'set' not in args:
1039 # i18n: "limit" is a keyword
1037 # i18n: "limit" is a keyword
1040 raise error.ParseError(_("limit requires one to three arguments"))
1038 raise error.ParseError(_("limit requires one to three arguments"))
1041 # i18n: "limit" is a keyword
1039 # i18n: "limit" is a keyword
1042 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1040 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1043 if lim < 0:
1041 if lim < 0:
1044 raise error.ParseError(_("negative number to select"))
1042 raise error.ParseError(_("negative number to select"))
1045 # i18n: "limit" is a keyword
1043 # i18n: "limit" is a keyword
1046 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1044 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1047 if ofs < 0:
1045 if ofs < 0:
1048 raise error.ParseError(_("negative offset"))
1046 raise error.ParseError(_("negative offset"))
1049 os = getset(repo, fullreposet(repo), args['set'])
1047 os = getset(repo, fullreposet(repo), args['set'])
1050 ls = os.slice(ofs, ofs + lim)
1048 ls = os.slice(ofs, ofs + lim)
1051 if order == followorder and lim > 1:
1049 if order == followorder and lim > 1:
1052 return subset & ls
1050 return subset & ls
1053 return ls & subset
1051 return ls & subset
1054
1052
1055 @predicate('last(set, [n])', safe=True, takeorder=True)
1053 @predicate('last(set, [n])', safe=True, takeorder=True)
1056 def last(repo, subset, x, order):
1054 def last(repo, subset, x, order):
1057 """Last n members of set, defaulting to 1.
1055 """Last n members of set, defaulting to 1.
1058 """
1056 """
1059 # i18n: "last" is a keyword
1057 # i18n: "last" is a keyword
1060 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1058 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1061 lim = 1
1059 lim = 1
1062 if len(l) == 2:
1060 if len(l) == 2:
1063 # i18n: "last" is a keyword
1061 # i18n: "last" is a keyword
1064 lim = getinteger(l[1], _("last expects a number"))
1062 lim = getinteger(l[1], _("last expects a number"))
1065 if lim < 0:
1063 if lim < 0:
1066 raise error.ParseError(_("negative number to select"))
1064 raise error.ParseError(_("negative number to select"))
1067 os = getset(repo, fullreposet(repo), l[0])
1065 os = getset(repo, fullreposet(repo), l[0])
1068 os.reverse()
1066 os.reverse()
1069 ls = os.slice(0, lim)
1067 ls = os.slice(0, lim)
1070 if order == followorder and lim > 1:
1068 if order == followorder and lim > 1:
1071 return subset & ls
1069 return subset & ls
1072 ls.reverse()
1070 ls.reverse()
1073 return ls & subset
1071 return ls & subset
1074
1072
1075 @predicate('max(set)', safe=True)
1073 @predicate('max(set)', safe=True)
1076 def maxrev(repo, subset, x):
1074 def maxrev(repo, subset, x):
1077 """Changeset with highest revision number in set.
1075 """Changeset with highest revision number in set.
1078 """
1076 """
1079 os = getset(repo, fullreposet(repo), x)
1077 os = getset(repo, fullreposet(repo), x)
1080 try:
1078 try:
1081 m = os.max()
1079 m = os.max()
1082 if m in subset:
1080 if m in subset:
1083 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1081 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1084 except ValueError:
1082 except ValueError:
1085 # os.max() throws a ValueError when the collection is empty.
1083 # os.max() throws a ValueError when the collection is empty.
1086 # Same as python's max().
1084 # Same as python's max().
1087 pass
1085 pass
1088 return baseset(datarepr=('<max %r, %r>', subset, os))
1086 return baseset(datarepr=('<max %r, %r>', subset, os))
1089
1087
1090 @predicate('merge()', safe=True)
1088 @predicate('merge()', safe=True)
1091 def merge(repo, subset, x):
1089 def merge(repo, subset, x):
1092 """Changeset is a merge changeset.
1090 """Changeset is a merge changeset.
1093 """
1091 """
1094 # i18n: "merge" is a keyword
1092 # i18n: "merge" is a keyword
1095 getargs(x, 0, 0, _("merge takes no arguments"))
1093 getargs(x, 0, 0, _("merge takes no arguments"))
1096 cl = repo.changelog
1094 cl = repo.changelog
1097 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1095 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1098 condrepr='<merge>')
1096 condrepr='<merge>')
1099
1097
1100 @predicate('branchpoint()', safe=True)
1098 @predicate('branchpoint()', safe=True)
1101 def branchpoint(repo, subset, x):
1099 def branchpoint(repo, subset, x):
1102 """Changesets with more than one child.
1100 """Changesets with more than one child.
1103 """
1101 """
1104 # i18n: "branchpoint" is a keyword
1102 # i18n: "branchpoint" is a keyword
1105 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1103 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1106 cl = repo.changelog
1104 cl = repo.changelog
1107 if not subset:
1105 if not subset:
1108 return baseset()
1106 return baseset()
1109 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1107 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1110 # (and if it is not, it should.)
1108 # (and if it is not, it should.)
1111 baserev = min(subset)
1109 baserev = min(subset)
1112 parentscount = [0]*(len(repo) - baserev)
1110 parentscount = [0]*(len(repo) - baserev)
1113 for r in cl.revs(start=baserev + 1):
1111 for r in cl.revs(start=baserev + 1):
1114 for p in cl.parentrevs(r):
1112 for p in cl.parentrevs(r):
1115 if p >= baserev:
1113 if p >= baserev:
1116 parentscount[p - baserev] += 1
1114 parentscount[p - baserev] += 1
1117 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1115 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1118 condrepr='<branchpoint>')
1116 condrepr='<branchpoint>')
1119
1117
1120 @predicate('min(set)', safe=True)
1118 @predicate('min(set)', safe=True)
1121 def minrev(repo, subset, x):
1119 def minrev(repo, subset, x):
1122 """Changeset with lowest revision number in set.
1120 """Changeset with lowest revision number in set.
1123 """
1121 """
1124 os = getset(repo, fullreposet(repo), x)
1122 os = getset(repo, fullreposet(repo), x)
1125 try:
1123 try:
1126 m = os.min()
1124 m = os.min()
1127 if m in subset:
1125 if m in subset:
1128 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1126 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1129 except ValueError:
1127 except ValueError:
1130 # os.min() throws a ValueError when the collection is empty.
1128 # os.min() throws a ValueError when the collection is empty.
1131 # Same as python's min().
1129 # Same as python's min().
1132 pass
1130 pass
1133 return baseset(datarepr=('<min %r, %r>', subset, os))
1131 return baseset(datarepr=('<min %r, %r>', subset, os))
1134
1132
1135 @predicate('modifies(pattern)', safe=True)
1133 @predicate('modifies(pattern)', safe=True)
1136 def modifies(repo, subset, x):
1134 def modifies(repo, subset, x):
1137 """Changesets modifying files matched by pattern.
1135 """Changesets modifying files matched by pattern.
1138
1136
1139 The pattern without explicit kind like ``glob:`` is expected to be
1137 The pattern without explicit kind like ``glob:`` is expected to be
1140 relative to the current directory and match against a file or a
1138 relative to the current directory and match against a file or a
1141 directory.
1139 directory.
1142 """
1140 """
1143 # i18n: "modifies" is a keyword
1141 # i18n: "modifies" is a keyword
1144 pat = getstring(x, _("modifies requires a pattern"))
1142 pat = getstring(x, _("modifies requires a pattern"))
1145 return checkstatus(repo, subset, pat, 0)
1143 return checkstatus(repo, subset, pat, 0)
1146
1144
1147 @predicate('named(namespace)')
1145 @predicate('named(namespace)')
1148 def named(repo, subset, x):
1146 def named(repo, subset, x):
1149 """The changesets in a given namespace.
1147 """The changesets in a given namespace.
1150
1148
1151 Pattern matching is supported for `namespace`. See
1149 Pattern matching is supported for `namespace`. See
1152 :hg:`help revisions.patterns`.
1150 :hg:`help revisions.patterns`.
1153 """
1151 """
1154 # i18n: "named" is a keyword
1152 # i18n: "named" is a keyword
1155 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1153 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1156
1154
1157 ns = getstring(args[0],
1155 ns = getstring(args[0],
1158 # i18n: "named" is a keyword
1156 # i18n: "named" is a keyword
1159 _('the argument to named must be a string'))
1157 _('the argument to named must be a string'))
1160 kind, pattern, matcher = util.stringmatcher(ns)
1158 kind, pattern, matcher = util.stringmatcher(ns)
1161 namespaces = set()
1159 namespaces = set()
1162 if kind == 'literal':
1160 if kind == 'literal':
1163 if pattern not in repo.names:
1161 if pattern not in repo.names:
1164 raise error.RepoLookupError(_("namespace '%s' does not exist")
1162 raise error.RepoLookupError(_("namespace '%s' does not exist")
1165 % ns)
1163 % ns)
1166 namespaces.add(repo.names[pattern])
1164 namespaces.add(repo.names[pattern])
1167 else:
1165 else:
1168 for name, ns in repo.names.iteritems():
1166 for name, ns in repo.names.iteritems():
1169 if matcher(name):
1167 if matcher(name):
1170 namespaces.add(ns)
1168 namespaces.add(ns)
1171 if not namespaces:
1169 if not namespaces:
1172 raise error.RepoLookupError(_("no namespace exists"
1170 raise error.RepoLookupError(_("no namespace exists"
1173 " that match '%s'") % pattern)
1171 " that match '%s'") % pattern)
1174
1172
1175 names = set()
1173 names = set()
1176 for ns in namespaces:
1174 for ns in namespaces:
1177 for name in ns.listnames(repo):
1175 for name in ns.listnames(repo):
1178 if name not in ns.deprecated:
1176 if name not in ns.deprecated:
1179 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1177 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1180
1178
1181 names -= {node.nullrev}
1179 names -= {node.nullrev}
1182 return subset & names
1180 return subset & names
1183
1181
1184 @predicate('id(string)', safe=True)
1182 @predicate('id(string)', safe=True)
1185 def node_(repo, subset, x):
1183 def node_(repo, subset, x):
1186 """Revision non-ambiguously specified by the given hex string prefix.
1184 """Revision non-ambiguously specified by the given hex string prefix.
1187 """
1185 """
1188 # i18n: "id" is a keyword
1186 # i18n: "id" is a keyword
1189 l = getargs(x, 1, 1, _("id requires one argument"))
1187 l = getargs(x, 1, 1, _("id requires one argument"))
1190 # i18n: "id" is a keyword
1188 # i18n: "id" is a keyword
1191 n = getstring(l[0], _("id requires a string"))
1189 n = getstring(l[0], _("id requires a string"))
1192 if len(n) == 40:
1190 if len(n) == 40:
1193 try:
1191 try:
1194 rn = repo.changelog.rev(node.bin(n))
1192 rn = repo.changelog.rev(node.bin(n))
1195 except error.WdirUnsupported:
1193 except error.WdirUnsupported:
1196 rn = node.wdirrev
1194 rn = node.wdirrev
1197 except (LookupError, TypeError):
1195 except (LookupError, TypeError):
1198 rn = None
1196 rn = None
1199 else:
1197 else:
1200 rn = None
1198 rn = None
1201 try:
1199 try:
1202 pm = repo.changelog._partialmatch(n)
1200 pm = repo.changelog._partialmatch(n)
1203 if pm is not None:
1201 if pm is not None:
1204 rn = repo.changelog.rev(pm)
1202 rn = repo.changelog.rev(pm)
1205 except error.WdirUnsupported:
1203 except error.WdirUnsupported:
1206 rn = node.wdirrev
1204 rn = node.wdirrev
1207
1205
1208 if rn is None:
1206 if rn is None:
1209 return baseset()
1207 return baseset()
1210 result = baseset([rn])
1208 result = baseset([rn])
1211 return result & subset
1209 return result & subset
1212
1210
1213 @predicate('obsolete()', safe=True)
1211 @predicate('obsolete()', safe=True)
1214 def obsolete(repo, subset, x):
1212 def obsolete(repo, subset, x):
1215 """Mutable changeset with a newer version."""
1213 """Mutable changeset with a newer version."""
1216 # i18n: "obsolete" is a keyword
1214 # i18n: "obsolete" is a keyword
1217 getargs(x, 0, 0, _("obsolete takes no arguments"))
1215 getargs(x, 0, 0, _("obsolete takes no arguments"))
1218 obsoletes = obsmod.getrevs(repo, 'obsolete')
1216 obsoletes = obsmod.getrevs(repo, 'obsolete')
1219 return subset & obsoletes
1217 return subset & obsoletes
1220
1218
1221 @predicate('only(set, [set])', safe=True)
1219 @predicate('only(set, [set])', safe=True)
1222 def only(repo, subset, x):
1220 def only(repo, subset, x):
1223 """Changesets that are ancestors of the first set that are not ancestors
1221 """Changesets that are ancestors of the first set that are not ancestors
1224 of any other head in the repo. If a second set is specified, the result
1222 of any other head in the repo. If a second set is specified, the result
1225 is ancestors of the first set that are not ancestors of the second set
1223 is ancestors of the first set that are not ancestors of the second set
1226 (i.e. ::<set1> - ::<set2>).
1224 (i.e. ::<set1> - ::<set2>).
1227 """
1225 """
1228 cl = repo.changelog
1226 cl = repo.changelog
1229 # i18n: "only" is a keyword
1227 # i18n: "only" is a keyword
1230 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1228 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1231 include = getset(repo, fullreposet(repo), args[0])
1229 include = getset(repo, fullreposet(repo), args[0])
1232 if len(args) == 1:
1230 if len(args) == 1:
1233 if not include:
1231 if not include:
1234 return baseset()
1232 return baseset()
1235
1233
1236 descendants = set(dagop.revdescendants(repo, include, False))
1234 descendants = set(dagop.revdescendants(repo, include, False))
1237 exclude = [rev for rev in cl.headrevs()
1235 exclude = [rev for rev in cl.headrevs()
1238 if not rev in descendants and not rev in include]
1236 if not rev in descendants and not rev in include]
1239 else:
1237 else:
1240 exclude = getset(repo, fullreposet(repo), args[1])
1238 exclude = getset(repo, fullreposet(repo), args[1])
1241
1239
1242 results = set(cl.findmissingrevs(common=exclude, heads=include))
1240 results = set(cl.findmissingrevs(common=exclude, heads=include))
1243 # XXX we should turn this into a baseset instead of a set, smartset may do
1241 # XXX we should turn this into a baseset instead of a set, smartset may do
1244 # some optimizations from the fact this is a baseset.
1242 # some optimizations from the fact this is a baseset.
1245 return subset & results
1243 return subset & results
1246
1244
1247 @predicate('origin([set])', safe=True)
1245 @predicate('origin([set])', safe=True)
1248 def origin(repo, subset, x):
1246 def origin(repo, subset, x):
1249 """
1247 """
1250 Changesets that were specified as a source for the grafts, transplants or
1248 Changesets that were specified as a source for the grafts, transplants or
1251 rebases that created the given revisions. Omitting the optional set is the
1249 rebases that created the given revisions. Omitting the optional set is the
1252 same as passing all(). If a changeset created by these operations is itself
1250 same as passing all(). If a changeset created by these operations is itself
1253 specified as a source for one of these operations, only the source changeset
1251 specified as a source for one of these operations, only the source changeset
1254 for the first operation is selected.
1252 for the first operation is selected.
1255 """
1253 """
1256 if x is not None:
1254 if x is not None:
1257 dests = getset(repo, fullreposet(repo), x)
1255 dests = getset(repo, fullreposet(repo), x)
1258 else:
1256 else:
1259 dests = fullreposet(repo)
1257 dests = fullreposet(repo)
1260
1258
1261 def _firstsrc(rev):
1259 def _firstsrc(rev):
1262 src = _getrevsource(repo, rev)
1260 src = _getrevsource(repo, rev)
1263 if src is None:
1261 if src is None:
1264 return None
1262 return None
1265
1263
1266 while True:
1264 while True:
1267 prev = _getrevsource(repo, src)
1265 prev = _getrevsource(repo, src)
1268
1266
1269 if prev is None:
1267 if prev is None:
1270 return src
1268 return src
1271 src = prev
1269 src = prev
1272
1270
1273 o = {_firstsrc(r) for r in dests}
1271 o = {_firstsrc(r) for r in dests}
1274 o -= {None}
1272 o -= {None}
1275 # XXX we should turn this into a baseset instead of a set, smartset may do
1273 # XXX we should turn this into a baseset instead of a set, smartset may do
1276 # some optimizations from the fact this is a baseset.
1274 # some optimizations from the fact this is a baseset.
1277 return subset & o
1275 return subset & o
1278
1276
1279 @predicate('outgoing([path])', safe=False)
1277 @predicate('outgoing([path])', safe=False)
1280 def outgoing(repo, subset, x):
1278 def outgoing(repo, subset, x):
1281 """Changesets not found in the specified destination repository, or the
1279 """Changesets not found in the specified destination repository, or the
1282 default push location.
1280 default push location.
1283 """
1281 """
1284 # Avoid cycles.
1282 # Avoid cycles.
1285 from . import (
1283 from . import (
1286 discovery,
1284 discovery,
1287 hg,
1285 hg,
1288 )
1286 )
1289 # i18n: "outgoing" is a keyword
1287 # i18n: "outgoing" is a keyword
1290 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1288 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1291 # i18n: "outgoing" is a keyword
1289 # i18n: "outgoing" is a keyword
1292 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1290 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1293 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1291 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1294 dest, branches = hg.parseurl(dest)
1292 dest, branches = hg.parseurl(dest)
1295 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1293 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1296 if revs:
1294 if revs:
1297 revs = [repo.lookup(rev) for rev in revs]
1295 revs = [repo.lookup(rev) for rev in revs]
1298 other = hg.peer(repo, {}, dest)
1296 other = hg.peer(repo, {}, dest)
1299 repo.ui.pushbuffer()
1297 repo.ui.pushbuffer()
1300 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1298 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1301 repo.ui.popbuffer()
1299 repo.ui.popbuffer()
1302 cl = repo.changelog
1300 cl = repo.changelog
1303 o = {cl.rev(r) for r in outgoing.missing}
1301 o = {cl.rev(r) for r in outgoing.missing}
1304 return subset & o
1302 return subset & o
1305
1303
1306 @predicate('p1([set])', safe=True)
1304 @predicate('p1([set])', safe=True)
1307 def p1(repo, subset, x):
1305 def p1(repo, subset, x):
1308 """First parent of changesets in set, or the working directory.
1306 """First parent of changesets in set, or the working directory.
1309 """
1307 """
1310 if x is None:
1308 if x is None:
1311 p = repo[x].p1().rev()
1309 p = repo[x].p1().rev()
1312 if p >= 0:
1310 if p >= 0:
1313 return subset & baseset([p])
1311 return subset & baseset([p])
1314 return baseset()
1312 return baseset()
1315
1313
1316 ps = set()
1314 ps = set()
1317 cl = repo.changelog
1315 cl = repo.changelog
1318 for r in getset(repo, fullreposet(repo), x):
1316 for r in getset(repo, fullreposet(repo), x):
1319 try:
1317 try:
1320 ps.add(cl.parentrevs(r)[0])
1318 ps.add(cl.parentrevs(r)[0])
1321 except error.WdirUnsupported:
1319 except error.WdirUnsupported:
1322 ps.add(repo[r].parents()[0].rev())
1320 ps.add(repo[r].parents()[0].rev())
1323 ps -= {node.nullrev}
1321 ps -= {node.nullrev}
1324 # XXX we should turn this into a baseset instead of a set, smartset may do
1322 # XXX we should turn this into a baseset instead of a set, smartset may do
1325 # some optimizations from the fact this is a baseset.
1323 # some optimizations from the fact this is a baseset.
1326 return subset & ps
1324 return subset & ps
1327
1325
1328 @predicate('p2([set])', safe=True)
1326 @predicate('p2([set])', safe=True)
1329 def p2(repo, subset, x):
1327 def p2(repo, subset, x):
1330 """Second parent of changesets in set, or the working directory.
1328 """Second parent of changesets in set, or the working directory.
1331 """
1329 """
1332 if x is None:
1330 if x is None:
1333 ps = repo[x].parents()
1331 ps = repo[x].parents()
1334 try:
1332 try:
1335 p = ps[1].rev()
1333 p = ps[1].rev()
1336 if p >= 0:
1334 if p >= 0:
1337 return subset & baseset([p])
1335 return subset & baseset([p])
1338 return baseset()
1336 return baseset()
1339 except IndexError:
1337 except IndexError:
1340 return baseset()
1338 return baseset()
1341
1339
1342 ps = set()
1340 ps = set()
1343 cl = repo.changelog
1341 cl = repo.changelog
1344 for r in getset(repo, fullreposet(repo), x):
1342 for r in getset(repo, fullreposet(repo), x):
1345 try:
1343 try:
1346 ps.add(cl.parentrevs(r)[1])
1344 ps.add(cl.parentrevs(r)[1])
1347 except error.WdirUnsupported:
1345 except error.WdirUnsupported:
1348 parents = repo[r].parents()
1346 parents = repo[r].parents()
1349 if len(parents) == 2:
1347 if len(parents) == 2:
1350 ps.add(parents[1])
1348 ps.add(parents[1])
1351 ps -= {node.nullrev}
1349 ps -= {node.nullrev}
1352 # XXX we should turn this into a baseset instead of a set, smartset may do
1350 # XXX we should turn this into a baseset instead of a set, smartset may do
1353 # some optimizations from the fact this is a baseset.
1351 # some optimizations from the fact this is a baseset.
1354 return subset & ps
1352 return subset & ps
1355
1353
1356 def parentpost(repo, subset, x, order):
1354 def parentpost(repo, subset, x, order):
1357 return p1(repo, subset, x)
1355 return p1(repo, subset, x)
1358
1356
1359 @predicate('parents([set])', safe=True)
1357 @predicate('parents([set])', safe=True)
1360 def parents(repo, subset, x):
1358 def parents(repo, subset, x):
1361 """
1359 """
1362 The set of all parents for all changesets in set, or the working directory.
1360 The set of all parents for all changesets in set, or the working directory.
1363 """
1361 """
1364 if x is None:
1362 if x is None:
1365 ps = set(p.rev() for p in repo[x].parents())
1363 ps = set(p.rev() for p in repo[x].parents())
1366 else:
1364 else:
1367 ps = set()
1365 ps = set()
1368 cl = repo.changelog
1366 cl = repo.changelog
1369 up = ps.update
1367 up = ps.update
1370 parentrevs = cl.parentrevs
1368 parentrevs = cl.parentrevs
1371 for r in getset(repo, fullreposet(repo), x):
1369 for r in getset(repo, fullreposet(repo), x):
1372 try:
1370 try:
1373 up(parentrevs(r))
1371 up(parentrevs(r))
1374 except error.WdirUnsupported:
1372 except error.WdirUnsupported:
1375 up(p.rev() for p in repo[r].parents())
1373 up(p.rev() for p in repo[r].parents())
1376 ps -= {node.nullrev}
1374 ps -= {node.nullrev}
1377 return subset & ps
1375 return subset & ps
1378
1376
1379 def _phase(repo, subset, *targets):
1377 def _phase(repo, subset, *targets):
1380 """helper to select all rev in <targets> phases"""
1378 """helper to select all rev in <targets> phases"""
1381 s = repo._phasecache.getrevset(repo, targets)
1379 s = repo._phasecache.getrevset(repo, targets)
1382 return subset & s
1380 return subset & s
1383
1381
1384 @predicate('draft()', safe=True)
1382 @predicate('draft()', safe=True)
1385 def draft(repo, subset, x):
1383 def draft(repo, subset, x):
1386 """Changeset in draft phase."""
1384 """Changeset in draft phase."""
1387 # i18n: "draft" is a keyword
1385 # i18n: "draft" is a keyword
1388 getargs(x, 0, 0, _("draft takes no arguments"))
1386 getargs(x, 0, 0, _("draft takes no arguments"))
1389 target = phases.draft
1387 target = phases.draft
1390 return _phase(repo, subset, target)
1388 return _phase(repo, subset, target)
1391
1389
1392 @predicate('secret()', safe=True)
1390 @predicate('secret()', safe=True)
1393 def secret(repo, subset, x):
1391 def secret(repo, subset, x):
1394 """Changeset in secret phase."""
1392 """Changeset in secret phase."""
1395 # i18n: "secret" is a keyword
1393 # i18n: "secret" is a keyword
1396 getargs(x, 0, 0, _("secret takes no arguments"))
1394 getargs(x, 0, 0, _("secret takes no arguments"))
1397 target = phases.secret
1395 target = phases.secret
1398 return _phase(repo, subset, target)
1396 return _phase(repo, subset, target)
1399
1397
1400 def parentspec(repo, subset, x, n, order):
1398 def parentspec(repo, subset, x, n, order):
1401 """``set^0``
1399 """``set^0``
1402 The set.
1400 The set.
1403 ``set^1`` (or ``set^``), ``set^2``
1401 ``set^1`` (or ``set^``), ``set^2``
1404 First or second parent, respectively, of all changesets in set.
1402 First or second parent, respectively, of all changesets in set.
1405 """
1403 """
1406 try:
1404 try:
1407 n = int(n[1])
1405 n = int(n[1])
1408 if n not in (0, 1, 2):
1406 if n not in (0, 1, 2):
1409 raise ValueError
1407 raise ValueError
1410 except (TypeError, ValueError):
1408 except (TypeError, ValueError):
1411 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1409 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1412 ps = set()
1410 ps = set()
1413 cl = repo.changelog
1411 cl = repo.changelog
1414 for r in getset(repo, fullreposet(repo), x):
1412 for r in getset(repo, fullreposet(repo), x):
1415 if n == 0:
1413 if n == 0:
1416 ps.add(r)
1414 ps.add(r)
1417 elif n == 1:
1415 elif n == 1:
1418 try:
1416 try:
1419 ps.add(cl.parentrevs(r)[0])
1417 ps.add(cl.parentrevs(r)[0])
1420 except error.WdirUnsupported:
1418 except error.WdirUnsupported:
1421 ps.add(repo[r].parents()[0].rev())
1419 ps.add(repo[r].parents()[0].rev())
1422 else:
1420 else:
1423 try:
1421 try:
1424 parents = cl.parentrevs(r)
1422 parents = cl.parentrevs(r)
1425 if parents[1] != node.nullrev:
1423 if parents[1] != node.nullrev:
1426 ps.add(parents[1])
1424 ps.add(parents[1])
1427 except error.WdirUnsupported:
1425 except error.WdirUnsupported:
1428 parents = repo[r].parents()
1426 parents = repo[r].parents()
1429 if len(parents) == 2:
1427 if len(parents) == 2:
1430 ps.add(parents[1].rev())
1428 ps.add(parents[1].rev())
1431 return subset & ps
1429 return subset & ps
1432
1430
1433 @predicate('present(set)', safe=True)
1431 @predicate('present(set)', safe=True)
1434 def present(repo, subset, x):
1432 def present(repo, subset, x):
1435 """An empty set, if any revision in set isn't found; otherwise,
1433 """An empty set, if any revision in set isn't found; otherwise,
1436 all revisions in set.
1434 all revisions in set.
1437
1435
1438 If any of specified revisions is not present in the local repository,
1436 If any of specified revisions is not present in the local repository,
1439 the query is normally aborted. But this predicate allows the query
1437 the query is normally aborted. But this predicate allows the query
1440 to continue even in such cases.
1438 to continue even in such cases.
1441 """
1439 """
1442 try:
1440 try:
1443 return getset(repo, subset, x)
1441 return getset(repo, subset, x)
1444 except error.RepoLookupError:
1442 except error.RepoLookupError:
1445 return baseset()
1443 return baseset()
1446
1444
1447 # for internal use
1445 # for internal use
1448 @predicate('_notpublic', safe=True)
1446 @predicate('_notpublic', safe=True)
1449 def _notpublic(repo, subset, x):
1447 def _notpublic(repo, subset, x):
1450 getargs(x, 0, 0, "_notpublic takes no arguments")
1448 getargs(x, 0, 0, "_notpublic takes no arguments")
1451 return _phase(repo, subset, phases.draft, phases.secret)
1449 return _phase(repo, subset, phases.draft, phases.secret)
1452
1450
1453 @predicate('public()', safe=True)
1451 @predicate('public()', safe=True)
1454 def public(repo, subset, x):
1452 def public(repo, subset, x):
1455 """Changeset in public phase."""
1453 """Changeset in public phase."""
1456 # i18n: "public" is a keyword
1454 # i18n: "public" is a keyword
1457 getargs(x, 0, 0, _("public takes no arguments"))
1455 getargs(x, 0, 0, _("public takes no arguments"))
1458 phase = repo._phasecache.phase
1456 phase = repo._phasecache.phase
1459 target = phases.public
1457 target = phases.public
1460 condition = lambda r: phase(repo, r) == target
1458 condition = lambda r: phase(repo, r) == target
1461 return subset.filter(condition, condrepr=('<phase %r>', target),
1459 return subset.filter(condition, condrepr=('<phase %r>', target),
1462 cache=False)
1460 cache=False)
1463
1461
1464 @predicate('remote([id [,path]])', safe=False)
1462 @predicate('remote([id [,path]])', safe=False)
1465 def remote(repo, subset, x):
1463 def remote(repo, subset, x):
1466 """Local revision that corresponds to the given identifier in a
1464 """Local revision that corresponds to the given identifier in a
1467 remote repository, if present. Here, the '.' identifier is a
1465 remote repository, if present. Here, the '.' identifier is a
1468 synonym for the current local branch.
1466 synonym for the current local branch.
1469 """
1467 """
1470
1468
1471 from . import hg # avoid start-up nasties
1469 from . import hg # avoid start-up nasties
1472 # i18n: "remote" is a keyword
1470 # i18n: "remote" is a keyword
1473 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1471 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1474
1472
1475 q = '.'
1473 q = '.'
1476 if len(l) > 0:
1474 if len(l) > 0:
1477 # i18n: "remote" is a keyword
1475 # i18n: "remote" is a keyword
1478 q = getstring(l[0], _("remote requires a string id"))
1476 q = getstring(l[0], _("remote requires a string id"))
1479 if q == '.':
1477 if q == '.':
1480 q = repo['.'].branch()
1478 q = repo['.'].branch()
1481
1479
1482 dest = ''
1480 dest = ''
1483 if len(l) > 1:
1481 if len(l) > 1:
1484 # i18n: "remote" is a keyword
1482 # i18n: "remote" is a keyword
1485 dest = getstring(l[1], _("remote requires a repository path"))
1483 dest = getstring(l[1], _("remote requires a repository path"))
1486 dest = repo.ui.expandpath(dest or 'default')
1484 dest = repo.ui.expandpath(dest or 'default')
1487 dest, branches = hg.parseurl(dest)
1485 dest, branches = hg.parseurl(dest)
1488 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1486 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1489 if revs:
1487 if revs:
1490 revs = [repo.lookup(rev) for rev in revs]
1488 revs = [repo.lookup(rev) for rev in revs]
1491 other = hg.peer(repo, {}, dest)
1489 other = hg.peer(repo, {}, dest)
1492 n = other.lookup(q)
1490 n = other.lookup(q)
1493 if n in repo:
1491 if n in repo:
1494 r = repo[n].rev()
1492 r = repo[n].rev()
1495 if r in subset:
1493 if r in subset:
1496 return baseset([r])
1494 return baseset([r])
1497 return baseset()
1495 return baseset()
1498
1496
1499 @predicate('removes(pattern)', safe=True)
1497 @predicate('removes(pattern)', safe=True)
1500 def removes(repo, subset, x):
1498 def removes(repo, subset, x):
1501 """Changesets which remove files matching pattern.
1499 """Changesets which remove files matching pattern.
1502
1500
1503 The pattern without explicit kind like ``glob:`` is expected to be
1501 The pattern without explicit kind like ``glob:`` is expected to be
1504 relative to the current directory and match against a file or a
1502 relative to the current directory and match against a file or a
1505 directory.
1503 directory.
1506 """
1504 """
1507 # i18n: "removes" is a keyword
1505 # i18n: "removes" is a keyword
1508 pat = getstring(x, _("removes requires a pattern"))
1506 pat = getstring(x, _("removes requires a pattern"))
1509 return checkstatus(repo, subset, pat, 2)
1507 return checkstatus(repo, subset, pat, 2)
1510
1508
1511 @predicate('rev(number)', safe=True)
1509 @predicate('rev(number)', safe=True)
1512 def rev(repo, subset, x):
1510 def rev(repo, subset, x):
1513 """Revision with the given numeric identifier.
1511 """Revision with the given numeric identifier.
1514 """
1512 """
1515 # i18n: "rev" is a keyword
1513 # i18n: "rev" is a keyword
1516 l = getargs(x, 1, 1, _("rev requires one argument"))
1514 l = getargs(x, 1, 1, _("rev requires one argument"))
1517 try:
1515 try:
1518 # i18n: "rev" is a keyword
1516 # i18n: "rev" is a keyword
1519 l = int(getstring(l[0], _("rev requires a number")))
1517 l = int(getstring(l[0], _("rev requires a number")))
1520 except (TypeError, ValueError):
1518 except (TypeError, ValueError):
1521 # i18n: "rev" is a keyword
1519 # i18n: "rev" is a keyword
1522 raise error.ParseError(_("rev expects a number"))
1520 raise error.ParseError(_("rev expects a number"))
1523 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1521 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1524 return baseset()
1522 return baseset()
1525 return subset & baseset([l])
1523 return subset & baseset([l])
1526
1524
1527 @predicate('matching(revision [, field])', safe=True)
1525 @predicate('matching(revision [, field])', safe=True)
1528 def matching(repo, subset, x):
1526 def matching(repo, subset, x):
1529 """Changesets in which a given set of fields match the set of fields in the
1527 """Changesets in which a given set of fields match the set of fields in the
1530 selected revision or set.
1528 selected revision or set.
1531
1529
1532 To match more than one field pass the list of fields to match separated
1530 To match more than one field pass the list of fields to match separated
1533 by spaces (e.g. ``author description``).
1531 by spaces (e.g. ``author description``).
1534
1532
1535 Valid fields are most regular revision fields and some special fields.
1533 Valid fields are most regular revision fields and some special fields.
1536
1534
1537 Regular revision fields are ``description``, ``author``, ``branch``,
1535 Regular revision fields are ``description``, ``author``, ``branch``,
1538 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1536 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1539 and ``diff``.
1537 and ``diff``.
1540 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1538 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1541 contents of the revision. Two revisions matching their ``diff`` will
1539 contents of the revision. Two revisions matching their ``diff`` will
1542 also match their ``files``.
1540 also match their ``files``.
1543
1541
1544 Special fields are ``summary`` and ``metadata``:
1542 Special fields are ``summary`` and ``metadata``:
1545 ``summary`` matches the first line of the description.
1543 ``summary`` matches the first line of the description.
1546 ``metadata`` is equivalent to matching ``description user date``
1544 ``metadata`` is equivalent to matching ``description user date``
1547 (i.e. it matches the main metadata fields).
1545 (i.e. it matches the main metadata fields).
1548
1546
1549 ``metadata`` is the default field which is used when no fields are
1547 ``metadata`` is the default field which is used when no fields are
1550 specified. You can match more than one field at a time.
1548 specified. You can match more than one field at a time.
1551 """
1549 """
1552 # i18n: "matching" is a keyword
1550 # i18n: "matching" is a keyword
1553 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1551 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1554
1552
1555 revs = getset(repo, fullreposet(repo), l[0])
1553 revs = getset(repo, fullreposet(repo), l[0])
1556
1554
1557 fieldlist = ['metadata']
1555 fieldlist = ['metadata']
1558 if len(l) > 1:
1556 if len(l) > 1:
1559 fieldlist = getstring(l[1],
1557 fieldlist = getstring(l[1],
1560 # i18n: "matching" is a keyword
1558 # i18n: "matching" is a keyword
1561 _("matching requires a string "
1559 _("matching requires a string "
1562 "as its second argument")).split()
1560 "as its second argument")).split()
1563
1561
1564 # Make sure that there are no repeated fields,
1562 # Make sure that there are no repeated fields,
1565 # expand the 'special' 'metadata' field type
1563 # expand the 'special' 'metadata' field type
1566 # and check the 'files' whenever we check the 'diff'
1564 # and check the 'files' whenever we check the 'diff'
1567 fields = []
1565 fields = []
1568 for field in fieldlist:
1566 for field in fieldlist:
1569 if field == 'metadata':
1567 if field == 'metadata':
1570 fields += ['user', 'description', 'date']
1568 fields += ['user', 'description', 'date']
1571 elif field == 'diff':
1569 elif field == 'diff':
1572 # a revision matching the diff must also match the files
1570 # a revision matching the diff must also match the files
1573 # since matching the diff is very costly, make sure to
1571 # since matching the diff is very costly, make sure to
1574 # also match the files first
1572 # also match the files first
1575 fields += ['files', 'diff']
1573 fields += ['files', 'diff']
1576 else:
1574 else:
1577 if field == 'author':
1575 if field == 'author':
1578 field = 'user'
1576 field = 'user'
1579 fields.append(field)
1577 fields.append(field)
1580 fields = set(fields)
1578 fields = set(fields)
1581 if 'summary' in fields and 'description' in fields:
1579 if 'summary' in fields and 'description' in fields:
1582 # If a revision matches its description it also matches its summary
1580 # If a revision matches its description it also matches its summary
1583 fields.discard('summary')
1581 fields.discard('summary')
1584
1582
1585 # We may want to match more than one field
1583 # We may want to match more than one field
1586 # Not all fields take the same amount of time to be matched
1584 # Not all fields take the same amount of time to be matched
1587 # Sort the selected fields in order of increasing matching cost
1585 # Sort the selected fields in order of increasing matching cost
1588 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1586 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1589 'files', 'description', 'substate', 'diff']
1587 'files', 'description', 'substate', 'diff']
1590 def fieldkeyfunc(f):
1588 def fieldkeyfunc(f):
1591 try:
1589 try:
1592 return fieldorder.index(f)
1590 return fieldorder.index(f)
1593 except ValueError:
1591 except ValueError:
1594 # assume an unknown field is very costly
1592 # assume an unknown field is very costly
1595 return len(fieldorder)
1593 return len(fieldorder)
1596 fields = list(fields)
1594 fields = list(fields)
1597 fields.sort(key=fieldkeyfunc)
1595 fields.sort(key=fieldkeyfunc)
1598
1596
1599 # Each field will be matched with its own "getfield" function
1597 # Each field will be matched with its own "getfield" function
1600 # which will be added to the getfieldfuncs array of functions
1598 # which will be added to the getfieldfuncs array of functions
1601 getfieldfuncs = []
1599 getfieldfuncs = []
1602 _funcs = {
1600 _funcs = {
1603 'user': lambda r: repo[r].user(),
1601 'user': lambda r: repo[r].user(),
1604 'branch': lambda r: repo[r].branch(),
1602 'branch': lambda r: repo[r].branch(),
1605 'date': lambda r: repo[r].date(),
1603 'date': lambda r: repo[r].date(),
1606 'description': lambda r: repo[r].description(),
1604 'description': lambda r: repo[r].description(),
1607 'files': lambda r: repo[r].files(),
1605 'files': lambda r: repo[r].files(),
1608 'parents': lambda r: repo[r].parents(),
1606 'parents': lambda r: repo[r].parents(),
1609 'phase': lambda r: repo[r].phase(),
1607 'phase': lambda r: repo[r].phase(),
1610 'substate': lambda r: repo[r].substate,
1608 'substate': lambda r: repo[r].substate,
1611 'summary': lambda r: repo[r].description().splitlines()[0],
1609 'summary': lambda r: repo[r].description().splitlines()[0],
1612 'diff': lambda r: list(repo[r].diff(git=True),)
1610 'diff': lambda r: list(repo[r].diff(git=True),)
1613 }
1611 }
1614 for info in fields:
1612 for info in fields:
1615 getfield = _funcs.get(info, None)
1613 getfield = _funcs.get(info, None)
1616 if getfield is None:
1614 if getfield is None:
1617 raise error.ParseError(
1615 raise error.ParseError(
1618 # i18n: "matching" is a keyword
1616 # i18n: "matching" is a keyword
1619 _("unexpected field name passed to matching: %s") % info)
1617 _("unexpected field name passed to matching: %s") % info)
1620 getfieldfuncs.append(getfield)
1618 getfieldfuncs.append(getfield)
1621 # convert the getfield array of functions into a "getinfo" function
1619 # convert the getfield array of functions into a "getinfo" function
1622 # which returns an array of field values (or a single value if there
1620 # which returns an array of field values (or a single value if there
1623 # is only one field to match)
1621 # is only one field to match)
1624 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1622 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1625
1623
1626 def matches(x):
1624 def matches(x):
1627 for rev in revs:
1625 for rev in revs:
1628 target = getinfo(rev)
1626 target = getinfo(rev)
1629 match = True
1627 match = True
1630 for n, f in enumerate(getfieldfuncs):
1628 for n, f in enumerate(getfieldfuncs):
1631 if target[n] != f(x):
1629 if target[n] != f(x):
1632 match = False
1630 match = False
1633 if match:
1631 if match:
1634 return True
1632 return True
1635 return False
1633 return False
1636
1634
1637 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1635 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1638
1636
1639 @predicate('reverse(set)', safe=True, takeorder=True)
1637 @predicate('reverse(set)', safe=True, takeorder=True)
1640 def reverse(repo, subset, x, order):
1638 def reverse(repo, subset, x, order):
1641 """Reverse order of set.
1639 """Reverse order of set.
1642 """
1640 """
1643 l = getset(repo, subset, x)
1641 l = getset(repo, subset, x)
1644 if order == defineorder:
1642 if order == defineorder:
1645 l.reverse()
1643 l.reverse()
1646 return l
1644 return l
1647
1645
1648 @predicate('roots(set)', safe=True)
1646 @predicate('roots(set)', safe=True)
1649 def roots(repo, subset, x):
1647 def roots(repo, subset, x):
1650 """Changesets in set with no parent changeset in set.
1648 """Changesets in set with no parent changeset in set.
1651 """
1649 """
1652 s = getset(repo, fullreposet(repo), x)
1650 s = getset(repo, fullreposet(repo), x)
1653 parents = repo.changelog.parentrevs
1651 parents = repo.changelog.parentrevs
1654 def filter(r):
1652 def filter(r):
1655 for p in parents(r):
1653 for p in parents(r):
1656 if 0 <= p and p in s:
1654 if 0 <= p and p in s:
1657 return False
1655 return False
1658 return True
1656 return True
1659 return subset & s.filter(filter, condrepr='<roots>')
1657 return subset & s.filter(filter, condrepr='<roots>')
1660
1658
1661 _sortkeyfuncs = {
1659 _sortkeyfuncs = {
1662 'rev': lambda c: c.rev(),
1660 'rev': lambda c: c.rev(),
1663 'branch': lambda c: c.branch(),
1661 'branch': lambda c: c.branch(),
1664 'desc': lambda c: c.description(),
1662 'desc': lambda c: c.description(),
1665 'user': lambda c: c.user(),
1663 'user': lambda c: c.user(),
1666 'author': lambda c: c.user(),
1664 'author': lambda c: c.user(),
1667 'date': lambda c: c.date()[0],
1665 'date': lambda c: c.date()[0],
1668 }
1666 }
1669
1667
1670 def _getsortargs(x):
1668 def _getsortargs(x):
1671 """Parse sort options into (set, [(key, reverse)], opts)"""
1669 """Parse sort options into (set, [(key, reverse)], opts)"""
1672 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1670 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1673 if 'set' not in args:
1671 if 'set' not in args:
1674 # i18n: "sort" is a keyword
1672 # i18n: "sort" is a keyword
1675 raise error.ParseError(_('sort requires one or two arguments'))
1673 raise error.ParseError(_('sort requires one or two arguments'))
1676 keys = "rev"
1674 keys = "rev"
1677 if 'keys' in args:
1675 if 'keys' in args:
1678 # i18n: "sort" is a keyword
1676 # i18n: "sort" is a keyword
1679 keys = getstring(args['keys'], _("sort spec must be a string"))
1677 keys = getstring(args['keys'], _("sort spec must be a string"))
1680
1678
1681 keyflags = []
1679 keyflags = []
1682 for k in keys.split():
1680 for k in keys.split():
1683 fk = k
1681 fk = k
1684 reverse = (k[0] == '-')
1682 reverse = (k[0] == '-')
1685 if reverse:
1683 if reverse:
1686 k = k[1:]
1684 k = k[1:]
1687 if k not in _sortkeyfuncs and k != 'topo':
1685 if k not in _sortkeyfuncs and k != 'topo':
1688 raise error.ParseError(_("unknown sort key %r") % fk)
1686 raise error.ParseError(_("unknown sort key %r") % fk)
1689 keyflags.append((k, reverse))
1687 keyflags.append((k, reverse))
1690
1688
1691 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1689 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1692 # i18n: "topo" is a keyword
1690 # i18n: "topo" is a keyword
1693 raise error.ParseError(_('topo sort order cannot be combined '
1691 raise error.ParseError(_('topo sort order cannot be combined '
1694 'with other sort keys'))
1692 'with other sort keys'))
1695
1693
1696 opts = {}
1694 opts = {}
1697 if 'topo.firstbranch' in args:
1695 if 'topo.firstbranch' in args:
1698 if any(k == 'topo' for k, reverse in keyflags):
1696 if any(k == 'topo' for k, reverse in keyflags):
1699 opts['topo.firstbranch'] = args['topo.firstbranch']
1697 opts['topo.firstbranch'] = args['topo.firstbranch']
1700 else:
1698 else:
1701 # i18n: "topo" and "topo.firstbranch" are keywords
1699 # i18n: "topo" and "topo.firstbranch" are keywords
1702 raise error.ParseError(_('topo.firstbranch can only be used '
1700 raise error.ParseError(_('topo.firstbranch can only be used '
1703 'when using the topo sort key'))
1701 'when using the topo sort key'))
1704
1702
1705 return args['set'], keyflags, opts
1703 return args['set'], keyflags, opts
1706
1704
1707 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1705 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1708 def sort(repo, subset, x, order):
1706 def sort(repo, subset, x, order):
1709 """Sort set by keys. The default sort order is ascending, specify a key
1707 """Sort set by keys. The default sort order is ascending, specify a key
1710 as ``-key`` to sort in descending order.
1708 as ``-key`` to sort in descending order.
1711
1709
1712 The keys can be:
1710 The keys can be:
1713
1711
1714 - ``rev`` for the revision number,
1712 - ``rev`` for the revision number,
1715 - ``branch`` for the branch name,
1713 - ``branch`` for the branch name,
1716 - ``desc`` for the commit message (description),
1714 - ``desc`` for the commit message (description),
1717 - ``user`` for user name (``author`` can be used as an alias),
1715 - ``user`` for user name (``author`` can be used as an alias),
1718 - ``date`` for the commit date
1716 - ``date`` for the commit date
1719 - ``topo`` for a reverse topographical sort
1717 - ``topo`` for a reverse topographical sort
1720
1718
1721 The ``topo`` sort order cannot be combined with other sort keys. This sort
1719 The ``topo`` sort order cannot be combined with other sort keys. This sort
1722 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1720 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1723 specifies what topographical branches to prioritize in the sort.
1721 specifies what topographical branches to prioritize in the sort.
1724
1722
1725 """
1723 """
1726 s, keyflags, opts = _getsortargs(x)
1724 s, keyflags, opts = _getsortargs(x)
1727 revs = getset(repo, subset, s)
1725 revs = getset(repo, subset, s)
1728
1726
1729 if not keyflags or order != defineorder:
1727 if not keyflags or order != defineorder:
1730 return revs
1728 return revs
1731 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1729 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1732 revs.sort(reverse=keyflags[0][1])
1730 revs.sort(reverse=keyflags[0][1])
1733 return revs
1731 return revs
1734 elif keyflags[0][0] == "topo":
1732 elif keyflags[0][0] == "topo":
1735 firstbranch = ()
1733 firstbranch = ()
1736 if 'topo.firstbranch' in opts:
1734 if 'topo.firstbranch' in opts:
1737 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1735 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1738 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1736 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1739 firstbranch),
1737 firstbranch),
1740 istopo=True)
1738 istopo=True)
1741 if keyflags[0][1]:
1739 if keyflags[0][1]:
1742 revs.reverse()
1740 revs.reverse()
1743 return revs
1741 return revs
1744
1742
1745 # sort() is guaranteed to be stable
1743 # sort() is guaranteed to be stable
1746 ctxs = [repo[r] for r in revs]
1744 ctxs = [repo[r] for r in revs]
1747 for k, reverse in reversed(keyflags):
1745 for k, reverse in reversed(keyflags):
1748 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1746 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1749 return baseset([c.rev() for c in ctxs])
1747 return baseset([c.rev() for c in ctxs])
1750
1748
1751 @predicate('subrepo([pattern])')
1749 @predicate('subrepo([pattern])')
1752 def subrepo(repo, subset, x):
1750 def subrepo(repo, subset, x):
1753 """Changesets that add, modify or remove the given subrepo. If no subrepo
1751 """Changesets that add, modify or remove the given subrepo. If no subrepo
1754 pattern is named, any subrepo changes are returned.
1752 pattern is named, any subrepo changes are returned.
1755 """
1753 """
1756 # i18n: "subrepo" is a keyword
1754 # i18n: "subrepo" is a keyword
1757 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1755 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1758 pat = None
1756 pat = None
1759 if len(args) != 0:
1757 if len(args) != 0:
1760 pat = getstring(args[0], _("subrepo requires a pattern"))
1758 pat = getstring(args[0], _("subrepo requires a pattern"))
1761
1759
1762 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1760 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1763
1761
1764 def submatches(names):
1762 def submatches(names):
1765 k, p, m = util.stringmatcher(pat)
1763 k, p, m = util.stringmatcher(pat)
1766 for name in names:
1764 for name in names:
1767 if m(name):
1765 if m(name):
1768 yield name
1766 yield name
1769
1767
1770 def matches(x):
1768 def matches(x):
1771 c = repo[x]
1769 c = repo[x]
1772 s = repo.status(c.p1().node(), c.node(), match=m)
1770 s = repo.status(c.p1().node(), c.node(), match=m)
1773
1771
1774 if pat is None:
1772 if pat is None:
1775 return s.added or s.modified or s.removed
1773 return s.added or s.modified or s.removed
1776
1774
1777 if s.added:
1775 if s.added:
1778 return any(submatches(c.substate.keys()))
1776 return any(submatches(c.substate.keys()))
1779
1777
1780 if s.modified:
1778 if s.modified:
1781 subs = set(c.p1().substate.keys())
1779 subs = set(c.p1().substate.keys())
1782 subs.update(c.substate.keys())
1780 subs.update(c.substate.keys())
1783
1781
1784 for path in submatches(subs):
1782 for path in submatches(subs):
1785 if c.p1().substate.get(path) != c.substate.get(path):
1783 if c.p1().substate.get(path) != c.substate.get(path):
1786 return True
1784 return True
1787
1785
1788 if s.removed:
1786 if s.removed:
1789 return any(submatches(c.p1().substate.keys()))
1787 return any(submatches(c.p1().substate.keys()))
1790
1788
1791 return False
1789 return False
1792
1790
1793 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1791 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1794
1792
1795 def _substringmatcher(pattern, casesensitive=True):
1793 def _substringmatcher(pattern, casesensitive=True):
1796 kind, pattern, matcher = util.stringmatcher(pattern,
1794 kind, pattern, matcher = util.stringmatcher(pattern,
1797 casesensitive=casesensitive)
1795 casesensitive=casesensitive)
1798 if kind == 'literal':
1796 if kind == 'literal':
1799 if not casesensitive:
1797 if not casesensitive:
1800 pattern = encoding.lower(pattern)
1798 pattern = encoding.lower(pattern)
1801 matcher = lambda s: pattern in encoding.lower(s)
1799 matcher = lambda s: pattern in encoding.lower(s)
1802 else:
1800 else:
1803 matcher = lambda s: pattern in s
1801 matcher = lambda s: pattern in s
1804 return kind, pattern, matcher
1802 return kind, pattern, matcher
1805
1803
1806 @predicate('tag([name])', safe=True)
1804 @predicate('tag([name])', safe=True)
1807 def tag(repo, subset, x):
1805 def tag(repo, subset, x):
1808 """The specified tag by name, or all tagged revisions if no name is given.
1806 """The specified tag by name, or all tagged revisions if no name is given.
1809
1807
1810 Pattern matching is supported for `name`. See
1808 Pattern matching is supported for `name`. See
1811 :hg:`help revisions.patterns`.
1809 :hg:`help revisions.patterns`.
1812 """
1810 """
1813 # i18n: "tag" is a keyword
1811 # i18n: "tag" is a keyword
1814 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1812 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1815 cl = repo.changelog
1813 cl = repo.changelog
1816 if args:
1814 if args:
1817 pattern = getstring(args[0],
1815 pattern = getstring(args[0],
1818 # i18n: "tag" is a keyword
1816 # i18n: "tag" is a keyword
1819 _('the argument to tag must be a string'))
1817 _('the argument to tag must be a string'))
1820 kind, pattern, matcher = util.stringmatcher(pattern)
1818 kind, pattern, matcher = util.stringmatcher(pattern)
1821 if kind == 'literal':
1819 if kind == 'literal':
1822 # avoid resolving all tags
1820 # avoid resolving all tags
1823 tn = repo._tagscache.tags.get(pattern, None)
1821 tn = repo._tagscache.tags.get(pattern, None)
1824 if tn is None:
1822 if tn is None:
1825 raise error.RepoLookupError(_("tag '%s' does not exist")
1823 raise error.RepoLookupError(_("tag '%s' does not exist")
1826 % pattern)
1824 % pattern)
1827 s = {repo[tn].rev()}
1825 s = {repo[tn].rev()}
1828 else:
1826 else:
1829 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
1827 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
1830 else:
1828 else:
1831 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
1829 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
1832 return subset & s
1830 return subset & s
1833
1831
1834 @predicate('tagged', safe=True)
1832 @predicate('tagged', safe=True)
1835 def tagged(repo, subset, x):
1833 def tagged(repo, subset, x):
1836 return tag(repo, subset, x)
1834 return tag(repo, subset, x)
1837
1835
1838 @predicate('unstable()', safe=True)
1836 @predicate('unstable()', safe=True)
1839 def unstable(repo, subset, x):
1837 def unstable(repo, subset, x):
1840 """Non-obsolete changesets with obsolete ancestors.
1838 """Non-obsolete changesets with obsolete ancestors.
1841 """
1839 """
1842 # i18n: "unstable" is a keyword
1840 # i18n: "unstable" is a keyword
1843 getargs(x, 0, 0, _("unstable takes no arguments"))
1841 getargs(x, 0, 0, _("unstable takes no arguments"))
1844 unstables = obsmod.getrevs(repo, 'unstable')
1842 unstables = obsmod.getrevs(repo, 'unstable')
1845 return subset & unstables
1843 return subset & unstables
1846
1844
1847
1845
1848 @predicate('user(string)', safe=True)
1846 @predicate('user(string)', safe=True)
1849 def user(repo, subset, x):
1847 def user(repo, subset, x):
1850 """User name contains string. The match is case-insensitive.
1848 """User name contains string. The match is case-insensitive.
1851
1849
1852 Pattern matching is supported for `string`. See
1850 Pattern matching is supported for `string`. See
1853 :hg:`help revisions.patterns`.
1851 :hg:`help revisions.patterns`.
1854 """
1852 """
1855 return author(repo, subset, x)
1853 return author(repo, subset, x)
1856
1854
1857 @predicate('wdir()', safe=True)
1855 @predicate('wdir()', safe=True)
1858 def wdir(repo, subset, x):
1856 def wdir(repo, subset, x):
1859 """Working directory. (EXPERIMENTAL)"""
1857 """Working directory. (EXPERIMENTAL)"""
1860 # i18n: "wdir" is a keyword
1858 # i18n: "wdir" is a keyword
1861 getargs(x, 0, 0, _("wdir takes no arguments"))
1859 getargs(x, 0, 0, _("wdir takes no arguments"))
1862 if node.wdirrev in subset or isinstance(subset, fullreposet):
1860 if node.wdirrev in subset or isinstance(subset, fullreposet):
1863 return baseset([node.wdirrev])
1861 return baseset([node.wdirrev])
1864 return baseset()
1862 return baseset()
1865
1863
1866 def _orderedlist(repo, subset, x):
1864 def _orderedlist(repo, subset, x):
1867 s = getstring(x, "internal error")
1865 s = getstring(x, "internal error")
1868 if not s:
1866 if not s:
1869 return baseset()
1867 return baseset()
1870 # remove duplicates here. it's difficult for caller to deduplicate sets
1868 # remove duplicates here. it's difficult for caller to deduplicate sets
1871 # because different symbols can point to the same rev.
1869 # because different symbols can point to the same rev.
1872 cl = repo.changelog
1870 cl = repo.changelog
1873 ls = []
1871 ls = []
1874 seen = set()
1872 seen = set()
1875 for t in s.split('\0'):
1873 for t in s.split('\0'):
1876 try:
1874 try:
1877 # fast path for integer revision
1875 # fast path for integer revision
1878 r = int(t)
1876 r = int(t)
1879 if str(r) != t or r not in cl:
1877 if str(r) != t or r not in cl:
1880 raise ValueError
1878 raise ValueError
1881 revs = [r]
1879 revs = [r]
1882 except ValueError:
1880 except ValueError:
1883 revs = stringset(repo, subset, t)
1881 revs = stringset(repo, subset, t)
1884
1882
1885 for r in revs:
1883 for r in revs:
1886 if r in seen:
1884 if r in seen:
1887 continue
1885 continue
1888 if (r in subset
1886 if (r in subset
1889 or r == node.nullrev and isinstance(subset, fullreposet)):
1887 or r == node.nullrev and isinstance(subset, fullreposet)):
1890 ls.append(r)
1888 ls.append(r)
1891 seen.add(r)
1889 seen.add(r)
1892 return baseset(ls)
1890 return baseset(ls)
1893
1891
1894 # for internal use
1892 # for internal use
1895 @predicate('_list', safe=True, takeorder=True)
1893 @predicate('_list', safe=True, takeorder=True)
1896 def _list(repo, subset, x, order):
1894 def _list(repo, subset, x, order):
1897 if order == followorder:
1895 if order == followorder:
1898 # slow path to take the subset order
1896 # slow path to take the subset order
1899 return subset & _orderedlist(repo, fullreposet(repo), x)
1897 return subset & _orderedlist(repo, fullreposet(repo), x)
1900 else:
1898 else:
1901 return _orderedlist(repo, subset, x)
1899 return _orderedlist(repo, subset, x)
1902
1900
1903 def _orderedintlist(repo, subset, x):
1901 def _orderedintlist(repo, subset, x):
1904 s = getstring(x, "internal error")
1902 s = getstring(x, "internal error")
1905 if not s:
1903 if not s:
1906 return baseset()
1904 return baseset()
1907 ls = [int(r) for r in s.split('\0')]
1905 ls = [int(r) for r in s.split('\0')]
1908 s = subset
1906 s = subset
1909 return baseset([r for r in ls if r in s])
1907 return baseset([r for r in ls if r in s])
1910
1908
1911 # for internal use
1909 # for internal use
1912 @predicate('_intlist', safe=True, takeorder=True)
1910 @predicate('_intlist', safe=True, takeorder=True)
1913 def _intlist(repo, subset, x, order):
1911 def _intlist(repo, subset, x, order):
1914 if order == followorder:
1912 if order == followorder:
1915 # slow path to take the subset order
1913 # slow path to take the subset order
1916 return subset & _orderedintlist(repo, fullreposet(repo), x)
1914 return subset & _orderedintlist(repo, fullreposet(repo), x)
1917 else:
1915 else:
1918 return _orderedintlist(repo, subset, x)
1916 return _orderedintlist(repo, subset, x)
1919
1917
1920 def _orderedhexlist(repo, subset, x):
1918 def _orderedhexlist(repo, subset, x):
1921 s = getstring(x, "internal error")
1919 s = getstring(x, "internal error")
1922 if not s:
1920 if not s:
1923 return baseset()
1921 return baseset()
1924 cl = repo.changelog
1922 cl = repo.changelog
1925 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1923 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1926 s = subset
1924 s = subset
1927 return baseset([r for r in ls if r in s])
1925 return baseset([r for r in ls if r in s])
1928
1926
1929 # for internal use
1927 # for internal use
1930 @predicate('_hexlist', safe=True, takeorder=True)
1928 @predicate('_hexlist', safe=True, takeorder=True)
1931 def _hexlist(repo, subset, x, order):
1929 def _hexlist(repo, subset, x, order):
1932 if order == followorder:
1930 if order == followorder:
1933 # slow path to take the subset order
1931 # slow path to take the subset order
1934 return subset & _orderedhexlist(repo, fullreposet(repo), x)
1932 return subset & _orderedhexlist(repo, fullreposet(repo), x)
1935 else:
1933 else:
1936 return _orderedhexlist(repo, subset, x)
1934 return _orderedhexlist(repo, subset, x)
1937
1935
1938 methods = {
1936 methods = {
1939 "range": rangeset,
1937 "range": rangeset,
1940 "rangeall": rangeall,
1938 "rangeall": rangeall,
1941 "rangepre": rangepre,
1939 "rangepre": rangepre,
1942 "rangepost": rangepost,
1940 "rangepost": rangepost,
1943 "dagrange": dagrange,
1941 "dagrange": dagrange,
1944 "string": stringset,
1942 "string": stringset,
1945 "symbol": stringset,
1943 "symbol": stringset,
1946 "and": andset,
1944 "and": andset,
1947 "or": orset,
1945 "or": orset,
1948 "not": notset,
1946 "not": notset,
1949 "difference": differenceset,
1947 "difference": differenceset,
1950 "list": listset,
1948 "list": listset,
1951 "keyvalue": keyvaluepair,
1949 "keyvalue": keyvaluepair,
1952 "func": func,
1950 "func": func,
1953 "ancestor": ancestorspec,
1951 "ancestor": ancestorspec,
1954 "parent": parentspec,
1952 "parent": parentspec,
1955 "parentpost": parentpost,
1953 "parentpost": parentpost,
1956 }
1954 }
1957
1955
1958 def posttreebuilthook(tree, repo):
1956 def posttreebuilthook(tree, repo):
1959 # hook for extensions to execute code on the optimized tree
1957 # hook for extensions to execute code on the optimized tree
1960 pass
1958 pass
1961
1959
def match(ui, spec, repo=None, order=defineorder):
    """Create a matcher for a single revision spec

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    # a single spec is just the one-element case of matchany()
    specs = [spec]
    return matchany(ui, specs, repo=repo, order=order)
1969
1967
def matchany(ui, specs, repo=None, order=defineorder):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    if not specs:
        # no specs at all: the matcher always yields an empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    # with a repo at hand, symbols can be resolved against its contents
    lookup = repo.__contains__ if repo else None
    if len(specs) > 1:
        # several specs are combined as an "or" of the parsed trees
        trees = tuple(revsetlang.parse(s, lookup) for s in specs)
        tree = ('or', ('list',) + trees)
    else:
        tree = revsetlang.parse(specs[0], lookup)

    if ui:
        tree = revsetlang.expandaliases(ui, tree)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree, order)
    tree = revsetlang.optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
1999
1997
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        # default to the whole repo when no subset is supplied
        target = fullreposet(repo) if subset is None else subset
        return getset(repo, target, tree)
    return mfunc
2007
2005
def loadpredicate(ui, extname, registrarobj):
    """Register every revset predicate found in registrarobj.

    Each predicate goes into the global symbol table; predicates flagged
    as safe are additionally recorded in safesymbols.
    """
    table = registrarobj._table
    for name in table:
        func = table[name]
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
2015
2013
# load built-in predicates explicitly now, so that safesymbols is fully
# populated before any revset is evaluated
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now