obsolete: rename divergent volatile set into contentdivergent volatile set...
Boris Feld
r33773:f3f06c26 default
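This changeset renames the 'divergent' volatile set to 'contentdivergent', matching the divergent() → contentdivergent() rename in the context API; the only functional change in this file is the set name passed to obsmod.getrevs. A minimal sketch of how a caller reads that volatile set after the rename (here `repo` stands for any hypothetical local repository object):

    from mercurial import obsolete as obsmod

    # Revisions whose obsolescence markers allow multiple possible
    # successor sets are now cached under the 'contentdivergent' name.
    for rev in obsmod.getrevs(repo, 'contentdivergent'):
        print(repo[rev])  # each content-divergent changeset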
@@ -1,2371 +1,2371 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
    wdirrev,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    sparse,
    subrepo,
    util,
)

propertycache = util.propertycache

nonascii = re.compile(r'[^\x21-\x7f]').search

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is None

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
-        return self.rev() in obsmod.getrevs(self._repo, 'divergent')
+        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.unstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around to avoid breaking extensions that
        expect the old return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

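    # A hypothetical caller migrating off the deprecated predicates above
    # would swap names exactly as the deprecation warnings indicate:
    #     unstable()  -> orphan()
    #     bumped()    -> phasedivergent()
    #     divergent() -> contentdivergent()
    #     troubled()  -> isunstable()
    #     troubles()  -> instabilities()
    # e.g. "if ctx.isunstable(): ..." instead of "if ctx.troubled(): ...".
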
    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r

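    # Sketch of a hypothetical call on the fast path discussed above,
    # comparing the working directory with its first parent; the returned
    # scmutil.status object exposes the seven lists by name:
    #     st = repo[None].status(repo['.'])
    #     for f in st.modified:
    #         repo.ui.write("M %s\n" % f)
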
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This
        is expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

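    # Illustrative distinction (hypothetical usage): for a file revision
    # shared by several changesets, repo[rev][path].linkrev() may name a
    # changeset outside the ancestors of rev, while
    # repo[rev][path].introrev() walks the ancestors of rev as described
    # above and therefore avoids the 'linkrev-shadowing' problem.
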
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid parents have always been filtered out by the list
            # comprehension above, inserting at index 0 always amounts to
            # "replacing the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

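    # Hedged example (assumed repo layout, not from the original source):
    # fl.renamed() returns (oldpath, oldfilenode) when the file revision
    # records a copy, so across a rename the copy source shows up as the
    # first parent:
    #
    #   fctx = repo['tip']['copied-name']
    #   [p.path() for p in fctx.parents()]  # may yield ['original-name']
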
    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1,
                       filelog=self._filelog)

    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if the linenumber parameter is true,
        number is the line number at its first appearance in the managed
        file; otherwise, number has a fixed value of False.
        '''

        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

            hist[f] = curr
            del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))

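    # Illustrative usage sketch (assumed file name, not from the original
    # source): each annotated line pairs a (filectx, lineno-or-False) tuple
    # with the line's text.
    #
    #   fctx = repo['tip']['foo']
    #   for (src, lineno), line in fctx.annotate(linenumber=True):
    #       print src.rev(), lineno, line,
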
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all
    lines in either parent that match the child, annotate the child with the
    parent's data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched.
        # Reversing pblocks maintains bias towards p2, matching the above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child

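# Hedged note (not in the original source): mdiff.allblocks() yields
# ((a1, a2, b1, b2), type) pairs where type is '=' for matching spans and
# '!' (changed) or '~' (blank-line-only) otherwise; _annotatepair only
# copies parent annotations across the '=' spans, e.g.:
#
#   for (a1, a2, b1, b2), t in mdiff.allblocks(old, new, opts=diffopts):
#       if t == '=':
#           pass  # lines new[b1:b2] came verbatim from old[a1:a2]
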
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

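    # Hedged construction examples (assumed identifiers, not from the
    # original source); per the assert above, at least one of changeid,
    # fileid, or changectx must be supplied:
    #
    #   filectx(repo, 'foo', changeid='tip')       # file at a changeset
    #   filectx(repo, 'foo', fileid=0)             # first file revision
    #   filectx(repo, 'foo', changectx=repo['.'])  # from a changectx
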
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

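    # Hedged configuration sketch (hgrc syntax, not part of this module):
    # with the following in place, data() returns an empty string for
    # censored file revisions instead of aborting, matching the config
    # lookup above:
    #
    #   [censor]
    #   policy = ignore
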
    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context
    that wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

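    # Hedged note (not in the original source): the flag values juggled
    # above are Mercurial's manifest flags, '' (regular file), 'x'
    # (executable), and 'l' (symlink), e.g.:
    #
    #   ff = ctx._flagfunc
    #   ff('some/script.sh')  # -> 'x' if tracked as executable
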
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.
        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

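    # Hedged note (not in the original source): the single-letter dirstate
    # states tested above are 'n' (normal), 'a' (added), 'r' (removed),
    # 'm' (merged) and '?' (untracked), so iteration skips files scheduled
    # for removal and membership excludes removed and unknown files, e.g.:
    #
    #   wctx = repo[None]
    #   'tracked-file' in wctx   # True for states 'n', 'a', 'm'
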
    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked
                # from the keyword extension. That gets flagged as
                # non-portable on Windows, since it contains the drive
                # letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

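    # Hedged aside (not in the original source): the warning above estimates
    # peak memory as roughly three times the file size, reported in MB:
    #
    #   3 * st.st_size // 1000000   # e.g. a 50 MB file -> "up to 150 MB"
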
    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user
        # input to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

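    # Hedged usage sketch (assumed patterns, not from the original source):
    # the returned matcher can be fed back into walk() or status-related
    # helpers:
    #
    #   m = wctx.match(['glob:**.py'], exclude=['path:tests'])
    #   for f in wctx.walk(m):
    #       pass  # each f is a matching tracked file name
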
    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need
                # to bother with that: if f has made it to this point, we're
                # sure it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

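    # Hedged note (not in the original source): callers get back three
    # disjoint lists, e.g.:
    #
    #   modified, deleted, fixup = wctx._checklookup(candidates)
    #   # modified: contents/flags really changed vs. the first parent
    #   # deleted:  vanished while being examined
    #   # fixup:    actually clean; only their dirstate stat info was stale
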
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and listclean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

1814 def _buildstatus(self, other, s, match, listignored, listclean,
1814 def _buildstatus(self, other, s, match, listignored, listclean,
1815 listunknown):
1815 listunknown):
1816 """build a status with respect to another context
1816 """build a status with respect to another context
1817
1817
1818 This includes logic for maintaining the fast path of status when
1818 This includes logic for maintaining the fast path of status when
1819 comparing the working directory against its parent, which is to skip
1819 comparing the working directory against its parent, which is to skip
1820 building a new manifest if self (working directory) is not comparing
1820 building a new manifest if self (working directory) is not comparing
1821 against its parent (repo['.']).
1821 against its parent (repo['.']).
1822 """
1822 """
1823 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1823 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1824 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1824 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1825 # might have accidentally ended up with the entire contents of the file
1825 # might have accidentally ended up with the entire contents of the file
1826 # they are supposed to be linking to.
1826 # they are supposed to be linking to.
1827 s.modified[:] = self._filtersuspectsymlink(s.modified)
1827 s.modified[:] = self._filtersuspectsymlink(s.modified)
1828 if other != self._repo['.']:
1828 if other != self._repo['.']:
1829 s = super(workingctx, self)._buildstatus(other, s, match,
1829 s = super(workingctx, self)._buildstatus(other, s, match,
1830 listignored, listclean,
1830 listignored, listclean,
1831 listunknown)
1831 listunknown)
1832 return s
1832 return s
1833
1833
1834 def _matchstatus(self, other, match):
1834 def _matchstatus(self, other, match):
1835 """override the match method with a filter for directory patterns
1835 """override the match method with a filter for directory patterns
1836
1836
1837 We use inheritance to customize the match.bad method only in cases of
1837 We use inheritance to customize the match.bad method only in cases of
1838 workingctx since it belongs only to the working directory when
1838 workingctx since it belongs only to the working directory when
1839 comparing against the parent changeset.
1839 comparing against the parent changeset.
1840
1840
1841 If we aren't comparing against the working directory's parent, then we
1841 If we aren't comparing against the working directory's parent, then we
1842 just use the default match object sent to us.
1842 just use the default match object sent to us.
1843 """
1843 """
1844 superself = super(workingctx, self)
1844 superself = super(workingctx, self)
1845 match = superself._matchstatus(other, match)
1845 match = superself._matchstatus(other, match)
1846 if other != self._repo['.']:
1846 if other != self._repo['.']:
1847 def bad(f, msg):
1847 def bad(f, msg):
1848 # 'f' may be a directory pattern from 'match.files()',
1848 # 'f' may be a directory pattern from 'match.files()',
1849 # so 'f not in ctx1' is not enough
1849 # so 'f not in ctx1' is not enough
1850 if f not in other and not other.hasdir(f):
1850 if f not in other and not other.hasdir(f):
1851 self._repo.ui.warn('%s: %s\n' %
1851 self._repo.ui.warn('%s: %s\n' %
1852 (self._repo.dirstate.pathto(f), msg))
1852 (self._repo.dirstate.pathto(f), msg))
1853 match.bad = bad
1853 match.bad = bad
1854 return match
1854 return match
1855
1855
1856 def markcommitted(self, node):
1856 def markcommitted(self, node):
1857 super(workingctx, self).markcommitted(node)
1857 super(workingctx, self).markcommitted(node)
1858
1858
1859 sparse.aftercommit(self._repo, node)
1859 sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized with a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters
    to override parts of it. This is useful when fctx.data() is expensive
    (i.e. the flag processor is expensive) and raw data, flags, and filenode
    could be reused (ex. rebase or a mode-only amend of a REVIDX_EXTSTORED
    file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is
        a function so that it can be lazy. path, flags, copied, ctx: None or
        an overridden value.

        copied could be (path, rev), or False. copied could also be just a
        path, and will be converted to (path, nullid). This simplifies some
        callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they
        # do not affect reusability here.
        #
        # If ctx or copied is overridden to the same value as originalfctx,
        # still consider it reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        return self._datafunc()
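
    # Illustrative sketch (not part of this module): reusing a file
    # revision while overriding only its flags, as in a mode-only amend.
    # 'fctx' is a hypothetical existing filectx.
    #
    #   execfctx = overlayfilectx(fctx, flags='x')
    #   assert execfctx.data() == fctx.data()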

class metadataonlyctx(committablectx):
    """Like memctx but reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revisions
    identifiers (pass None for every missing parent), 'text' is the commit
    message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to the current date, extra is a dictionary
    of metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized with a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,1058 +1,1066 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'stabilization'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
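
# Illustrative sketch (not part of this module): the hgrc configuration
# read by isenabled() above, assuming the option names defined here.
#
#   [experimental]
#   stabilization = createmarkers, exchange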

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
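
# Illustrative note (not part of this module): the fixed part '>BIB20s'
# packs to 1 + 4 + 1 + 20 = 26 bytes, so _fm0fsize is 26 and each
# successor adds another _fm0fnodesize (20) bytes.
#
#   >>> import struct
#   >>> struct.calcsize('>BIB20s')
#   26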

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
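
# Illustrative sketch (not part of this module): decoding all format "0"
# markers from raw obsstore bytes, assuming 'data' begins with the
# one-byte version header described above (hence the offset of 1).
#
#   markers = list(_fm0readmarkers(data, 1, len(data)))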

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
251
251
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in either key or value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may legitimately contain ':', so split on the first only
            key, value = l.split(':', 1)
            d[key] = value
    return d
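
# Illustration of the fm0 metadata encoding above: keys are sorted and
# 'key:value' pairs are joined with NUL, so (using hypothetical entries)
# _fm0encodemeta({'user': 'alice', 'operation': 'amend'}) yields
# 'operation:amend\0user:alice', which _fm0decodemeta() round-trips back
# into the original dictionary.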

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
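
# Illustrative mapping of '_fm1fixed' onto the fields unpacked in
# _fm1purereadmarkers() below (names on the right are the local variables
# used there):
#
#   >I   uint32   total marker size        -> t
#   d    float64  date in seconds          -> secs
#   h    int16    timezone offset          -> tz
#   H    uint16   flags bit field          -> flags
#   B    uint8    number of successors     -> numsuc
#   B    uint8    number of parents        -> numpar
#   B    uint8    number of metadata pairs -> nummeta
#   20s  bytes    predecessor node (sha1)  -> prec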

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
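
# For illustration, obsstore.add() below serializes new markers with
# b''.join(encodemarkers(new, offset == 0, self._version)), emitting the
# one-byte version header only when starting a fresh obsstore file.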

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

def _addprecursors(*args, **kwargs):
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return _addpredecessors(*args, **kwargs)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded
    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related computations
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete markers are always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to
        use the `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
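
# For example (illustration, assuming the two formats registered above are
# versions 0 and 1): commonversion([2, 1]) returns 1, while
# commonversion([2, 3]) returns None.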

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
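
# The resulting dict looks like {'dump0': <base85 chunk>, 'dump1': ...}
# (illustration); pushmarker() below accepts exactly these 'dump*' keys and
# merges each decoded chunk back into the obsstore.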

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()

# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)

def marker(repo, data):
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)

def getmarkers(repo, nodes=None, exclusive=False):
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)

def exclusivemarkers(repo, nodes):
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)

def foreground(repo, nodes):
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)

def successorssets(repo, initialnode, cache=None):
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator
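
# Usage, as done for the volatile sets below (illustration):
#
#   @cachefor('obsolete')
#   def _computeobsoleteset(repo):
#       ...
#
# after which getrevs(repo, 'obsolete') computes the set once and caches it
# in repo.obsstore.caches.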

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    msg = ("'unstable' volatile set is deprecated, "
           "use 'orphan'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computeorphanset(repo)

@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse following growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable
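
# Illustration (hypothetical revision numbers): with draft changesets
# 5 -> 6 -> 7 where 5 is obsolete, the sorted traversal above first marks 6
# (its parent 5 is obsolete), then 7 (its parent 6 is already in
# `unstable`); this is why processing in increasing rev order is required.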

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    msg = ("'divergent' volatile set is deprecated, "
           "use 'contentdivergent'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computecontentdivergentset(repo)

@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent

def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'stabilization.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
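
# Typical usage (illustration): record that changectx `old` was rewritten
# into changectx `new` by an amend:
#
#   createmarkers(repo, [(old, (new,))], operation='amend')
#
# `old` and `new` here are hypothetical changectx variables.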
@@ -1,2139 +1,2139 b''
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import re

from .i18n import _
from . import (
    dagop,
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    obsutil,
    pathutil,
    phases,
    registrar,
    repoview,
    revsetlang,
    scmutil,
    smartset,
    util,
)

# helpers for processing parsed tree
getsymbol = revsetlang.getsymbol
getstring = revsetlang.getstring
getinteger = revsetlang.getinteger
getboolean = revsetlang.getboolean
getlist = revsetlang.getlist
getrange = revsetlang.getrange
getargs = revsetlang.getargs
getargsdict = revsetlang.getargsdict

# constants used as an argument of match() and matchany()
anyorder = revsetlang.anyorder
defineorder = revsetlang.defineorder
followorder = revsetlang.followorder

baseset = smartset.baseset
generatorset = smartset.generatorset
spanset = smartset.spanset
fullreposet = smartset.fullreposet

# helpers

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    return methods[x[0]](repo, subset, *x[1:])
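
# For illustration: a parsed revset tree is a nested tuple whose first
# element names the handler in the `methods` table, e.g. (assuming the
# usual tree shape) ('not', ('symbol', 'tip')) dispatches to
# notset(repo, subset, ('symbol', 'tip')).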

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = scmutil.intrev(repo[x])
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y, order):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    return _makerangeset(repo, subset, m.first(), n.last(), order)

def rangeall(repo, subset, x, order):
    assert x is None
    return _makerangeset(repo, subset, 0, len(repo) - 1, order)

def rangepre(repo, subset, y, order):
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    n = getset(repo, fullreposet(repo), y)
    if not n:
        return baseset()
    return _makerangeset(repo, subset, 0, n.last(), order)

def rangepost(repo, subset, x, order):
    m = getset(repo, fullreposet(repo), x)
    if not m:
        return baseset()
    return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)

def _makerangeset(repo, subset, m, n, order):
    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    else:
        # carrying the sorting over when possible would be more efficient
        return subset & r
121
121
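# Example for _makerangeset (illustrative): '3:8' becomes the ascending
# spanset(repo, 3, 9) and '8:3' the descending spanset(repo, 8, 2); the
# working-directory pseudo-revision is appended as an explicit baseset
# because it lies outside the repo's 0..tip span.
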
def dagrange(repo, subset, x, y, order):
    r = fullreposet(repo)
    xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                              includepath=True)
    return subset & xs

def andset(repo, subset, x, y, order):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y, order):
    return getset(repo, subset, x) - getset(repo, subset, y)

def _orsetlist(repo, subset, xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p])
    b = _orsetlist(repo, subset, xs[p:])
    return a + b

def orset(repo, subset, x, order):
    xs = getlist(x)
    if order == followorder:
        # slow path to take the subset order
        return subset & _orsetlist(repo, fullreposet(repo), xs)
    else:
        return _orsetlist(repo, subset, xs)

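# Note on _orsetlist (illustrative): halving the operand list and recursing
# combines the results as a balanced tree of lazy set unions, so a long
# 'a or b or c or ...' expression stays shallow instead of nesting linearly
# as a left-to-right fold would.
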
def notset(repo, subset, x, order):
    return subset - getset(repo, subset, x)

def relationset(repo, subset, x, y, order):
    raise error.ParseError(_("can't use a relation in this context"))

def relsubscriptset(repo, subset, x, y, z, order):
    # this is a pretty basic implementation of the 'x#y[z]' operator, still
    # experimental so undocumented. See the wiki for further ideas.
    # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
    rel = getsymbol(y)
    n = getinteger(z, _("relation subscript must be an integer"))

    # TODO: perhaps this should be a table of relation functions
    if rel in ('g', 'generations'):
        # TODO: support range, rewrite tests, and drop startdepth argument
        # from ancestors() and descendants() predicates
        if n <= 0:
            n = -n
            return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
        else:
            return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)

    raise error.UnknownIdentifier(rel, ['generations'])

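# Example for the 'generations' relation (illustrative):
# 'tip#generations[-2]' selects revisions exactly two generations above tip
# (grandparents), 'tip#generations[0]' selects tip itself, and
# 'x#generations[2]' selects grandchildren of x; 'g' is accepted as a
# shorthand for 'generations'.
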
def subscriptset(repo, subset, x, y, order):
    raise error.ParseError(_("can't use a subscript in this context"))

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b, order):
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return an empty list when passed no args.
    The greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
               stopdepth=None):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
    return subset & s

@predicate('ancestors(set[, depth])', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'ancestors', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "ancestors" is a keyword
        raise error.ParseError(_('ancestors takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        n = getinteger(args['startdepth'],
                       "ancestors expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "ancestors" is a keyword
        n = getinteger(args['depth'], _("ancestors expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        stopdepth = n + 1
    return _ancestors(repo, subset, args['set'],
                      startdepth=startdepth, stopdepth=stopdepth)

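# Example (illustrative): 'ancestors(tip, depth=1)' runs with stopdepth=2,
# i.e. tip itself plus its parents; the internal startdepth argument would
# additionally drop generations closer than the given cutoff.
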
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def _childrenspec(repo, subset, x, n, order):
    """Changesets that are the Nth child of a changeset
    in set.
    """
    cs = set()
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            c = repo[r].children()
            if len(c) == 0:
                break
            if len(c) > 1:
                raise error.RepoLookupError(
                    _("revision in set has more than one child"))
            r = c[0].rev()
        else:
            cs.add(r)
    return subset & cs

def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    n = getinteger(n, _("~ expects a number"))
    if n < 0:
        # children lookup
        return _childrenspec(repo, subset, x, -n, order)
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            try:
                r = cl.parentrevs(r)[0]
            except error.WdirUnsupported:
                r = repo[r].parents()[0].rev()
        ps.add(r)
    return subset & ps

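# Example (illustrative): 'foo~2' resolves to foo's first-parent grandparent,
# while a negative count such as 'foo~-1' falls back to the children lookup
# above, which raises an error if any step encounters more than one child.
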
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = getstring(x, _("author requires a string"))
    kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
    return subset.filter(lambda x: matcher(repo[x].user()),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``         : csets topologically good/bad
    - ``range``                   : csets taking part in the bisection
    - ``pruned``                  : csets that are goods, bads or skipped
    - ``untested``                : csets whose fate is yet unknown
    - ``ignored``                 : csets ignored due to DAG topology
    - ``current``                 : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
        bms -= {node.nullrev}
    return subset & bms

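# Example (illustrative): 'bookmark()' selects every bookmarked revision,
# 'bookmark(stable)' looks up a single bookmark by literal name, and
# 'bookmark("re:release-.*")' matches bookmark names against a regular
# expression.
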
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo
    def getbranch(r):
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            return repo[r].branch()

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbranch(r)),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbranch(r)),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbranch(r) in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    msg = ("'bumped()' is deprecated, "
           "use 'phasedivergent()'")
    repo.ui.deprecwarn(msg, '4.4')

    return phasedivergent(repo, subset, x)

@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `phasedivergent`.
    """
    # i18n: "phasedivergent" is a keyword
    getargs(x, 0, 0, _("phasedivergent takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
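    # 'field' indexes the status tuple returned by repo.status()
    # (0: modified, 1: added, 2: removed); adds() above passes 1.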
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

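# Example (illustrative): 'date(">2017-01-01")' selects changesets committed
# after that day, and 'date("2017-01-01 to 2017-06-30")' those within the
# interval; see :hg:`help dates` for the accepted forms.
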
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))

    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    return subset.filter(lambda r: matcher(repo[r].description()),
                         condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False, startdepth=None,
                 stopdepth=None):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
    return subset & s

@predicate('descendants(set[, depth])', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'descendants', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "descendants" is a keyword
        raise error.ParseError(_('descendants takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        n = getinteger(args['startdepth'],
                       "descendants expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "descendants" is a keyword
        n = getinteger(args['depth'], _("descendants expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        stopdepth = n + 1
    return _descendants(repo, subset, args['set'],
                        startdepth=startdepth, stopdepth=stopdepth)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))

@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    msg = ("'divergent()' is deprecated, "
           "use 'contentdivergent()'")
    repo.ui.deprecwarn(msg, '4.4')

    return contentdivergent(repo, subset, x)

@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "contentdivergent" is a keyword
    getargs(x, 0, 0, _("contentdivergent takes no arguments"))
    contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
    return subset & contentdivergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))

@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s

@predicate('first(set, [n])', safe=True, takeorder=True)
def first(repo, subset, x, order):
    """An alias for limit().
    """
    return limit(repo, subset, x, order)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                    _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching the given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, the working
    directory's parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    if not matchmod.patkind(pat):
        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
        files = [f for f in repo[rev] if m(f)]
        if len(files) != 1:
            # i18n: "followlines" is a keyword
            raise error.ParseError(_("followlines expects exactly one file"))
        fname = files[0]

    # i18n: "followlines" is a keyword
    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    fromline, toline = util.processlinerange(fromline, toline)

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    if descend:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs

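# Example (illustrative): 'followlines(mercurial/revset.py, 100:110)' walks
# the ancestors of the working directory's parent that touched those lines;
# adding 'startrev=tip, descend=True' would instead walk descendants of tip
# in ascending order.
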
956 @predicate('all()', safe=True)
956 @predicate('all()', safe=True)
957 def getall(repo, subset, x):
957 def getall(repo, subset, x):
958 """All changesets, the same as ``0:tip``.
958 """All changesets, the same as ``0:tip``.
959 """
959 """
960 # i18n: "all" is a keyword
960 # i18n: "all" is a keyword
961 getargs(x, 0, 0, _("all takes no arguments"))
961 getargs(x, 0, 0, _("all takes no arguments"))
962 return subset & spanset(repo) # drop "null" if any
962 return subset & spanset(repo) # drop "null" if any
963
963
964 @predicate('grep(regex)')
964 @predicate('grep(regex)')
965 def grep(repo, subset, x):
965 def grep(repo, subset, x):
966 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
966 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
967 to ensure special escape characters are handled correctly. Unlike
967 to ensure special escape characters are handled correctly. Unlike
968 ``keyword(string)``, the match is case-sensitive.
968 ``keyword(string)``, the match is case-sensitive.
969 """
969 """
970 try:
970 try:
971 # i18n: "grep" is a keyword
971 # i18n: "grep" is a keyword
972 gr = re.compile(getstring(x, _("grep requires a string")))
972 gr = re.compile(getstring(x, _("grep requires a string")))
973 except re.error as e:
973 except re.error as e:
974 raise error.ParseError(_('invalid match pattern: %s') % e)
974 raise error.ParseError(_('invalid match pattern: %s') % e)
975
975
976 def matches(x):
976 def matches(x):
977 c = repo[x]
977 c = repo[x]
978 for e in c.files() + [c.user(), c.description()]:
978 for e in c.files() + [c.user(), c.description()]:
979 if gr.search(e):
979 if gr.search(e):
980 return True
980 return True
981 return False
981 return False
982
982
983 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
983 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
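
# Usage sketch: unlike keyword(), grep() takes a regular expression and
# matches case-sensitively against the description, user name and changed
# file names, e.g. (the pattern is illustrative):
#
#   hg log -r 'grep(r"issue\d+")'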

@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for
    # every revision is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
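
# Usage sketch: _matchfiles is internal; the user-facing predicates compile
# down to it, e.g. file('glob:*.py') below becomes _matchfiles('p:glob:*.py').
# A hypothetical direct invocation combining prefixes:
#
#   hg log -r '_matchfiles("p:*.py", "x:tests/**", "d:glob")'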

@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    ls = os.slice(ofs, ofs + lim)
    if order == followorder and lim > 1:
        return subset & ls
    return ls & subset

@predicate('last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    ls = os.slice(0, lim)
    if order == followorder and lim > 1:
        return subset & ls
    ls.reverse()
    return ls & subset
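
# Usage sketch for limit() and last(), which slice a set from opposite ends
# (the set expressions are illustrative):
#
#   limit(branch(default), 3)   # first three revisions on the default branch
#   limit(all(), 5, 10)         # revisions 10 through 14 of the repository
#   last(ancestors(.), 2)       # the two highest-numbered ancestors of '.'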

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that matches '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= {node.nullrev}
    return subset & names
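
# Usage sketch: named() resolves names through the registered namespaces
# ('bookmarks', 'tags' and 'branches' by default; extensions may add more):
#
#   hg log -r 'named("bookmarks")'     # all bookmarked changesets
#   hg log -r 'named("re:remote.*")'   # every namespace matching the pattern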

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except error.WdirUnsupported:
            rn = node.wdirrev
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        try:
            pm = repo.changelog._partialmatch(n)
            if pm is not None:
                rn = repo.changelog.rev(pm)
        except error.WdirUnsupported:
            rn = node.wdirrev

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
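
# Usage sketch: only() is a shorthand for a difference of ancestor sets
# ('feature' is an illustrative branch or bookmark name):
#
#   only(feature, default)   # equivalent to ::feature - ::default
#   only(.)                  # ancestors of '.' reachable from no other head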

@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = {_firstsrc(r) for r in dests}
    o -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o

@predicate('outgoing([path])', safe=False)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = {cl.rev(r) for r in outgoing.missing}
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[0])
        except error.WdirUnsupported:
            ps.add(repo[r].parents()[0].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                # ps collects revision numbers, not contexts
                ps.add(parents[1].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

def parentpost(repo, subset, x, order):
    return p1(repo, subset, x)

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            try:
                up(parentrevs(r))
            except error.WdirUnsupported:
                up(p.rev() for p in repo[r].parents())
    ps -= {node.nullrev}
    return subset & ps

def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    s = repo._phasecache.getrevset(repo, targets)
    return subset & s

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                ps.add(repo[r].parents()[0].rev())
        else:
            try:
                parents = cl.parentrevs(r)
                if parents[1] != node.nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps
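
# Usage sketch: parentspec() implements the '^' suffix operator:
#
#   .^      # first parent of the working directory's parent
#   .^2     # its second parent (empty when '.' is not a merge)
#   tip^0   # tip itself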

@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    return _phase(repo, subset, phases.draft, phases.secret)

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)

@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
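
# Usage sketch: with no field list, matching() compares the default
# 'metadata' fields (the revision number is illustrative):
#
#   matching(42)                    # same description, user and date as rev 42
#   matching(42, 'author branch')   # same author and branch as rev 42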

@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    if order == defineorder:
        l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}

def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts

@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
                                      firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1826
1826
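# Example usage (editor's sketch): from the command line, revisions on the
# default branch sorted newest-first by commit date:
#
#   hg log -r "sort(branch(default), -date)"
#
# A multi-key spec such as "sort(all(), user -date)" sorts by user and breaks
# ties by descending date; because list.sort() is stable, applying the keys in
# reverse order (as above) is equivalent to a single multi-key comparison.
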
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

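# Example usage (editor's sketch): changesets touching any subrepo whose path
# starts with "libs/", using the 're:' pattern form that util.stringmatcher()
# understands:
#
#   hg log -r "subrepo('re:^libs/')"
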
def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    cl = repo.unfiltered().changelog
    torev = cl.rev
    tonode = cl.node
    nodemap = cl.nodemap
    result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
    return smartset.baseset(result - repo.changelog.filteredrevs)

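# Illustration (editor's sketch): any node-level transformation can be lifted
# to revision numbers through this helper, e.g.
#
#   same = _mapbynodefunc(repo, subset, lambda nodes: list(nodes))
#
# Nodes absent from the (unfiltered) nodemap are silently dropped, and
# filtered revisions are subtracted from the resulting baseset.
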
@predicate('successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the changesets in the given set
    themselves"""
    s = getset(repo, fullreposet(repo), x)
    f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
    d = _mapbynodefunc(repo, s, f)
    return subset & d

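# Example usage (editor's sketch; 'abc123' is a hypothetical changeset id):
# follow an amended changeset to its rewrites, assuming obsolescence markers
# are enabled:
#
#   hg log -r "successors(abc123)"
#
# obsutil.allsuccessors() walks the markers transitively, so the result also
# includes successors of successors.
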
def _substringmatcher(pattern, casesensitive=True):
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher

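# Behavior note with a hedged example: for 'literal' patterns the exact-match
# matcher from util.stringmatcher() is replaced by a substring test, so
# _substringmatcher('bob', casesensitive=False)[2]('Bobby Tables') is True,
# while 're:' patterns keep their regex semantics unchanged.
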
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = {repo[tn].rev()}
        else:
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
    return subset & s

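# Example usage (editor's sketch): literal names hit the tags cache directly,
# while 're:' patterns scan the full tag list:
#
#   hg log -r "tag(1.9)"           # exactly the revision tagged 1.9
#   hg log -r "tag('re:^v\d+')"    # every tag that looks like v1, v2, ...
#
# Note that the no-argument form deliberately excludes the implicit 'tip' tag.
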
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    msg = ("'unstable()' is deprecated, "
           "use 'orphan()'")
    repo.ui.deprecwarn(msg, '4.4')

    return orphan(repo, subset, x)

@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _("orphan takes no arguments"))
    orphan = obsmod.getrevs(repo, 'orphan')
    return subset & orphan

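# Example usage (editor's sketch): orphans typically appear when an ancestor
# is amended or pruned underneath descendants that were not rewritten; they
# can be listed, and then stabilized with e.g. the evolve extension:
#
#   hg log -r "orphan()"
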
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)

@predicate('wdir()', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

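# Example usage (editor's sketch; wdir() support in individual commands is
# itself experimental): the working directory is addressed by the special
# in-memory revision number node.wdirrev, which never appears in the
# changelog, so commands with wdir support render it as a pseudo-revision:
#
#   hg log -r "wdir()" -T "{rev}:{node|short}\n"
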
def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)

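# Illustration (editor's sketch): _list carries a NUL-separated payload and is
# generated internally, e.g. when several -r options are folded into a single
# expression. A payload such as
#
#   _list('tip\x001.2\x00feature')
#
# resolves each entry in order, keeping only the first occurrence when two
# symbols name the same revision.
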
def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)

def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)

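# Illustration (from the definitions above): the three list predicates differ
# only in how each NUL-separated item is decoded -- _list() accepts arbitrary
# symbols, _intlist() expects decimal revision numbers ('1\x002\x003'), and
# _hexlist() expects full hexadecimal node ids, decoded via
# cl.rev(node.bin(r)).
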
methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "relation": relationset,
    "relsubscript": relsubscriptset,
    "subscript": subscriptset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}

def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    pass

def match(ui, spec, repo=None, order=defineorder):
    """Create a matcher for a single revision spec

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    return matchany(ui, [spec], repo=repo, order=order)

def matchany(ui, specs, repo=None, order=defineorder, localalias=None):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If order=followorder, a matcher takes the ordering specified by the input
    set.

    If localalias is not None, it is a dict {name: definitionstring}. It takes
    precedence over the [revsetalias] config section.
    """
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        tree = ('or',
                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))

    aliases = []
    warn = None
    if ui:
        aliases.extend(ui.configitems('revsetalias'))
        warn = ui.warn
    if localalias:
        aliases.extend(localalias.items())
    if aliases:
        tree = revsetlang.expandaliases(tree, aliases, warn=warn)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree, order)
    tree = revsetlang.optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)

def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        return getset(repo, subset, tree)
    return mfunc

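# Example usage (editor's sketch of the API defined above): parse once, then
# evaluate the resulting matcher against a repository:
#
#   m = match(repo.ui, "heads(branch(default))", repo=repo)
#   for rev in m(repo):    # m(repo) yields a smartset of revision numbers
#       repo.ui.write("%d\n" % rev)
#
# matchany() additionally accepts localalias, e.g.
#
#   matchany(repo.ui, ["mine()"], repo=repo,
#            localalias={'mine': 'user("alice")'})
#
# where the local alias definition takes precedence over [revsetalias].
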
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

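# Example (editor's sketch of the registrar pattern this function consumes;
# 'fromalice' is a hypothetical predicate): an extension typically declares
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('fromalice()', safe=True)
#   def fromalice(repo, subset, x):
#       return subset.filter(lambda r: repo[r].user() == 'alice')
#
# and the extension loader then calls loadpredicate() with that registrar
# object, adding 'fromalice' to symbols (and to safesymbols, thanks to
# safe=True).
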
# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()