branchcache: introduce hasbranch()...
Pulkit Goyal
r42171:0bd730fb default
@@ -1,616 +1,620 b''
# branchmap.py - logic to compute, maintain and store the branchmap for a local repo
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from . import (
    encoding,
    error,
    pycompat,
    scmutil,
    util,
)
from .utils import (
    stringutil,
)

calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from


### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
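
# Illustrative only, not part of the original file: walking the nearest-subset
# chain for the default 'visible' filter visits each broader cache that
# updatecache() below can fall back on:
#
#     name = 'visible'
#     while name in subsettable:
#         name = subsettable[name]    # 'served', then 'immutable', then 'base'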

class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""
    def __init__(self):
        self._per_filter = {}

    def __getitem__(self, repo):
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filters above 'served' are unlikely to be fetched from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        self._per_filter.clear()


class branchcache(object):
    """A dict-like object that holds the branch heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

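    # Illustrative only, not part of the original file: with placeholder
    # hashes, a cache file for a filtered view whose tip is revision 4023
    # could look like:
    #
    #     <tipnode 40-hex> 4023 <filteredhash 40-hex>
    #     <head 40-hex> o default
    #     <head 40-hex> c stable
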
    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self.entries = dict(entries)

    def __iter__(self):
        return iter(self.entries)

    def __setitem__(self, key, value):
        self.entries[key] = value

    def __getitem__(self, key):
        return self.entries[key]

    def iteritems(self):
        return self.entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        return label in self.entries

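    # Illustrative usage, not part of the original change: callers can test
    # for branch existence without touching the heads themselves, e.g.:
    #
    #     if repo.branchmap().hasbranch('default'):
    #         ...
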
    @classmethod
    def fromfile(cls, repo):
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        cl = repo.changelog
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError(
                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
            self.entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid for a given repo

        - False when the cached tipnode is unknown or if we detect a strip.
        - True when the cache is up to date or a subset of the current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        return self.entries.itervalues()

    def copy(self):
        """return a deep copy of the branchcache object"""
        return branchcache(
            self.entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self.entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by a read-only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        the missing heads, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This has been tested True on all internal usages of this
            # function. Run it again in case of doubt.
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # the cache keys are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)


class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""
    def write(self, repo):
        pass


# Revision branch info cache

_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7fffffff
_rbccloseflag = 0x80000000
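
# Illustrative only, not part of the original file: packing one record for a
# branch-closing head whose branch name has index 5 (using struct.pack here
# instead of the in-place pack_into):
#
#     rec = struct.pack(_rbcrecfmt, node[:_rbcnodelen], 5 | _rbccloseflag)
#     prefix, field = unpack_from(_rbcrecfmt, rec)
#     close = bool(field & _rbccloseflag)       # True
#     branchidx = field & _rbcbranchidxmask     # 5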

class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right
    result while converging towards full recovery ... assuming no incorrectly
    matching node hashes.
    The record also contains 4 bytes where 31 bits contain the index of the
    branch and the last bit indicates that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (the file does not exist, bad
        # permissions, etc.), the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()

@@ -1,3092 +1,3092 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

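# Illustrative only, not shown in this hunk: later in this file, properties
# backed by store files are declared with these decorators, roughly:
#
#     @storecache('00changelog.i')
#     def changelog(self):
#         ...   # rebuilt whenever .hg/store/00changelog.i changes
#
# so the cached property is invalidated when the underlying file changes.
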
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to an unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

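# Illustrative only, not part of the original file: a method wrapped with
# unfilteredmethod always runs against the full repo, e.g.:
#
#     @unfilteredmethod
#     def somerepomethod(self):
#         ...   # here `self` is repo.unfiltered()
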
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

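# Illustrative usage, not part of the original file: this executor resolves
# each future immediately, so the common calling pattern is simply:
#
#     with peer.commandexecutor() as e:
#         heads = e.callcommand('heads', {}).result()
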
208 @interfaceutil.implementer(repository.ipeercommands)
208 @interfaceutil.implementer(repository.ipeercommands)
209 class localpeer(repository.peer):
209 class localpeer(repository.peer):
210 '''peer for a local repo; reflects only the most recent API'''
210 '''peer for a local repo; reflects only the most recent API'''
211
211
212 def __init__(self, repo, caps=None):
212 def __init__(self, repo, caps=None):
213 super(localpeer, self).__init__()
213 super(localpeer, self).__init__()
214
214
215 if caps is None:
215 if caps is None:
216 caps = moderncaps.copy()
216 caps = moderncaps.copy()
217 self._repo = repo.filtered('served')
217 self._repo = repo.filtered('served')
218 self.ui = repo.ui
218 self.ui = repo.ui
219 self._caps = repo._restrictcapabilities(caps)
219 self._caps = repo._restrictcapabilities(caps)
220
220
221 # Begin of _basepeer interface.
221 # Begin of _basepeer interface.
222
222
223 def url(self):
223 def url(self):
224 return self._repo.url()
224 return self._repo.url()
225
225
226 def local(self):
226 def local(self):
227 return self._repo
227 return self._repo
228
228
229 def peer(self):
229 def peer(self):
230 return self
230 return self
231
231
232 def canpush(self):
232 def canpush(self):
233 return True
233 return True
234
234
235 def close(self):
235 def close(self):
236 self._repo.close()
236 self._repo.close()
237
237
238 # End of _basepeer interface.
238 # End of _basepeer interface.
239
239
240 # Begin of _basewirecommands interface.
240 # Begin of _basewirecommands interface.
241
241
242 def branchmap(self):
242 def branchmap(self):
243 return self._repo.branchmap()
243 return self._repo.branchmap()
244
244
245 def capabilities(self):
245 def capabilities(self):
246 return self._caps
246 return self._caps
247
247
248 def clonebundles(self):
248 def clonebundles(self):
249 return self._repo.tryread('clonebundles.manifest')
249 return self._repo.tryread('clonebundles.manifest')
250
250
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 """Used to test argument passing over the wire"""
252 """Used to test argument passing over the wire"""
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 pycompat.bytestr(four),
254 pycompat.bytestr(four),
255 pycompat.bytestr(five))
255 pycompat.bytestr(five))
256
256
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 **kwargs):
258 **kwargs):
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
260 common=common, bundlecaps=bundlecaps,
260 common=common, bundlecaps=bundlecaps,
261 **kwargs)[1]
261 **kwargs)[1]
262 cb = util.chunkbuffer(chunks)
262 cb = util.chunkbuffer(chunks)
263
263
264 if exchange.bundle2requested(bundlecaps):
264 if exchange.bundle2requested(bundlecaps):
265 # When requesting a bundle2, getbundle returns a stream to make the
265 # When requesting a bundle2, getbundle returns a stream to make the
266 # wire level function happier. We need to build a proper object
266 # wire level function happier. We need to build a proper object
267 # from it in local peer.
267 # from it in local peer.
268 return bundle2.getunbundler(self.ui, cb)
268 return bundle2.getunbundler(self.ui, cb)
269 else:
269 else:
270 return changegroup.getunbundler('01', cb, None)
270 return changegroup.getunbundler('01', cb, None)
271
271
272 def heads(self):
272 def heads(self):
273 return self._repo.heads()
273 return self._repo.heads()
274
274
275 def known(self, nodes):
275 def known(self, nodes):
276 return self._repo.known(nodes)
276 return self._repo.known(nodes)
277
277
278 def listkeys(self, namespace):
278 def listkeys(self, namespace):
279 return self._repo.listkeys(namespace)
279 return self._repo.listkeys(namespace)
280
280
281 def lookup(self, key):
281 def lookup(self, key):
282 return self._repo.lookup(key)
282 return self._repo.lookup(key)
283
283
284 def pushkey(self, namespace, key, old, new):
284 def pushkey(self, namespace, key, old, new):
285 return self._repo.pushkey(namespace, key, old, new)
285 return self._repo.pushkey(namespace, key, old, new)
286
286
287 def stream_out(self):
287 def stream_out(self):
288 raise error.Abort(_('cannot perform stream clone against local '
288 raise error.Abort(_('cannot perform stream clone against local '
289 'peer'))
289 'peer'))
290
290
291 def unbundle(self, bundle, heads, url):
291 def unbundle(self, bundle, heads, url):
292 """apply a bundle on a repo
292 """apply a bundle on a repo
293
293
294 This function handles the repo locking itself."""
294 This function handles the repo locking itself."""
295 try:
295 try:
296 try:
296 try:
297 bundle = exchange.readbundle(self.ui, bundle, None)
297 bundle = exchange.readbundle(self.ui, bundle, None)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
299 if util.safehasattr(ret, 'getchunks'):
299 if util.safehasattr(ret, 'getchunks'):
300 # This is a bundle20 object, turn it into an unbundler.
300 # This is a bundle20 object, turn it into an unbundler.
301 # This little dance should be dropped eventually when the
301 # This little dance should be dropped eventually when the
302 # API is finally improved.
302 # API is finally improved.
303 stream = util.chunkbuffer(ret.getchunks())
303 stream = util.chunkbuffer(ret.getchunks())
304 ret = bundle2.getunbundler(self.ui, stream)
304 ret = bundle2.getunbundler(self.ui, stream)
305 return ret
305 return ret
306 except Exception as exc:
306 except Exception as exc:
307 # If the exception contains output salvaged from a bundle2
307 # If the exception contains output salvaged from a bundle2
308 # reply, we need to make sure it is printed before continuing
308 # reply, we need to make sure it is printed before continuing
309 # to fail. So we build a bundle2 with such output and consume
309 # to fail. So we build a bundle2 with such output and consume
310 # it directly.
310 # it directly.
311 #
311 #
312 # This is not very elegant but allows a "simple" solution for
312 # This is not very elegant but allows a "simple" solution for
313 # issue4594
313 # issue4594
314 output = getattr(exc, '_bundle2salvagedoutput', ())
314 output = getattr(exc, '_bundle2salvagedoutput', ())
315 if output:
315 if output:
316 bundler = bundle2.bundle20(self._repo.ui)
316 bundler = bundle2.bundle20(self._repo.ui)
317 for out in output:
317 for out in output:
318 bundler.addpart(out)
318 bundler.addpart(out)
319 stream = util.chunkbuffer(bundler.getchunks())
319 stream = util.chunkbuffer(bundler.getchunks())
320 b = bundle2.getunbundler(self.ui, stream)
320 b = bundle2.getunbundler(self.ui, stream)
321 bundle2.processbundle(self._repo, b)
321 bundle2.processbundle(self._repo, b)
322 raise
322 raise
323 except error.PushRaced as exc:
323 except error.PushRaced as exc:
324 raise error.ResponseError(_('push failed:'),
324 raise error.ResponseError(_('push failed:'),
325 stringutil.forcebytestr(exc))
325 stringutil.forcebytestr(exc))
326
326
327 # End of _basewirecommands interface.
327 # End of _basewirecommands interface.
328
328
329 # Begin of peer interface.
329 # Begin of peer interface.
330
330
331 def commandexecutor(self):
331 def commandexecutor(self):
332 return localcommandexecutor(self)
332 return localcommandexecutor(self)
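
    # Illustrative usage sketch (not part of the upstream module): callers
    # normally drive a peer through the executor interface above rather than
    # calling the wire protocol methods directly, e.g.:
    #
    #   with peer.commandexecutor() as e:
    #       heads = e.callcommand(b'heads', {}).result()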

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
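
# Illustrative registration sketch (hypothetical extension code, modeled on
# how extensions such as lfs use this hook; b'exp-myfeature' is an assumed
# requirement name):
#
#   def featuresetup(ui, supported):
#       # let core open repositories that carry our requirement
#       supported |= {b'exp-myfeature'}
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)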

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False
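
# Illustrative monkeypatching sketch (hypothetical, following the advice in
# the docstring above; b'hgrc-extra' is an assumed file name):
#
#   def myloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           return True
#       except IOError:
#           return loaded
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', myloadhgrc)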

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
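
# For reference, a sketch of an hgrc exercising some of the knobs consulted
# above (values are illustrative only, not recommendations):
#
#   [format]
#   manifestcachesize = 4
#   chunkcachesize = 65536
#   maxchainlen = 500
#
#   [storage]
#   revlog.optimize-delta-parent-choice = yes
#
#   [experimental]
#   sparse-read = yes
#   sparse-read.min-gap-size = 65536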

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
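
# Illustrative wrapping sketch (hypothetical extension code): because the
# entries above look up ``makemain``/``makefilestorage`` lazily, an extension
# can wrap them to mix an extra base class into every derived repository
# type. ``MyRepoMixin`` is assumed, not part of this module:
#
#   def wrappedmakemain(orig, **kwargs):
#       cls = orig(**kwargs)
#       return type(cls.__name__, (MyRepoMixin, cls), {})
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'makemain', wrappedmakemain)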

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
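
    # Illustrative extension sketch (hypothetical; 'myext-state' is an
    # assumed file name): registering an extension-private file as safe to
    # write without 'wlock', as the comment above invites:
    #
    #   def reposetup(ui, repo):
    #       localrepo.localrepository._wlockfreeprefix.add('myext-state')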

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs
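
    # The wards built above are only installed when developer warnings are
    # active; sketch of the relevant configuration (see the configbool
    # checks in __init__):
    #
    #   [devel]
    #   all-warnings = yes
    #   check-locks = yes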
1088
1088
1089 def close(self):
1089 def close(self):
1090 self._writecaches()
1090 self._writecaches()
1091
1091
1092 def _writecaches(self):
1092 def _writecaches(self):
1093 if self._revbranchcache:
1093 if self._revbranchcache:
1094 self._revbranchcache.write()
1094 self._revbranchcache.write()
1095
1095
1096 def _restrictcapabilities(self, caps):
1096 def _restrictcapabilities(self, caps):
1097 if self.ui.configbool('experimental', 'bundle2-advertise'):
1097 if self.ui.configbool('experimental', 'bundle2-advertise'):
1098 caps = set(caps)
1098 caps = set(caps)
1099 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1099 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1100 role='client'))
1100 role='client'))
1101 caps.add('bundle2=' + urlreq.quote(capsblob))
1101 caps.add('bundle2=' + urlreq.quote(capsblob))
1102 return caps
1102 return caps
1103
1103
1104 def _writerequirements(self):
1104 def _writerequirements(self):
1105 scmutil.writerequires(self.vfs, self.requirements)
1105 scmutil.writerequires(self.vfs, self.requirements)
1106
1106
1107 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1107 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1108 # self -> auditor -> self._checknested -> self
1108 # self -> auditor -> self._checknested -> self
1109
1109
1110 @property
1110 @property
1111 def auditor(self):
1111 def auditor(self):
1112 # This is only used by context.workingctx.match in order to
1112 # This is only used by context.workingctx.match in order to
1113 # detect files in subrepos.
1113 # detect files in subrepos.
1114 return pathutil.pathauditor(self.root, callback=self._checknested)
1114 return pathutil.pathauditor(self.root, callback=self._checknested)
1115
1115
1116 @property
1116 @property
1117 def nofsauditor(self):
1117 def nofsauditor(self):
1118 # This is only used by context.basectx.match in order to detect
1118 # This is only used by context.basectx.match in order to detect
1119 # files in subrepos.
1119 # files in subrepos.
1120 return pathutil.pathauditor(self.root, callback=self._checknested,
1120 return pathutil.pathauditor(self.root, callback=self._checknested,
1121 realfs=False, cached=True)
1121 realfs=False, cached=True)
1122
1122
1123 def _checknested(self, path):
1123 def _checknested(self, path):
1124 """Determine if path is a legal nested repository."""
1124 """Determine if path is a legal nested repository."""
1125 if not path.startswith(self.root):
1125 if not path.startswith(self.root):
1126 return False
1126 return False
1127 subpath = path[len(self.root) + 1:]
1127 subpath = path[len(self.root) + 1:]
1128 normsubpath = util.pconvert(subpath)
1128 normsubpath = util.pconvert(subpath)
1129
1129
1130 # XXX: Checking against the current working copy is wrong in
1130 # XXX: Checking against the current working copy is wrong in
1131 # the sense that it can reject things like
1131 # the sense that it can reject things like
1132 #
1132 #
1133 # $ hg cat -r 10 sub/x.txt
1133 # $ hg cat -r 10 sub/x.txt
1134 #
1134 #
1135 # if sub/ is no longer a subrepository in the working copy
1135 # if sub/ is no longer a subrepository in the working copy
1136 # parent revision.
1136 # parent revision.
1137 #
1137 #
1138 # However, it can of course also allow things that would have
1138 # However, it can of course also allow things that would have
1139 # been rejected before, such as the above cat command if sub/
1139 # been rejected before, such as the above cat command if sub/
1140 # is a subrepository now, but was a normal directory before.
1140 # is a subrepository now, but was a normal directory before.
1141 # The old path auditor would have rejected by mistake since it
1141 # The old path auditor would have rejected by mistake since it
1142 # panics when it sees sub/.hg/.
1142 # panics when it sees sub/.hg/.
1143 #
1143 #
1144 # All in all, checking against the working copy seems sensible
1144 # All in all, checking against the working copy seems sensible
1145 # since we want to prevent access to nested repositories on
1145 # since we want to prevent access to nested repositories on
1146 # the filesystem *now*.
1146 # the filesystem *now*.
1147 ctx = self[None]
1147 ctx = self[None]
1148 parts = util.splitpath(subpath)
1148 parts = util.splitpath(subpath)
1149 while parts:
1149 while parts:
1150 prefix = '/'.join(parts)
1150 prefix = '/'.join(parts)
1151 if prefix in ctx.substate:
1151 if prefix in ctx.substate:
1152 if prefix == normsubpath:
1152 if prefix == normsubpath:
1153 return True
1153 return True
1154 else:
1154 else:
1155 sub = ctx.sub(prefix)
1155 sub = ctx.sub(prefix)
1156 return sub.checknested(subpath[len(prefix) + 1:])
1156 return sub.checknested(subpath[len(prefix) + 1:])
1157 else:
1157 else:
1158 parts.pop()
1158 parts.pop()
1159 return False
1159 return False
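
    # Illustrative sketch, not part of the original source: given a working
    # copy whose substate declares a subrepo at 'sub',
    #   repo._checknested(repo.root + '/sub')      -> True
    #   repo._checknested(repo.root + '/sub/deep') -> delegated to the
    #                                                 subrepo's checknested()
    #   repo._checknested(repo.root + '/plain')    -> False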

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
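
    # Illustrative sketch, not part of the original source: with a narrowspec
    # that includes only 'src/', narrowmatch() matches 'src/a.c' but not
    # 'docs/readme'; narrowmatch(m, includeexact=True) additionally keeps any
    # path explicitly listed in m.files(), even outside 'src/'.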

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
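
    # Illustrative summary, not part of the original source, of the changeid
    # forms handled above:
    #   repo[None]       -> workingctx
    #   repo[0]          -> changectx for revision 0
    #   repo['tip']      -> changectx for the changelog tip
    #   repo['<40-hex>'] -> changectx looked up by hex node
    #   repo[0:3]        -> list of changectxs, skipping filtered revisions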

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        prefix is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
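
    # Illustrative sketch, not part of the original source; '%d' is one of
    # the escapes documented in revsetlang.formatspec:
    #   for rev in repo.revs('%d:: and not public()', startrev):
    #       ...  # rev is an integer revision number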

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
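
    # Illustrative sketch, not part of the original source: expand user
    # aliases while pinning a local definition that overrides them:
    #   revs = repo.anyrevs(['mine()'], user=True,
    #                       localalias={'mine': 'author(alice)'})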

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
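
    # Illustrative note, not part of the original source: the mapping always
    # contains 'tip' (added in _findtags() below), while tags pointing at
    # nodes unknown to this repository view are silently dropped above.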

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()
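
    # Illustrative sketch, not part of the original source: lookupbranch()
    # first asks the branch cache whether ``key`` names a branch, and only
    # falls back to resolving ``key`` as a revision symbol otherwise:
    #   repo.lookupbranch('default') -> 'default' (a known branch name)
    #   repo.lookupbranch('tip')     -> branch name of the tip revision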

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
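
    # Illustrative sketch, not part of the original source: filter sections
    # map file patterns to commands, e.g. an hgrc like (hypothetical command):
    #
    #   [encode]
    #   **.txt = tr -d '\r'
    #
    # Unless a registered data filter matches, the data is piped through the
    # shell command via procutil.filter() as set up above.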

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
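
    # Illustrative sketch, not part of the original source, of the flag
    # handling above:
    #   repo.wwrite('script.sh', data, 'x')  # regular file with exec bit
    #   repo.wwrite('link', 'target', 'l')   # written as a symlink
    #   repo.wwrite('plain.txt', data, '')   # regular file, no flags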

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tag was touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
1773 tracktags = lambda x: None
1773 tracktags = lambda x: None
1774 # experimental config: experimental.hook-track-tags
1774 # experimental config: experimental.hook-track-tags
1775 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1775 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1776 if desc != 'strip' and shouldtracktags:
1776 if desc != 'strip' and shouldtracktags:
1777 oldheads = self.changelog.headrevs()
1777 oldheads = self.changelog.headrevs()
1778 def tracktags(tr2):
1778 def tracktags(tr2):
1779 repo = reporef()
1779 repo = reporef()
1780 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1780 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1781 newheads = repo.changelog.headrevs()
1781 newheads = repo.changelog.headrevs()
1782 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1782 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1783 # notes: we compare lists here.
1783 # notes: we compare lists here.
1784 # As we do it only once buiding set would not be cheaper
1784 # As we do it only once buiding set would not be cheaper
1785 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1785 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1786 if changes:
1786 if changes:
1787 tr2.hookargs['tag_moved'] = '1'
1787 tr2.hookargs['tag_moved'] = '1'
1788 with repo.vfs('changes/tags.changes', 'w',
1788 with repo.vfs('changes/tags.changes', 'w',
1789 atomictemp=True) as changesfile:
1789 atomictemp=True) as changesfile:
1790 # note: we do not register the file to the transaction
1790 # note: we do not register the file to the transaction
1791 # because we needs it to still exist on the transaction
1791 # because we needs it to still exist on the transaction
1792 # is close (for txnclose hooks)
1792 # is close (for txnclose hooks)
1793 tagsmod.writediff(changesfile, changes)
1793 tagsmod.writediff(changesfile, changes)
1794 def validate(tr2):
1794 def validate(tr2):
1795 """will run pre-closing hooks"""
1795 """will run pre-closing hooks"""
1796 # XXX the transaction API is a bit lacking here so we take a hacky
1796 # XXX the transaction API is a bit lacking here so we take a hacky
1797 # path for now
1797 # path for now
1798 #
1798 #
1799 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1799 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1800 # dict is copied before these run. In addition we needs the data
1800 # dict is copied before these run. In addition we needs the data
1801 # available to in memory hooks too.
1801 # available to in memory hooks too.
1802 #
1802 #
1803 # Moreover, we also need to make sure this runs before txnclose
1803 # Moreover, we also need to make sure this runs before txnclose
1804 # hooks and there is no "pending" mechanism that would execute
1804 # hooks and there is no "pending" mechanism that would execute
1805 # logic only if hooks are about to run.
1805 # logic only if hooks are about to run.
1806 #
1806 #
1807 # Fixing this limitation of the transaction is also needed to track
1807 # Fixing this limitation of the transaction is also needed to track
1808 # other families of changes (bookmarks, phases, obsolescence).
1808 # other families of changes (bookmarks, phases, obsolescence).
1809 #
1809 #
1810 # This will have to be fixed before we remove the experimental
1810 # This will have to be fixed before we remove the experimental
1811 # gating.
1811 # gating.
1812 tracktags(tr2)
1812 tracktags(tr2)
1813 repo = reporef()
1813 repo = reporef()
1814 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1814 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1815 scmutil.enforcesinglehead(repo, tr2, desc)
1815 scmutil.enforcesinglehead(repo, tr2, desc)
1816 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1816 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1817 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1817 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1818 args = tr.hookargs.copy()
1818 args = tr.hookargs.copy()
1819 args.update(bookmarks.preparehookargs(name, old, new))
1819 args.update(bookmarks.preparehookargs(name, old, new))
1820 repo.hook('pretxnclose-bookmark', throw=True,
1820 repo.hook('pretxnclose-bookmark', throw=True,
1821 **pycompat.strkwargs(args))
1821 **pycompat.strkwargs(args))
1822 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1822 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1823 cl = repo.unfiltered().changelog
1823 cl = repo.unfiltered().changelog
1824 for rev, (old, new) in tr.changes['phases'].items():
1824 for rev, (old, new) in tr.changes['phases'].items():
1825 args = tr.hookargs.copy()
1825 args = tr.hookargs.copy()
1826 node = hex(cl.node(rev))
1826 node = hex(cl.node(rev))
1827 args.update(phases.preparehookargs(node, old, new))
1827 args.update(phases.preparehookargs(node, old, new))
1828 repo.hook('pretxnclose-phase', throw=True,
1828 repo.hook('pretxnclose-phase', throw=True,
1829 **pycompat.strkwargs(args))
1829 **pycompat.strkwargs(args))
1830
1830
1831 repo.hook('pretxnclose', throw=True,
1831 repo.hook('pretxnclose', throw=True,
1832 **pycompat.strkwargs(tr.hookargs))
1832 **pycompat.strkwargs(tr.hookargs))
1833 def releasefn(tr, success):
1833 def releasefn(tr, success):
1834 repo = reporef()
1834 repo = reporef()
1835 if success:
1835 if success:
1836 # this should be explicitly invoked here, because
1836 # this should be explicitly invoked here, because
1837 # in-memory changes aren't written out at closing
1837 # in-memory changes aren't written out at closing
1838 # transaction, if tr.addfilegenerator (via
1838 # transaction, if tr.addfilegenerator (via
1839 # dirstate.write or so) isn't invoked while
1839 # dirstate.write or so) isn't invoked while
1840 # transaction running
1840 # transaction running
1841 repo.dirstate.write(None)
1841 repo.dirstate.write(None)
1842 else:
1842 else:
1843 # discard all changes (including ones already written
1843 # discard all changes (including ones already written
1844 # out) in this transaction
1844 # out) in this transaction
1845 narrowspec.restorebackup(self, 'journal.narrowspec')
1845 narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
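
    # Illustrative usage sketch (not part of the original module): callers
    # typically drive the returned transaction as a context manager, e.g.
    #
    #     with repo.transaction('my-operation') as tr:
    #         ...  # write store data; 'my-operation' is a hypothetical name
    #
    # On success, the finalizers registered above (fncache flush, txnclose
    # hooks, cache warming) run; on failure, the abort path replays the
    # journal and fires the txnabort hook instead.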

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
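
    # Illustrative usage sketch (not part of the original module): this is
    # what 'hg recover' invokes after an interrupted transaction, e.g.
    #
    #     if repo.recover():
    #         ...  # journal replayed; caches already invalidated
    #
    # Unlike rollback(), recover() only replays a half-written transaction;
    # it never undoes one that completed successfully.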

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
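
    # Illustrative sketch (not part of the original module): the weakref
    # above breaks the repo -> transaction -> callback -> repo cycle. The
    # same pattern in isolation, with a hypothetical 'repo' object:
    #
    #     import weakref
    #     reporef = weakref.ref(repo)       # no strong reference kept
    #     def callback():
    #         repo = reporef()              # may be None if repo was freed
    #         if repo is not None:
    #             repo.updatecaches()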

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()
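
    # Illustrative usage sketch (not part of the original module):
    #
    #     repo.updatecaches(full=True)
    #
    # is the expensive variant used by cache-warming commands (e.g. 'hg
    # debugupdatecaches'); the cheap form runs automatically as the
    # '-warm-cache' post-close callback registered in transaction() above.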

    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
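
    # Illustrative usage sketch (not part of the original module): the
    # documented lock ordering is wlock before lock, matching how commit()
    # below acquires them, e.g.
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to modify both working copy and store
    #
    # Taking them in the opposite order triggers the devel warning emitted
    # in wlock() and risks deadlock against other writers.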

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
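
    # Illustrative sketch (not part of the original module): when copy
    # metadata is recorded above, the filelog entry carries a meta dict of
    # the shape
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node>'}
    #
    # with fparent1 set to nullid, which is the "look up the copy data"
    # signal described in the comment block; 'foo' is a hypothetical source
    # path.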

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
                # hack for commands that use a temporary commit (eg: histedit):
                # the temporary commit may already have been stripped before
                # the hook is released
                if self.changelog.hasnode(ret):
                    self.hook("commit", node=node, parent1=parent1,
                              parent2=parent2)
            self._afterlock(commithook)
            return ret
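
    # Illustrative usage sketch (not part of the original module):
    #
    #     node = repo.commit(text='fix parser', user='alice <a@example.com>')
    #     if node is None:
    #         ...  # nothing changed and empty commits were not allowed
    #
    # The text and user here are hypothetical; note the documented contract
    # that None is returned when there is nothing to commit.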

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n
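
    # Illustrative sketch (not part of the original module): the
    # pretxncommit hook fired above can veto the new changelog entry while
    # the transaction is still open, e.g. with a hypothetical hgrc entry
    #
    #     [hooks]
    #     pretxncommit.lint = python:myhooks.lintcheck
    #
    # A failing hook aborts the transaction, so the commit never becomes
    # visible.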
2624
2624
2625 @unfilteredmethod
2625 @unfilteredmethod
2626 def destroying(self):
2626 def destroying(self):
2627 '''Inform the repository that nodes are about to be destroyed.
2627 '''Inform the repository that nodes are about to be destroyed.
2628 Intended for use by strip and rollback, so there's a common
2628 Intended for use by strip and rollback, so there's a common
2629 place for anything that has to be done before destroying history.
2629 place for anything that has to be done before destroying history.
2630
2630
2631 This is mostly useful for saving state that is in memory and waiting
2631 This is mostly useful for saving state that is in memory and waiting
2632 to be flushed when the current lock is released. Because a call to
2632 to be flushed when the current lock is released. Because a call to
2633 destroyed is imminent, the repo will be invalidated causing those
2633 destroyed is imminent, the repo will be invalidated causing those
2634 changes to stay in memory (waiting for the next unlock), or vanish
2634 changes to stay in memory (waiting for the next unlock), or vanish
2635 completely.
2635 completely.
2636 '''
2636 '''
2637 # When using the same lock to commit and strip, the phasecache is left
2637 # When using the same lock to commit and strip, the phasecache is left
2638 # dirty after committing. Then when we strip, the repo is invalidated,
2638 # dirty after committing. Then when we strip, the repo is invalidated,
2639 # causing those changes to disappear.
2639 # causing those changes to disappear.
2640 if '_phasecache' in vars(self):
2640 if '_phasecache' in vars(self):
2641 self._phasecache.write()
2641 self._phasecache.write()
2642
2642
2643 @unfilteredmethod
2643 @unfilteredmethod
2644 def destroyed(self):
2644 def destroyed(self):
2645 '''Inform the repository that nodes have been destroyed.
2645 '''Inform the repository that nodes have been destroyed.
2646 Intended for use by strip and rollback, so there's a common
2646 Intended for use by strip and rollback, so there's a common
2647 place for anything that has to be done after destroying history.
2647 place for anything that has to be done after destroying history.
2648 '''
2648 '''
2649 # When one tries to:
2649 # When one tries to:
2650 # 1) destroy nodes thus calling this method (e.g. strip)
2650 # 1) destroy nodes thus calling this method (e.g. strip)
2651 # 2) use phasecache somewhere (e.g. commit)
2651 # 2) use phasecache somewhere (e.g. commit)
2652 #
2652 #
2653 # then 2) will fail because the phasecache contains nodes that were
2653 # then 2) will fail because the phasecache contains nodes that were
2654 # removed. We can either remove phasecache from the filecache,
2654 # removed. We can either remove phasecache from the filecache,
2655 # causing it to reload next time it is accessed, or simply filter
2655 # causing it to reload next time it is accessed, or simply filter
2656 # the removed nodes now and write the updated cache.
2656 # the removed nodes now and write the updated cache.
2657 self._phasecache.filterunknown(self)
2657 self._phasecache.filterunknown(self)
2658 self._phasecache.write()
2658 self._phasecache.write()
2659
2659
2660 # refresh all repository caches
2660 # refresh all repository caches
2661 self.updatecaches()
2661 self.updatecaches()
2662
2662
2663 # Ensure the persistent tag cache is updated. Doing it now
2663 # Ensure the persistent tag cache is updated. Doing it now
2664 # means that the tag cache only has to worry about destroyed
2664 # means that the tag cache only has to worry about destroyed
2665 # heads immediately after a strip/rollback. That in turn
2665 # heads immediately after a strip/rollback. That in turn
2666 # guarantees that "cachetip == currenttip" (comparing both rev
2666 # guarantees that "cachetip == currenttip" (comparing both rev
2667 # and node) always means no nodes have been added or destroyed.
2667 # and node) always means no nodes have been added or destroyed.
2668
2668
2669 # XXX this is suboptimal when qrefresh'ing: we strip the current
2669 # XXX this is suboptimal when qrefresh'ing: we strip the current
2670 # head, refresh the tag cache, then immediately add a new head.
2670 # head, refresh the tag cache, then immediately add a new head.
2671 # But I think doing it this way is necessary for the "instant
2671 # But I think doing it this way is necessary for the "instant
2672 # tag cache retrieval" case to work.
2672 # tag cache retrieval" case to work.
2673 self.invalidate()
2673 self.invalidate()
2674
2674
2675 def status(self, node1='.', node2=None, match=None,
2675 def status(self, node1='.', node2=None, match=None,
2676 ignored=False, clean=False, unknown=False,
2676 ignored=False, clean=False, unknown=False,
2677 listsubrepos=False):
2677 listsubrepos=False):
2678 '''a convenience method that calls node1.status(node2)'''
2678 '''a convenience method that calls node1.status(node2)'''
2679 return self[node1].status(node2, match, ignored, clean, unknown,
2679 return self[node1].status(node2, match, ignored, clean, unknown,
2680 listsubrepos)
2680 listsubrepos)
2681
2681
2682 def addpostdsstatus(self, ps):
2682 def addpostdsstatus(self, ps):
2683 """Add a callback to run within the wlock, at the point at which status
2683 """Add a callback to run within the wlock, at the point at which status
2684 fixups happen.
2684 fixups happen.
2685
2685
2686 On status completion, callback(wctx, status) will be called with the
2686 On status completion, callback(wctx, status) will be called with the
2687 wlock held, unless the dirstate has changed from underneath or the wlock
2687 wlock held, unless the dirstate has changed from underneath or the wlock
2688 couldn't be grabbed.
2688 couldn't be grabbed.
2689
2689
2690 Callbacks should not capture and use a cached copy of the dirstate --
2690 Callbacks should not capture and use a cached copy of the dirstate --
2691 it might change in the meantime. Instead, they should access the
2691 it might change in the meantime. Instead, they should access the
2692 dirstate via wctx.repo().dirstate.
2692 dirstate via wctx.repo().dirstate.
2693
2693
2694 This list is emptied out after each status run -- extensions should
2694 This list is emptied out after each status run -- extensions should
2695 make sure they add to this list each time dirstate.status is called.
2695 make sure they add to this list each time dirstate.status is called.
2696 Extensions should also make sure they don't call this for statuses
2696 Extensions should also make sure they don't call this for statuses
2697 that don't involve the dirstate.
2697 that don't involve the dirstate.
2698 """
2698 """
2699
2699
2700 # The list is located here for uniqueness reasons -- it is actually
2700 # The list is located here for uniqueness reasons -- it is actually
2701 # managed by the workingctx, but that isn't unique per-repo.
2701 # managed by the workingctx, but that isn't unique per-repo.
2702 self._postdsstatus.append(ps)
2702 self._postdsstatus.append(ps)
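
To make the contract above concrete, a minimal sketch of a callback
(hypothetical extension code, assuming an in-process repo object; note the
callback must be re-added before every status run, since the list is
emptied afterwards):

def _afterstatus(wctx, status):
    # runs with the wlock held, after status fixups; reach the
    # dirstate through wctx.repo().dirstate, never a cached copy
    wctx.repo().ui.debug(b'post-status: %d modified files\n'
                         % len(status.modified))

repo.addpostdsstatus(_afterstatus)  # re-register before each status run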
2703
2703
2704 def postdsstatus(self):
2704 def postdsstatus(self):
2705 """Used by workingctx to get the list of post-dirstate-status hooks."""
2705 """Used by workingctx to get the list of post-dirstate-status hooks."""
2706 return self._postdsstatus
2706 return self._postdsstatus
2707
2707
2708 def clearpostdsstatus(self):
2708 def clearpostdsstatus(self):
2709 """Used by workingctx to clear post-dirstate-status hooks."""
2709 """Used by workingctx to clear post-dirstate-status hooks."""
2710 del self._postdsstatus[:]
2710 del self._postdsstatus[:]
2711
2711
2712 def heads(self, start=None):
2712 def heads(self, start=None):
2713 if start is None:
2713 if start is None:
2714 cl = self.changelog
2714 cl = self.changelog
2715 headrevs = reversed(cl.headrevs())
2715 headrevs = reversed(cl.headrevs())
2716 return [cl.node(rev) for rev in headrevs]
2716 return [cl.node(rev) for rev in headrevs]
2717
2717
2718 heads = self.changelog.heads(start)
2718 heads = self.changelog.heads(start)
2719 # sort the output in rev descending order
2719 # sort the output in rev descending order
2720 return sorted(heads, key=self.changelog.rev, reverse=True)
2720 return sorted(heads, key=self.changelog.rev, reverse=True)
2721
2721
2722 def branchheads(self, branch=None, start=None, closed=False):
2722 def branchheads(self, branch=None, start=None, closed=False):
2723 '''return a (possibly filtered) list of heads for the given branch
2723 '''return a (possibly filtered) list of heads for the given branch
2724
2724
2725 Heads are returned in topological order, from newest to oldest.
2725 Heads are returned in topological order, from newest to oldest.
2726 If branch is None, use the dirstate branch.
2726 If branch is None, use the dirstate branch.
2727 If start is not None, return only heads reachable from start.
2727 If start is not None, return only heads reachable from start.
2728 If closed is True, return heads that are marked as closed as well.
2728 If closed is True, return heads that are marked as closed as well.
2729 '''
2729 '''
2730 if branch is None:
2730 if branch is None:
2731 branch = self[None].branch()
2731 branch = self[None].branch()
2732 branches = self.branchmap()
2732 branches = self.branchmap()
2733 if branch not in branches.entries:
2733 if not branches.hasbranch(branch):
2734 return []
2734 return []
2735 # the cache returns heads ordered lowest to highest
2735 # the cache returns heads ordered lowest to highest
2736 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2736 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2737 if start is not None:
2737 if start is not None:
2738 # filter out the heads that cannot be reached from startrev
2738 # filter out the heads that cannot be reached from startrev
2739 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2739 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2740 bheads = [h for h in bheads if h in fbheads]
2740 bheads = [h for h in bheads if h in fbheads]
2741 return bheads
2741 return bheads
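
As a usage sketch (assuming an in-process repo object; the branch name is
an example), the heads of a named branch come back newest first:

from mercurial.node import short

for n in repo.branchheads(b'default', closed=True):
    # newest-to-oldest, as documented above
    repo.ui.write(b'%d:%s\n' % (repo[n].rev(), short(n)))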
2742
2742
2743 def branches(self, nodes):
2743 def branches(self, nodes):
2744 if not nodes:
2744 if not nodes:
2745 nodes = [self.changelog.tip()]
2745 nodes = [self.changelog.tip()]
2746 b = []
2746 b = []
2747 for n in nodes:
2747 for n in nodes:
2748 t = n
2748 t = n
2749 while True:
2749 while True:
2750 p = self.changelog.parents(n)
2750 p = self.changelog.parents(n)
2751 if p[1] != nullid or p[0] == nullid:
2751 if p[1] != nullid or p[0] == nullid:
2752 b.append((t, n, p[0], p[1]))
2752 b.append((t, n, p[0], p[1]))
2753 break
2753 break
2754 n = p[0]
2754 n = p[0]
2755 return b
2755 return b
2756
2756
2757 def between(self, pairs):
2757 def between(self, pairs):
2758 r = []
2758 r = []
2759
2759
2760 for top, bottom in pairs:
2760 for top, bottom in pairs:
2761 n, l, i = top, [], 0
2761 n, l, i = top, [], 0
2762 f = 1
2762 f = 1
2763
2763
2764 while n != bottom and n != nullid:
2764 while n != bottom and n != nullid:
2765 p = self.changelog.parents(n)[0]
2765 p = self.changelog.parents(n)[0]
2766 if i == f:
2766 if i == f:
2767 l.append(n)
2767 l.append(n)
2768 f = f * 2
2768 f = f * 2
2769 n = p
2769 n = p
2770 i += 1
2770 i += 1
2771
2771
2772 r.append(l)
2772 r.append(l)
2773
2773
2774 return r
2774 return r
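
The doubling of f above samples each (top, bottom) chain at exponentially
growing distances from top (positions 1, 2, 4, 8, ...), which lets the old
discovery protocol cover long ranges with only logarithmically many nodes.
A self-contained sketch of just that sampling pattern:

def samplepositions(length):
    # mirrors the i/f bookkeeping in between() above
    i, f, picked = 0, 1, []
    while i < length:
        if i == f:
            picked.append(i)
            f *= 2
        i += 1
    return picked

samplepositions(20)  # -> [1, 2, 4, 8, 16]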
2775
2775
2776 def checkpush(self, pushop):
2776 def checkpush(self, pushop):
2777 """Extensions can override this function if additional checks have
2777 """Extensions can override this function if additional checks have
2778 to be performed before pushing, or call it if they override push
2778 to be performed before pushing, or call it if they override push
2779 command.
2779 command.
2780 """
2780 """
2781
2781
2782 @unfilteredpropertycache
2782 @unfilteredpropertycache
2783 def prepushoutgoinghooks(self):
2783 def prepushoutgoinghooks(self):
2784 """Return util.hooks consists of a pushop with repo, remote, outgoing
2784 """Return util.hooks consists of a pushop with repo, remote, outgoing
2785 methods, which are called before pushing changesets.
2785 methods, which are called before pushing changesets.
2786 """
2786 """
2787 return util.hooks()
2787 return util.hooks()
2788
2788
2789 def pushkey(self, namespace, key, old, new):
2789 def pushkey(self, namespace, key, old, new):
2790 try:
2790 try:
2791 tr = self.currenttransaction()
2791 tr = self.currenttransaction()
2792 hookargs = {}
2792 hookargs = {}
2793 if tr is not None:
2793 if tr is not None:
2794 hookargs.update(tr.hookargs)
2794 hookargs.update(tr.hookargs)
2795 hookargs = pycompat.strkwargs(hookargs)
2795 hookargs = pycompat.strkwargs(hookargs)
2796 hookargs[r'namespace'] = namespace
2796 hookargs[r'namespace'] = namespace
2797 hookargs[r'key'] = key
2797 hookargs[r'key'] = key
2798 hookargs[r'old'] = old
2798 hookargs[r'old'] = old
2799 hookargs[r'new'] = new
2799 hookargs[r'new'] = new
2800 self.hook('prepushkey', throw=True, **hookargs)
2800 self.hook('prepushkey', throw=True, **hookargs)
2801 except error.HookAbort as exc:
2801 except error.HookAbort as exc:
2802 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2802 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2803 if exc.hint:
2803 if exc.hint:
2804 self.ui.write_err(_("(%s)\n") % exc.hint)
2804 self.ui.write_err(_("(%s)\n") % exc.hint)
2805 return False
2805 return False
2806 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2806 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2807 ret = pushkey.push(self, namespace, key, old, new)
2807 ret = pushkey.push(self, namespace, key, old, new)
2808 def runhook():
2808 def runhook():
2809 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2809 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2810 ret=ret)
2810 ret=ret)
2811 self._afterlock(runhook)
2811 self._afterlock(runhook)
2812 return ret
2812 return ret
2813
2813
2814 def listkeys(self, namespace):
2814 def listkeys(self, namespace):
2815 self.hook('prelistkeys', throw=True, namespace=namespace)
2815 self.hook('prelistkeys', throw=True, namespace=namespace)
2816 self.ui.debug('listing keys for "%s"\n' % namespace)
2816 self.ui.debug('listing keys for "%s"\n' % namespace)
2817 values = pushkey.list(self, namespace)
2817 values = pushkey.list(self, namespace)
2818 self.hook('listkeys', namespace=namespace, values=values)
2818 self.hook('listkeys', namespace=namespace, values=values)
2819 return values
2819 return values
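
A sketch of the two methods together (assuming an in-process repo object;
the bookmark name and newhexnode value are hypothetical, and pushkey
values are bytes):

marks = repo.listkeys(b'bookmarks')   # {name: hex node}
old = marks.get(b'@', b'')            # b'' asks pushkey to create it
if not repo.pushkey(b'bookmarks', b'@', old, newhexnode):
    repo.ui.warn(b'a prepushkey hook rejected the update\n')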
2820
2820
2821 def debugwireargs(self, one, two, three=None, four=None, five=None):
2821 def debugwireargs(self, one, two, three=None, four=None, five=None):
2822 '''used to test argument passing over the wire'''
2822 '''used to test argument passing over the wire'''
2823 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2823 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2824 pycompat.bytestr(four),
2824 pycompat.bytestr(four),
2825 pycompat.bytestr(five))
2825 pycompat.bytestr(five))
2826
2826
2827 def savecommitmessage(self, text):
2827 def savecommitmessage(self, text):
2828 fp = self.vfs('last-message.txt', 'wb')
2828 fp = self.vfs('last-message.txt', 'wb')
2829 try:
2829 try:
2830 fp.write(text)
2830 fp.write(text)
2831 finally:
2831 finally:
2832 fp.close()
2832 fp.close()
2833 return self.pathto(fp.name[len(self.root) + 1:])
2833 return self.pathto(fp.name[len(self.root) + 1:])
2834
2834
2835 # used to avoid circular references so destructors work
2835 # used to avoid circular references so destructors work
2836 def aftertrans(files):
2836 def aftertrans(files):
2837 renamefiles = [tuple(t) for t in files]
2837 renamefiles = [tuple(t) for t in files]
2838 def a():
2838 def a():
2839 for vfs, src, dest in renamefiles:
2839 for vfs, src, dest in renamefiles:
2840 # if src and dest refer to the same file, vfs.rename is a no-op,
2840 # if src and dest refer to the same file, vfs.rename is a no-op,
2841 # leaving both src and dest on disk. delete dest to make sure
2841 # leaving both src and dest on disk. delete dest to make sure
2842 # the rename cannot be such a no-op.
2842 # the rename cannot be such a no-op.
2843 vfs.tryunlink(dest)
2843 vfs.tryunlink(dest)
2844 try:
2844 try:
2845 vfs.rename(src, dest)
2845 vfs.rename(src, dest)
2846 except OSError: # journal file does not yet exist
2846 except OSError: # journal file does not yet exist
2847 pass
2847 pass
2848 return a
2848 return a
2849
2849
2850 def undoname(fn):
2850 def undoname(fn):
2851 base, name = os.path.split(fn)
2851 base, name = os.path.split(fn)
2852 assert name.startswith('journal')
2852 assert name.startswith('journal')
2853 return os.path.join(base, name.replace('journal', 'undo', 1))
2853 return os.path.join(base, name.replace('journal', 'undo', 1))
2854
2854
2855 def instance(ui, path, create, intents=None, createopts=None):
2855 def instance(ui, path, create, intents=None, createopts=None):
2856 localpath = util.urllocalpath(path)
2856 localpath = util.urllocalpath(path)
2857 if create:
2857 if create:
2858 createrepository(ui, localpath, createopts=createopts)
2858 createrepository(ui, localpath, createopts=createopts)
2859
2859
2860 return makelocalrepository(ui, localpath, intents=intents)
2860 return makelocalrepository(ui, localpath, intents=intents)
2861
2861
2862 def islocal(path):
2862 def islocal(path):
2863 return True
2863 return True
2864
2864
2865 def defaultcreateopts(ui, createopts=None):
2865 def defaultcreateopts(ui, createopts=None):
2866 """Populate the default creation options for a repository.
2866 """Populate the default creation options for a repository.
2867
2867
2868 A dictionary of explicitly requested creation options can be passed
2868 A dictionary of explicitly requested creation options can be passed
2869 in. Missing keys will be populated.
2869 in. Missing keys will be populated.
2870 """
2870 """
2871 createopts = dict(createopts or {})
2871 createopts = dict(createopts or {})
2872
2872
2873 if 'backend' not in createopts:
2873 if 'backend' not in createopts:
2874 # experimental config: storage.new-repo-backend
2874 # experimental config: storage.new-repo-backend
2875 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2875 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2876
2876
2877 return createopts
2877 return createopts
2878
2878
2879 def newreporequirements(ui, createopts):
2879 def newreporequirements(ui, createopts):
2880 """Determine the set of requirements for a new local repository.
2880 """Determine the set of requirements for a new local repository.
2881
2881
2882 Extensions can wrap this function to specify custom requirements for
2882 Extensions can wrap this function to specify custom requirements for
2883 new repositories.
2883 new repositories.
2884 """
2884 """
2885 # If the repo is being created from a shared repository, we copy
2885 # If the repo is being created from a shared repository, we copy
2886 # its requirements.
2886 # its requirements.
2887 if 'sharedrepo' in createopts:
2887 if 'sharedrepo' in createopts:
2888 requirements = set(createopts['sharedrepo'].requirements)
2888 requirements = set(createopts['sharedrepo'].requirements)
2889 if createopts.get('sharedrelative'):
2889 if createopts.get('sharedrelative'):
2890 requirements.add('relshared')
2890 requirements.add('relshared')
2891 else:
2891 else:
2892 requirements.add('shared')
2892 requirements.add('shared')
2893
2893
2894 return requirements
2894 return requirements
2895
2895
2896 if 'backend' not in createopts:
2896 if 'backend' not in createopts:
2897 raise error.ProgrammingError('backend key not present in createopts; '
2897 raise error.ProgrammingError('backend key not present in createopts; '
2898 'was defaultcreateopts() called?')
2898 'was defaultcreateopts() called?')
2899
2899
2900 if createopts['backend'] != 'revlogv1':
2900 if createopts['backend'] != 'revlogv1':
2901 raise error.Abort(_('unable to determine repository requirements for '
2901 raise error.Abort(_('unable to determine repository requirements for '
2902 'storage backend: %s') % createopts['backend'])
2902 'storage backend: %s') % createopts['backend'])
2903
2903
2904 requirements = {'revlogv1'}
2904 requirements = {'revlogv1'}
2905 if ui.configbool('format', 'usestore'):
2905 if ui.configbool('format', 'usestore'):
2906 requirements.add('store')
2906 requirements.add('store')
2907 if ui.configbool('format', 'usefncache'):
2907 if ui.configbool('format', 'usefncache'):
2908 requirements.add('fncache')
2908 requirements.add('fncache')
2909 if ui.configbool('format', 'dotencode'):
2909 if ui.configbool('format', 'dotencode'):
2910 requirements.add('dotencode')
2910 requirements.add('dotencode')
2911
2911
2912 compengine = ui.config('experimental', 'format.compression')
2912 compengine = ui.config('experimental', 'format.compression')
2913 if compengine not in util.compengines:
2913 if compengine not in util.compengines:
2914 raise error.Abort(_('compression engine %s defined by '
2914 raise error.Abort(_('compression engine %s defined by '
2915 'experimental.format.compression not available') %
2915 'experimental.format.compression not available') %
2916 compengine,
2916 compengine,
2917 hint=_('run "hg debuginstall" to list available '
2917 hint=_('run "hg debuginstall" to list available '
2918 'compression engines'))
2918 'compression engines'))
2919
2919
2920 # zlib is the historical default and doesn't need an explicit requirement.
2920 # zlib is the historical default and doesn't need an explicit requirement.
2921 if compengine != 'zlib':
2921 if compengine != 'zlib':
2922 requirements.add('exp-compression-%s' % compengine)
2922 requirements.add('exp-compression-%s' % compengine)
2923
2923
2924 if scmutil.gdinitconfig(ui):
2924 if scmutil.gdinitconfig(ui):
2925 requirements.add('generaldelta')
2925 requirements.add('generaldelta')
2926 if ui.configbool('format', 'sparse-revlog'):
2926 if ui.configbool('format', 'sparse-revlog'):
2927 requirements.add(SPARSEREVLOG_REQUIREMENT)
2927 requirements.add(SPARSEREVLOG_REQUIREMENT)
2928 if ui.configbool('experimental', 'treemanifest'):
2928 if ui.configbool('experimental', 'treemanifest'):
2929 requirements.add('treemanifest')
2929 requirements.add('treemanifest')
2930
2930
2931 revlogv2 = ui.config('experimental', 'revlogv2')
2931 revlogv2 = ui.config('experimental', 'revlogv2')
2932 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2932 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2933 requirements.remove('revlogv1')
2933 requirements.remove('revlogv1')
2934 # generaldelta is implied by revlogv2.
2934 # generaldelta is implied by revlogv2.
2935 requirements.discard('generaldelta')
2935 requirements.discard('generaldelta')
2936 requirements.add(REVLOGV2_REQUIREMENT)
2936 requirements.add(REVLOGV2_REQUIREMENT)
2937 # experimental config: format.internal-phase
2937 # experimental config: format.internal-phase
2938 if ui.configbool('format', 'internal-phase'):
2938 if ui.configbool('format', 'internal-phase'):
2939 requirements.add('internal-phase')
2939 requirements.add('internal-phase')
2940
2940
2941 if createopts.get('narrowfiles'):
2941 if createopts.get('narrowfiles'):
2942 requirements.add(repository.NARROW_REQUIREMENT)
2942 requirements.add(repository.NARROW_REQUIREMENT)
2943
2943
2944 if createopts.get('lfs'):
2944 if createopts.get('lfs'):
2945 requirements.add('lfs')
2945 requirements.add('lfs')
2946
2946
2947 return requirements
2947 return requirements
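
Since the docstring above invites wrapping, a sketch of an extension
adding its own requirement (the option and requirement names are
hypothetical; this would normally run from an extension's extsetup):

from mercurial import extensions, localrepo

def _wrapped(orig, ui, createopts):
    requirements = orig(ui, createopts)
    if createopts.get('myfeature'):
        requirements.add('exp-myfeature')
    return requirements

extensions.wrapfunction(localrepo, 'newreporequirements', _wrapped)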
2948
2948
2949 def filterknowncreateopts(ui, createopts):
2949 def filterknowncreateopts(ui, createopts):
2950 """Filters a dict of repo creation options against options that are known.
2950 """Filters a dict of repo creation options against options that are known.
2951
2951
2952 Receives a dict of repo creation options and returns a dict of those
2952 Receives a dict of repo creation options and returns a dict of those
2953 options that we don't know how to handle.
2953 options that we don't know how to handle.
2954
2954
2955 This function is called as part of repository creation. If the
2955 This function is called as part of repository creation. If the
2956 returned dict contains any items, repository creation will not
2956 returned dict contains any items, repository creation will not
2957 be allowed, as it means there was a request to create a repository
2957 be allowed, as it means there was a request to create a repository
2958 with options not recognized by loaded code.
2958 with options not recognized by loaded code.
2959
2959
2960 Extensions can wrap this function to filter out creation options
2960 Extensions can wrap this function to filter out creation options
2961 they know how to handle.
2961 they know how to handle.
2962 """
2962 """
2963 known = {
2963 known = {
2964 'backend',
2964 'backend',
2965 'lfs',
2965 'lfs',
2966 'narrowfiles',
2966 'narrowfiles',
2967 'sharedrepo',
2967 'sharedrepo',
2968 'sharedrelative',
2968 'sharedrelative',
2969 'shareditems',
2969 'shareditems',
2970 'shallowfilestore',
2970 'shallowfilestore',
2971 }
2971 }
2972
2972
2973 return {k: v for k, v in createopts.items() if k not in known}
2973 return {k: v for k, v in createopts.items() if k not in known}
2974
2974
2975 def createrepository(ui, path, createopts=None):
2975 def createrepository(ui, path, createopts=None):
2976 """Create a new repository in a vfs.
2976 """Create a new repository in a vfs.
2977
2977
2978 ``path`` path to the new repo's working directory.
2978 ``path`` path to the new repo's working directory.
2979 ``createopts`` options for the new repository.
2979 ``createopts`` options for the new repository.
2980
2980
2981 The following keys for ``createopts`` are recognized:
2981 The following keys for ``createopts`` are recognized:
2982
2982
2983 backend
2983 backend
2984 The storage backend to use.
2984 The storage backend to use.
2985 lfs
2985 lfs
2986 Repository will be created with the ``lfs`` requirement. The lfs extension
2986 Repository will be created with the ``lfs`` requirement. The lfs extension
2987 will automatically be loaded when the repository is accessed.
2987 will automatically be loaded when the repository is accessed.
2988 narrowfiles
2988 narrowfiles
2989 Set up repository to support narrow file storage.
2989 Set up repository to support narrow file storage.
2990 sharedrepo
2990 sharedrepo
2991 Repository object from which storage should be shared.
2991 Repository object from which storage should be shared.
2992 sharedrelative
2992 sharedrelative
2993 Boolean indicating if the path to the shared repo should be
2993 Boolean indicating if the path to the shared repo should be
2994 stored as relative. By default, the pointer to the "parent" repo
2994 stored as relative. By default, the pointer to the "parent" repo
2995 is stored as an absolute path.
2995 is stored as an absolute path.
2996 shareditems
2996 shareditems
2997 Set of items to share to the new repository (in addition to storage).
2997 Set of items to share to the new repository (in addition to storage).
2998 shallowfilestore
2998 shallowfilestore
2999 Indicates that storage for files should be shallow (not all ancestor
2999 Indicates that storage for files should be shallow (not all ancestor
3000 revisions are known).
3000 revisions are known).
3001 """
3001 """
3002 createopts = defaultcreateopts(ui, createopts=createopts)
3002 createopts = defaultcreateopts(ui, createopts=createopts)
3003
3003
3004 unknownopts = filterknowncreateopts(ui, createopts)
3004 unknownopts = filterknowncreateopts(ui, createopts)
3005
3005
3006 if not isinstance(unknownopts, dict):
3006 if not isinstance(unknownopts, dict):
3007 raise error.ProgrammingError('filterknowncreateopts() did not return '
3007 raise error.ProgrammingError('filterknowncreateopts() did not return '
3008 'a dict')
3008 'a dict')
3009
3009
3010 if unknownopts:
3010 if unknownopts:
3011 raise error.Abort(_('unable to create repository because of unknown '
3011 raise error.Abort(_('unable to create repository because of unknown '
3012 'creation option: %s') %
3012 'creation option: %s') %
3013 ', '.join(sorted(unknownopts)),
3013 ', '.join(sorted(unknownopts)),
3014 hint=_('is a required extension not loaded?'))
3014 hint=_('is a required extension not loaded?'))
3015
3015
3016 requirements = newreporequirements(ui, createopts=createopts)
3016 requirements = newreporequirements(ui, createopts=createopts)
3017
3017
3018 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3018 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3019
3019
3020 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3020 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3021 if hgvfs.exists():
3021 if hgvfs.exists():
3022 raise error.RepoError(_('repository %s already exists') % path)
3022 raise error.RepoError(_('repository %s already exists') % path)
3023
3023
3024 if 'sharedrepo' in createopts:
3024 if 'sharedrepo' in createopts:
3025 sharedpath = createopts['sharedrepo'].sharedpath
3025 sharedpath = createopts['sharedrepo'].sharedpath
3026
3026
3027 if createopts.get('sharedrelative'):
3027 if createopts.get('sharedrelative'):
3028 try:
3028 try:
3029 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3029 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3030 except (IOError, ValueError) as e:
3030 except (IOError, ValueError) as e:
3031 # ValueError is raised on Windows if the drive letters differ
3031 # ValueError is raised on Windows if the drive letters differ
3032 # on each path.
3032 # on each path.
3033 raise error.Abort(_('cannot calculate relative path'),
3033 raise error.Abort(_('cannot calculate relative path'),
3034 hint=stringutil.forcebytestr(e))
3034 hint=stringutil.forcebytestr(e))
3035
3035
3036 if not wdirvfs.exists():
3036 if not wdirvfs.exists():
3037 wdirvfs.makedirs()
3037 wdirvfs.makedirs()
3038
3038
3039 hgvfs.makedir(notindexed=True)
3039 hgvfs.makedir(notindexed=True)
3040 if 'sharedrepo' not in createopts:
3040 if 'sharedrepo' not in createopts:
3041 hgvfs.mkdir(b'cache')
3041 hgvfs.mkdir(b'cache')
3042 hgvfs.mkdir(b'wcache')
3042 hgvfs.mkdir(b'wcache')
3043
3043
3044 if b'store' in requirements and 'sharedrepo' not in createopts:
3044 if b'store' in requirements and 'sharedrepo' not in createopts:
3045 hgvfs.mkdir(b'store')
3045 hgvfs.mkdir(b'store')
3046
3046
3047 # We create an invalid changelog outside the store so very old
3047 # We create an invalid changelog outside the store so very old
3048 # Mercurial versions (which didn't know about the requirements
3048 # Mercurial versions (which didn't know about the requirements
3049 # file) encounter an error on reading the changelog. This
3049 # file) encounter an error on reading the changelog. This
3050 # effectively locks out old clients and prevents them from
3050 # effectively locks out old clients and prevents them from
3051 # mucking with a repo in an unknown format.
3051 # mucking with a repo in an unknown format.
3052 #
3052 #
3053 # The revlog header has version 2, which won't be recognized by
3053 # The revlog header has version 2, which won't be recognized by
3054 # such old clients.
3054 # such old clients.
3055 hgvfs.append(b'00changelog.i',
3055 hgvfs.append(b'00changelog.i',
3056 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3056 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3057 b'layout')
3057 b'layout')
3058
3058
3059 scmutil.writerequires(hgvfs, requirements)
3059 scmutil.writerequires(hgvfs, requirements)
3060
3060
3061 # Write out file telling readers where to find the shared store.
3061 # Write out file telling readers where to find the shared store.
3062 if 'sharedrepo' in createopts:
3062 if 'sharedrepo' in createopts:
3063 hgvfs.write(b'sharedpath', sharedpath)
3063 hgvfs.write(b'sharedpath', sharedpath)
3064
3064
3065 if createopts.get('shareditems'):
3065 if createopts.get('shareditems'):
3066 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3066 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3067 hgvfs.write(b'shared', shared)
3067 hgvfs.write(b'shared', shared)
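
Putting the creation options documented above to use, a hypothetical
in-process call (the path and options are assumptions):

from mercurial import ui as uimod

createrepository(uimod.ui.load(), b'/tmp/newrepo',
                 createopts={'lfs': True})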
3068
3068
3069 def poisonrepository(repo):
3069 def poisonrepository(repo):
3070 """Poison a repository instance so it can no longer be used."""
3070 """Poison a repository instance so it can no longer be used."""
3071 # Perform any cleanup on the instance.
3071 # Perform any cleanup on the instance.
3072 repo.close()
3072 repo.close()
3073
3073
3074 # Our strategy is to replace the type of the object with one that
3074 # Our strategy is to replace the type of the object with one that
3075 # has all attribute lookups result in error.
3075 # has all attribute lookups result in error.
3076 #
3076 #
3077 # But we have to allow the close() method because some constructors
3077 # But we have to allow the close() method because some constructors
3078 # of repos call close() on repo references.
3078 # of repos call close() on repo references.
3079 class poisonedrepository(object):
3079 class poisonedrepository(object):
3080 def __getattribute__(self, item):
3080 def __getattribute__(self, item):
3081 if item == r'close':
3081 if item == r'close':
3082 return object.__getattribute__(self, item)
3082 return object.__getattribute__(self, item)
3083
3083
3084 raise error.ProgrammingError('repo instances should not be used '
3084 raise error.ProgrammingError('repo instances should not be used '
3085 'after unshare')
3085 'after unshare')
3086
3086
3087 def close(self):
3087 def close(self):
3088 pass
3088 pass
3089
3089
3090 # We may have a repoview, which intercepts __setattr__. So be sure
3090 # We may have a repoview, which intercepts __setattr__. So be sure
3091 # we operate at the lowest level possible.
3091 # we operate at the lowest level possible.
3092 object.__setattr__(repo, r'__class__', poisonedrepository)
3092 object.__setattr__(repo, r'__class__', poisonedrepository)
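
The net effect of the class swap above, sketched:

poisonrepository(repo)
repo.close()      # still allowed; close() is special-cased
repo.changelog    # raises error.ProgrammingError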
@@ -1,2409 +1,2409 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 dagop,
14 dagop,
15 destutil,
15 destutil,
16 diffutil,
16 diffutil,
17 encoding,
17 encoding,
18 error,
18 error,
19 hbisect,
19 hbisect,
20 match as matchmod,
20 match as matchmod,
21 node,
21 node,
22 obsolete as obsmod,
22 obsolete as obsmod,
23 obsutil,
23 obsutil,
24 pathutil,
24 pathutil,
25 phases,
25 phases,
26 pycompat,
26 pycompat,
27 registrar,
27 registrar,
28 repoview,
28 repoview,
29 revsetlang,
29 revsetlang,
30 scmutil,
30 scmutil,
31 smartset,
31 smartset,
32 stack as stackmod,
32 stack as stackmod,
33 util,
33 util,
34 )
34 )
35 from .utils import (
35 from .utils import (
36 dateutil,
36 dateutil,
37 stringutil,
37 stringutil,
38 )
38 )
39
39
40 # helpers for processing parsed tree
40 # helpers for processing parsed tree
41 getsymbol = revsetlang.getsymbol
41 getsymbol = revsetlang.getsymbol
42 getstring = revsetlang.getstring
42 getstring = revsetlang.getstring
43 getinteger = revsetlang.getinteger
43 getinteger = revsetlang.getinteger
44 getboolean = revsetlang.getboolean
44 getboolean = revsetlang.getboolean
45 getlist = revsetlang.getlist
45 getlist = revsetlang.getlist
46 getintrange = revsetlang.getintrange
46 getintrange = revsetlang.getintrange
47 getargs = revsetlang.getargs
47 getargs = revsetlang.getargs
48 getargsdict = revsetlang.getargsdict
48 getargsdict = revsetlang.getargsdict
49
49
50 baseset = smartset.baseset
50 baseset = smartset.baseset
51 generatorset = smartset.generatorset
51 generatorset = smartset.generatorset
52 spanset = smartset.spanset
52 spanset = smartset.spanset
53 fullreposet = smartset.fullreposet
53 fullreposet = smartset.fullreposet
54
54
55 # Constants for ordering requirement, used in getset():
55 # Constants for ordering requirement, used in getset():
56 #
56 #
57 # If 'define', any nested functions and operations MAY change the ordering of
57 # If 'define', any nested functions and operations MAY change the ordering of
58 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
58 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
59 # it). If 'follow', any nested functions and operations MUST take the ordering
59 # it). If 'follow', any nested functions and operations MUST take the ordering
60 # specified by the first operand to the '&' operator.
60 # specified by the first operand to the '&' operator.
61 #
61 #
62 # For instance,
62 # For instance,
63 #
63 #
64 # X & (Y | Z)
64 # X & (Y | Z)
65 # ^ ^^^^^^^
65 # ^ ^^^^^^^
66 # | follow
66 # | follow
67 # define
67 # define
68 #
68 #
69 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
69 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
70 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
70 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
71 #
71 #
72 # 'any' means the order doesn't matter. For instance,
72 # 'any' means the order doesn't matter. For instance,
73 #
73 #
74 # (X & !Y) | ancestors(Z)
74 # (X & !Y) | ancestors(Z)
75 # ^ ^
75 # ^ ^
76 # any any
76 # any any
77 #
77 #
78 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
78 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
79 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
79 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
80 # since 'ancestors' does not care about the order of its argument.
80 # since 'ancestors' does not care about the order of its argument.
81 #
81 #
82 # Currently, most revsets do not care about the order, so 'define' is
82 # Currently, most revsets do not care about the order, so 'define' is
83 # equivalent to 'follow' for them, and the resulting order is based on the
83 # equivalent to 'follow' for them, and the resulting order is based on the
84 # 'subset' parameter passed down to them:
84 # 'subset' parameter passed down to them:
85 #
85 #
86 # m = revset.match(...)
86 # m = revset.match(...)
87 # m(repo, subset, order=defineorder)
87 # m(repo, subset, order=defineorder)
88 # ^^^^^^
88 # ^^^^^^
89 # For most revsets, 'define' means using the order this subset provides
89 # For most revsets, 'define' means using the order this subset provides
90 #
90 #
91 # There are a few revsets that always redefine the order if 'define' is
91 # There are a few revsets that always redefine the order if 'define' is
92 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
92 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
93 anyorder = 'any' # don't care about the order; could even be random-shuffled
93 anyorder = 'any' # don't care about the order; could even be random-shuffled
94 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
94 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
95 followorder = 'follow' # MUST follow the current order
95 followorder = 'follow' # MUST follow the current order
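
A sketch of the convention these constants encode (a hypothetical
predicate, shaped like the real ones further down): intersecting with
subset on the left preserves subset's ordering, which is what 'follow'
requires:

def _exampleset(repo, subset, x):
    s = getset(repo, fullreposet(repo), x)
    # 'subset & s' keeps subset's order, so a caller passing
    # order=followorder gets entries back in the order it supplied
    return subset & s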
96
96
97 # helpers
97 # helpers
98
98
99 def getset(repo, subset, x, order=defineorder):
99 def getset(repo, subset, x, order=defineorder):
100 if not x:
100 if not x:
101 raise error.ParseError(_("missing argument"))
101 raise error.ParseError(_("missing argument"))
102 return methods[x[0]](repo, subset, *x[1:], order=order)
102 return methods[x[0]](repo, subset, *x[1:], order=order)
103
103
104 def _getrevsource(repo, r):
104 def _getrevsource(repo, r):
105 extra = repo[r].extra()
105 extra = repo[r].extra()
106 for label in ('source', 'transplant_source', 'rebase_source'):
106 for label in ('source', 'transplant_source', 'rebase_source'):
107 if label in extra:
107 if label in extra:
108 try:
108 try:
109 return repo[extra[label]].rev()
109 return repo[extra[label]].rev()
110 except error.RepoLookupError:
110 except error.RepoLookupError:
111 pass
111 pass
112 return None
112 return None
113
113
114 def _sortedb(xs):
114 def _sortedb(xs):
115 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
115 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
116
116
117 # operator methods
117 # operator methods
118
118
119 def stringset(repo, subset, x, order):
119 def stringset(repo, subset, x, order):
120 if not x:
120 if not x:
121 raise error.ParseError(_("empty string is not a valid revision"))
121 raise error.ParseError(_("empty string is not a valid revision"))
122 x = scmutil.intrev(scmutil.revsymbol(repo, x))
122 x = scmutil.intrev(scmutil.revsymbol(repo, x))
123 if (x in subset
123 if (x in subset
124 or x == node.nullrev and isinstance(subset, fullreposet)):
124 or x == node.nullrev and isinstance(subset, fullreposet)):
125 return baseset([x])
125 return baseset([x])
126 return baseset()
126 return baseset()
127
127
128 def rawsmartset(repo, subset, x, order):
128 def rawsmartset(repo, subset, x, order):
129 """argument is already a smartset, use that directly"""
129 """argument is already a smartset, use that directly"""
130 if order == followorder:
130 if order == followorder:
131 return subset & x
131 return subset & x
132 else:
132 else:
133 return x & subset
133 return x & subset
134
134
135 def rangeset(repo, subset, x, y, order):
135 def rangeset(repo, subset, x, y, order):
136 m = getset(repo, fullreposet(repo), x)
136 m = getset(repo, fullreposet(repo), x)
137 n = getset(repo, fullreposet(repo), y)
137 n = getset(repo, fullreposet(repo), y)
138
138
139 if not m or not n:
139 if not m or not n:
140 return baseset()
140 return baseset()
141 return _makerangeset(repo, subset, m.first(), n.last(), order)
141 return _makerangeset(repo, subset, m.first(), n.last(), order)
142
142
143 def rangeall(repo, subset, x, order):
143 def rangeall(repo, subset, x, order):
144 assert x is None
144 assert x is None
145 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
145 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
146
146
147 def rangepre(repo, subset, y, order):
147 def rangepre(repo, subset, y, order):
148 # ':y' can't be rewritten to '0:y' since '0' may be hidden
148 # ':y' can't be rewritten to '0:y' since '0' may be hidden
149 n = getset(repo, fullreposet(repo), y)
149 n = getset(repo, fullreposet(repo), y)
150 if not n:
150 if not n:
151 return baseset()
151 return baseset()
152 return _makerangeset(repo, subset, 0, n.last(), order)
152 return _makerangeset(repo, subset, 0, n.last(), order)
153
153
154 def rangepost(repo, subset, x, order):
154 def rangepost(repo, subset, x, order):
155 m = getset(repo, fullreposet(repo), x)
155 m = getset(repo, fullreposet(repo), x)
156 if not m:
156 if not m:
157 return baseset()
157 return baseset()
158 return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
158 return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
159 order)
159 order)
160
160
161 def _makerangeset(repo, subset, m, n, order):
161 def _makerangeset(repo, subset, m, n, order):
162 if m == n:
162 if m == n:
163 r = baseset([m])
163 r = baseset([m])
164 elif n == node.wdirrev:
164 elif n == node.wdirrev:
165 r = spanset(repo, m, len(repo)) + baseset([n])
165 r = spanset(repo, m, len(repo)) + baseset([n])
166 elif m == node.wdirrev:
166 elif m == node.wdirrev:
167 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
167 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
168 elif m < n:
168 elif m < n:
169 r = spanset(repo, m, n + 1)
169 r = spanset(repo, m, n + 1)
170 else:
170 else:
171 r = spanset(repo, m, n - 1)
171 r = spanset(repo, m, n - 1)
172
172
173 if order == defineorder:
173 if order == defineorder:
174 return r & subset
174 return r & subset
175 else:
175 else:
176 # carrying the sorting over when possible would be more efficient
176 # carrying the sorting over when possible would be more efficient
177 return subset & r
177 return subset & r
178
178
179 def dagrange(repo, subset, x, y, order):
179 def dagrange(repo, subset, x, y, order):
180 r = fullreposet(repo)
180 r = fullreposet(repo)
181 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
181 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
182 includepath=True)
182 includepath=True)
183 return subset & xs
183 return subset & xs
184
184
185 def andset(repo, subset, x, y, order):
185 def andset(repo, subset, x, y, order):
186 if order == anyorder:
186 if order == anyorder:
187 yorder = anyorder
187 yorder = anyorder
188 else:
188 else:
189 yorder = followorder
189 yorder = followorder
190 return getset(repo, getset(repo, subset, x, order), y, yorder)
190 return getset(repo, getset(repo, subset, x, order), y, yorder)
191
191
192 def andsmallyset(repo, subset, x, y, order):
192 def andsmallyset(repo, subset, x, y, order):
193 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
193 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
194 if order == anyorder:
194 if order == anyorder:
195 yorder = anyorder
195 yorder = anyorder
196 else:
196 else:
197 yorder = followorder
197 yorder = followorder
198 return getset(repo, getset(repo, subset, y, yorder), x, order)
198 return getset(repo, getset(repo, subset, y, yorder), x, order)
199
199
200 def differenceset(repo, subset, x, y, order):
200 def differenceset(repo, subset, x, y, order):
201 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
201 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
202
202
203 def _orsetlist(repo, subset, xs, order):
203 def _orsetlist(repo, subset, xs, order):
204 assert xs
204 assert xs
205 if len(xs) == 1:
205 if len(xs) == 1:
206 return getset(repo, subset, xs[0], order)
206 return getset(repo, subset, xs[0], order)
207 p = len(xs) // 2
207 p = len(xs) // 2
208 a = _orsetlist(repo, subset, xs[:p], order)
208 a = _orsetlist(repo, subset, xs[:p], order)
209 b = _orsetlist(repo, subset, xs[p:], order)
209 b = _orsetlist(repo, subset, xs[p:], order)
210 return a + b
210 return a + b
211
211
212 def orset(repo, subset, x, order):
212 def orset(repo, subset, x, order):
213 xs = getlist(x)
213 xs = getlist(x)
214 if not xs:
214 if not xs:
215 return baseset()
215 return baseset()
216 if order == followorder:
216 if order == followorder:
217 # slow path to take the subset order
217 # slow path to take the subset order
218 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
218 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
219 else:
219 else:
220 return _orsetlist(repo, subset, xs, order)
220 return _orsetlist(repo, subset, xs, order)
221
221
222 def notset(repo, subset, x, order):
222 def notset(repo, subset, x, order):
223 return subset - getset(repo, subset, x, anyorder)
223 return subset - getset(repo, subset, x, anyorder)
224
224
225 def relationset(repo, subset, x, y, order):
225 def relationset(repo, subset, x, y, order):
226 raise error.ParseError(_("can't use a relation in this context"))
226 raise error.ParseError(_("can't use a relation in this context"))
227
227
228 def _splitrange(a, b):
228 def _splitrange(a, b):
229 """Split range with bounds a and b into two ranges at 0 and return two
229 """Split range with bounds a and b into two ranges at 0 and return two
230 tuples of numbers for use as startdepth and stopdepth arguments of
230 tuples of numbers for use as startdepth and stopdepth arguments of
231 revancestors and revdescendants.
231 revancestors and revdescendants.
232
232
233 >>> _splitrange(-10, -5) # [-10:-5]
233 >>> _splitrange(-10, -5) # [-10:-5]
234 ((5, 11), (None, None))
234 ((5, 11), (None, None))
235 >>> _splitrange(5, 10) # [5:10]
235 >>> _splitrange(5, 10) # [5:10]
236 ((None, None), (5, 11))
236 ((None, None), (5, 11))
237 >>> _splitrange(-10, 10) # [-10:10]
237 >>> _splitrange(-10, 10) # [-10:10]
238 ((0, 11), (0, 11))
238 ((0, 11), (0, 11))
239 >>> _splitrange(-10, 0) # [-10:0]
239 >>> _splitrange(-10, 0) # [-10:0]
240 ((0, 11), (None, None))
240 ((0, 11), (None, None))
241 >>> _splitrange(0, 10) # [0:10]
241 >>> _splitrange(0, 10) # [0:10]
242 ((None, None), (0, 11))
242 ((None, None), (0, 11))
243 >>> _splitrange(0, 0) # [0:0]
243 >>> _splitrange(0, 0) # [0:0]
244 ((0, 1), (None, None))
244 ((0, 1), (None, None))
245 >>> _splitrange(1, -1) # [1:-1]
245 >>> _splitrange(1, -1) # [1:-1]
246 ((None, None), (None, None))
246 ((None, None), (None, None))
247 """
247 """
248 ancdepths = (None, None)
248 ancdepths = (None, None)
249 descdepths = (None, None)
249 descdepths = (None, None)
250 if a == b == 0:
250 if a == b == 0:
251 ancdepths = (0, 1)
251 ancdepths = (0, 1)
252 if a < 0:
252 if a < 0:
253 ancdepths = (-min(b, 0), -a + 1)
253 ancdepths = (-min(b, 0), -a + 1)
254 if b > 0:
254 if b > 0:
255 descdepths = (max(a, 0), b + 1)
255 descdepths = (max(a, 0), b + 1)
256 return ancdepths, descdepths
256 return ancdepths, descdepths
257
257
258 def generationsrel(repo, subset, x, rel, z, order):
258 def generationsrel(repo, subset, x, rel, z, order):
259 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
259 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
260 # descendants() predicates
260 # descendants() predicates
261 a, b = getintrange(z,
261 a, b = getintrange(z,
262 _('relation subscript must be an integer or a range'),
262 _('relation subscript must be an integer or a range'),
263 _('relation subscript bounds must be integers'),
263 _('relation subscript bounds must be integers'),
264 deffirst=-(dagop.maxlogdepth - 1),
264 deffirst=-(dagop.maxlogdepth - 1),
265 deflast=+(dagop.maxlogdepth - 1))
265 deflast=+(dagop.maxlogdepth - 1))
266 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
266 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
267
267
268 if ancstart is None and descstart is None:
268 if ancstart is None and descstart is None:
269 return baseset()
269 return baseset()
270
270
271 revs = getset(repo, fullreposet(repo), x)
271 revs = getset(repo, fullreposet(repo), x)
272 if not revs:
272 if not revs:
273 return baseset()
273 return baseset()
274
274
275 if ancstart is not None and descstart is not None:
275 if ancstart is not None and descstart is not None:
276 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
276 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
277 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
277 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
278 elif ancstart is not None:
278 elif ancstart is not None:
279 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
279 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
280 elif descstart is not None:
280 elif descstart is not None:
281 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
281 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
282
282
283 return subset & s
283 return subset & s
284
284
285 def relsubscriptset(repo, subset, x, y, z, order):
285 def relsubscriptset(repo, subset, x, y, z, order):
286 # this is pretty basic implementation of 'x#y[z]' operator, still
286 # this is pretty basic implementation of 'x#y[z]' operator, still
287 # experimental so undocumented. see the wiki for further ideas.
287 # experimental so undocumented. see the wiki for further ideas.
288 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
288 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
289 rel = getsymbol(y)
289 rel = getsymbol(y)
290 if rel in subscriptrelations:
290 if rel in subscriptrelations:
291 return subscriptrelations[rel](repo, subset, x, rel, z, order)
291 return subscriptrelations[rel](repo, subset, x, rel, z, order)
292
292
293 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
293 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
294 raise error.UnknownIdentifier(rel, relnames)
294 raise error.UnknownIdentifier(rel, relnames)
295
295
296 def subscriptset(repo, subset, x, y, order):
296 def subscriptset(repo, subset, x, y, order):
297 raise error.ParseError(_("can't use a subscript in this context"))
297 raise error.ParseError(_("can't use a subscript in this context"))
298
298
299 def listset(repo, subset, *xs, **opts):
299 def listset(repo, subset, *xs, **opts):
300 raise error.ParseError(_("can't use a list in this context"),
300 raise error.ParseError(_("can't use a list in this context"),
301 hint=_('see \'hg help "revsets.x or y"\''))
301 hint=_('see \'hg help "revsets.x or y"\''))
302
302
303 def keyvaluepair(repo, subset, k, v, order):
303 def keyvaluepair(repo, subset, k, v, order):
304 raise error.ParseError(_("can't use a key-value pair in this context"))
304 raise error.ParseError(_("can't use a key-value pair in this context"))
305
305
306 def func(repo, subset, a, b, order):
306 def func(repo, subset, a, b, order):
307 f = getsymbol(a)
307 f = getsymbol(a)
308 if f in symbols:
308 if f in symbols:
309 func = symbols[f]
309 func = symbols[f]
310 if getattr(func, '_takeorder', False):
310 if getattr(func, '_takeorder', False):
311 return func(repo, subset, b, order)
311 return func(repo, subset, b, order)
312 return func(repo, subset, b)
312 return func(repo, subset, b)
313
313
314 keep = lambda fn: getattr(fn, '__doc__', None) is not None
314 keep = lambda fn: getattr(fn, '__doc__', None) is not None
315
315
316 syms = [s for (s, fn) in symbols.items() if keep(fn)]
316 syms = [s for (s, fn) in symbols.items() if keep(fn)]
317 raise error.UnknownIdentifier(f, syms)
317 raise error.UnknownIdentifier(f, syms)
318
318
319 # functions
319 # functions
320
320
321 # symbols are callables like:
321 # symbols are callables like:
322 # fn(repo, subset, x)
322 # fn(repo, subset, x)
323 # with:
323 # with:
324 # repo - current repository instance
324 # repo - current repository instance
325 # subset - of revisions to be examined
325 # subset - of revisions to be examined
326 # x - argument in tree form
326 # x - argument in tree form
327 symbols = revsetlang.symbols
327 symbols = revsetlang.symbols
328
328
329 # symbols which can't be used for a DoS attack for any given input
329 # symbols which can't be used for a DoS attack for any given input
330 # (e.g. those which accept regexes as plain strings shouldn't be included)
330 # (e.g. those which accept regexes as plain strings shouldn't be included)
331 # functions that just return a lot of changesets (like all) don't count here
331 # functions that just return a lot of changesets (like all) don't count here
332 safesymbols = set()
332 safesymbols = set()
333
333
334 predicate = registrar.revsetpredicate()
334 predicate = registrar.revsetpredicate()
335
335
336 @predicate('_destupdate')
336 @predicate('_destupdate')
337 def _destupdate(repo, subset, x):
337 def _destupdate(repo, subset, x):
338 # experimental revset for update destination
338 # experimental revset for update destination
339 args = getargsdict(x, 'limit', 'clean')
339 args = getargsdict(x, 'limit', 'clean')
340 return subset & baseset([destutil.destupdate(repo,
340 return subset & baseset([destutil.destupdate(repo,
341 **pycompat.strkwargs(args))[0]])
341 **pycompat.strkwargs(args))[0]])
342
342
343 @predicate('_destmerge')
343 @predicate('_destmerge')
344 def _destmerge(repo, subset, x):
344 def _destmerge(repo, subset, x):
345 # experimental revset for merge destination
345 # experimental revset for merge destination
346 sourceset = None
346 sourceset = None
347 if x is not None:
347 if x is not None:
348 sourceset = getset(repo, fullreposet(repo), x)
348 sourceset = getset(repo, fullreposet(repo), x)
349 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
349 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
350
350
351 @predicate('adds(pattern)', safe=True, weight=30)
351 @predicate('adds(pattern)', safe=True, weight=30)
352 def adds(repo, subset, x):
352 def adds(repo, subset, x):
353 """Changesets that add a file matching pattern.
353 """Changesets that add a file matching pattern.
354
354
355 The pattern without explicit kind like ``glob:`` is expected to be
355 The pattern without explicit kind like ``glob:`` is expected to be
356 relative to the current directory and match against a file or a
356 relative to the current directory and match against a file or a
357 directory.
357 directory.
358 """
358 """
359 # i18n: "adds" is a keyword
359 # i18n: "adds" is a keyword
360 pat = getstring(x, _("adds requires a pattern"))
360 pat = getstring(x, _("adds requires a pattern"))
361 return checkstatus(repo, subset, pat, 1)
361 return checkstatus(repo, subset, pat, 1)
362
362
363 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
363 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
364 def ancestor(repo, subset, x):
364 def ancestor(repo, subset, x):
365 """A greatest common ancestor of the changesets.
365 """A greatest common ancestor of the changesets.
366
366
367 Accepts 0 or more changesets.
367 Accepts 0 or more changesets.
368 Will return an empty list when passed no args.
368 Will return an empty list when passed no args.
369 Greatest common ancestor of a single changeset is that changeset.
369 Greatest common ancestor of a single changeset is that changeset.
370 """
370 """
371 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
371 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
372 try:
372 try:
373 anc = repo[next(reviter)]
373 anc = repo[next(reviter)]
374 except StopIteration:
374 except StopIteration:
375 return baseset()
375 return baseset()
376 for r in reviter:
376 for r in reviter:
377 anc = anc.ancestor(repo[r])
377 anc = anc.ancestor(repo[r])
378
378
379 r = scmutil.intrev(anc)
379 r = scmutil.intrev(anc)
380 if r in subset:
380 if r in subset:
381 return baseset([r])
381 return baseset([r])
382 return baseset()
382 return baseset()
383
383
384 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
384 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
385 stopdepth=None):
385 stopdepth=None):
386 heads = getset(repo, fullreposet(repo), x)
386 heads = getset(repo, fullreposet(repo), x)
387 if not heads:
387 if not heads:
388 return baseset()
388 return baseset()
389 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
389 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
390 return subset & s
390 return subset & s
391
391
392 @predicate('ancestors(set[, depth])', safe=True)
392 @predicate('ancestors(set[, depth])', safe=True)
393 def ancestors(repo, subset, x):
393 def ancestors(repo, subset, x):
394 """Changesets that are ancestors of changesets in set, including the
394 """Changesets that are ancestors of changesets in set, including the
395 given changesets themselves.
395 given changesets themselves.
396
396
397 If depth is specified, the result only includes changesets up to
397 If depth is specified, the result only includes changesets up to
398 the specified generation.
398 the specified generation.
399 """
399 """
400 # startdepth is for internal use only until we can decide the UI
400 # startdepth is for internal use only until we can decide the UI
401 args = getargsdict(x, 'ancestors', 'set depth startdepth')
401 args = getargsdict(x, 'ancestors', 'set depth startdepth')
402 if 'set' not in args:
402 if 'set' not in args:
403 # i18n: "ancestors" is a keyword
403 # i18n: "ancestors" is a keyword
404 raise error.ParseError(_('ancestors takes at least 1 argument'))
404 raise error.ParseError(_('ancestors takes at least 1 argument'))
405 startdepth = stopdepth = None
405 startdepth = stopdepth = None
406 if 'startdepth' in args:
406 if 'startdepth' in args:
407 n = getinteger(args['startdepth'],
407 n = getinteger(args['startdepth'],
408 "ancestors expects an integer startdepth")
408 "ancestors expects an integer startdepth")
409 if n < 0:
409 if n < 0:
410 raise error.ParseError("negative startdepth")
410 raise error.ParseError("negative startdepth")
411 startdepth = n
411 startdepth = n
412 if 'depth' in args:
412 if 'depth' in args:
413 # i18n: "ancestors" is a keyword
413 # i18n: "ancestors" is a keyword
414 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
414 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
415 if n < 0:
415 if n < 0:
416 raise error.ParseError(_("negative depth"))
416 raise error.ParseError(_("negative depth"))
417 stopdepth = n + 1
417 stopdepth = n + 1
418 return _ancestors(repo, subset, args['set'],
418 return _ancestors(repo, subset, args['set'],
419 startdepth=startdepth, stopdepth=stopdepth)
419 startdepth=startdepth, stopdepth=stopdepth)
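
For example (assuming an in-process repo object), this selects the
working-directory parent plus at most two generations of its ancestors:

repo.revs(b'ancestors(., depth=2)')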
420
420
421 @predicate('_firstancestors', safe=True)
421 @predicate('_firstancestors', safe=True)
422 def _firstancestors(repo, subset, x):
422 def _firstancestors(repo, subset, x):
423 # ``_firstancestors(set)``
423 # ``_firstancestors(set)``
424 # Like ``ancestors(set)`` but follows only the first parents.
424 # Like ``ancestors(set)`` but follows only the first parents.
425 return _ancestors(repo, subset, x, followfirst=True)
425 return _ancestors(repo, subset, x, followfirst=True)
426
426
def _childrenspec(repo, subset, x, n, order):
    """Changesets that are the Nth child of a changeset
    in set.
    """
    cs = set()
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            c = repo[r].children()
            if len(c) == 0:
                break
            if len(c) > 1:
                raise error.RepoLookupError(
                    _("revision in set has more than one child"))
            r = c[0].rev()
        else:
            cs.add(r)
    return subset & cs

def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    n = getinteger(n, _("~ expects a number"))
    if n < 0:
        # children lookup
        return _childrenspec(repo, subset, x, -n, order)
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            try:
                r = cl.parentrevs(r)[0]
            except error.WdirUnsupported:
                r = repo[r].p1().rev()
        ps.add(r)
    return subset & ps

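# Illustrative usage (added comment, not in the original file): 'tip~2'
# resolves to the first-parent grandparent of tip, while a negative count,
# where the parser accepts it, walks forward through _childrenspec instead:
#
#   repo.revs('tip~2')    # first-parent grandparent of tip
#   repo.revs('.~-1')     # the (unique) child of the working parent
#
# The children lookup aborts if any revision on the way has more than one
# child.
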
@predicate('author(string)', safe=True, weight=10)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = getstring(x, _("author requires a string"))
    kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
    return subset.filter(lambda x: matcher(repo[x].user()),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

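# Illustrative usage (added comment, not in the original file): during an
# active 'hg bisect' session, the machine state can be inspected with the
# statuses listed in the docstring above, e.g.
#
#   repo.revs('bisect(untested)')   # candidates still to be tested
#   repo.revs('bisect(range)')      # everything between good and bad
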
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = stringutil.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            if bm == pattern:
                pattern = repo._bookmarks.expandname(pattern)
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
    bms -= {node.nullrev}
    return subset & bms

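# Illustrative usage (added comment, not in the original file): literal and
# pattern lookups both go through stringutil.stringmatcher, e.g.
#
#   repo.revs('bookmark(stable)')          # one bookmark, aborts if missing
#   repo.revs('bookmark("re:^release")')   # all bookmarks matching the regex
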
@predicate('branch(string or set)', safe=True, weight=10)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo
    def getbranch(r):
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            return repo[r].branch()

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = stringutil.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if repo.branchmap().hasbranch(pattern):
                return subset.filter(lambda r: matcher(getbranch(r)),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbranch(r)),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbranch(r) in b,
                         condrepr=lambda: '<branch %r>' % _sortedb(b))

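# Illustrative note (added comment, not in the original file): the membership
# test above is the call site touched by this changeset; it used to read
# 'pattern in repo.branchmap()' and now goes through the new hasbranch()
# helper introduced in branchmap.py, e.g.
#
#   repo.branchmap().hasbranch('default')   # True in most repositories
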
@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `phasedivergent`.
    (EXPERIMENTAL)
    """
    # i18n: "phasedivergent" is a keyword
    getargs(x, 0, 0, _("phasedivergent takes no arguments"))
    phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
    return subset & phasedivergent

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

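# Illustrative note (added comment, not in the original file): 'field'
# indexes the status tuple returned by repo.status(); the modifies()/adds()/
# removes() predicates defined elsewhere in this module pass the position of
# the list they care about, roughly
#
#   checkstatus(repo, subset, pat, 1)   # added files, assuming index 1
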
def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True, weight=10)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

# for internal use
@predicate('_commonancestorheads(set)', safe=True)
def _commonancestorheads(repo, subset, x):
    # This is an internal method for quickly calculating "heads(::x and
    # ::y)"

    # These greatest common ancestors are the same ones that the consensus bid
    # merge will find.
    startrevs = getset(repo, fullreposet(repo), x, order=anyorder)

    ancs = repo.changelog._commonancestorsheads(*list(startrevs))
    return subset & baseset(ancs)

@predicate('commonancestors(set)', safe=True)
def commonancestors(repo, subset, x):
    """Changesets that are ancestors of every changeset in set.
    """
    startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
    if not startrevs:
        return baseset()
    for r in startrevs:
        subset &= dagop.revancestors(repo, baseset([r]))
    return subset

@predicate('contains(pattern)', weight=100)
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

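# Illustrative usage (added comment, not in the original file): a bare path
# is tested directly against the manifest, while an explicit pattern kind
# walks every manifest entry, e.g.
#
#   repo.revs('contains(setup.py)')        # fast membership test
#   repo.revs('contains("glob:**.rst")')   # slower full-manifest scan
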
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True, weight=10)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = dateutil.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

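# Illustrative usage (added comment, not in the original file):
# dateutil.matchdate turns the interval string into a predicate over unix
# timestamps, so e.g.
#
#   repo.revs('date(">2019-01-01")')   # committed after New Year 2019
#   repo.revs('date("-7")')            # committed within the last seven days
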
@predicate('desc(string)', safe=True, weight=10)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))

    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    return subset.filter(lambda r: matcher(repo[r].description()),
                         condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False, startdepth=None,
                 stopdepth=None):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
    return subset & s

@predicate('descendants(set[, depth])', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'descendants', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "descendants" is a keyword
        raise error.ParseError(_('descendants takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        n = getinteger(args['startdepth'],
                       "descendants expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "descendants" is a keyword
        n = getinteger(args['depth'], _("descendants expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        stopdepth = n + 1
    return _descendants(repo, subset, args['set'],
                        startdepth=startdepth, stopdepth=stopdepth)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True, weight=10)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % _sortedb(dests))

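# Illustrative usage (added comment, not in the original file): the transitive
# scan above means a graft of a graft still traces back to the original
# source, e.g.
#
#   repo.revs('destination(2)')   # grafts of rev 2, plus grafts of those
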
@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final
    successors. (EXPERIMENTAL)
    """
    # i18n: "contentdivergent" is a keyword
    getargs(x, 0, 0, _("contentdivergent takes no arguments"))
    contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
    return subset & contentdivergent

@predicate('expectsize(set[, size])', safe=True, takeorder=True)
def expectsize(repo, subset, x, order):
    """Return the given revset if size matches the revset size.
    Abort if the revset size doesn't match the given size.
    size can either be an integer range or an integer.

    For example, ``expectsize(0:1, 3:5)`` will abort as revset size is 2 and
    2 is not between 3 and 5 inclusive."""

    args = getargsdict(x, 'expectsize', 'set size')
    minsize = 0
    maxsize = len(repo) + 1
    err = ''
    if 'size' not in args or 'set' not in args:
        raise error.ParseError(_('invalid set of arguments'))
    minsize, maxsize = getintrange(args['size'],
                                   _('expectsize requires a size range'
                                     ' or a positive integer'),
                                   _('size range bounds must be integers'),
                                   minsize, maxsize)
    if minsize < 0 or maxsize < 0:
        raise error.ParseError(_('negative size'))
    rev = getset(repo, fullreposet(repo), args['set'], order=order)
    if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize):
        err = _('revset size mismatch.'
                ' expected between %d and %d, got %d') % (minsize, maxsize,
                                                          len(rev))
    elif minsize == maxsize and len(rev) != minsize:
        err = _('revset size mismatch.'
                ' expected %d, got %d') % (minsize, len(rev))
    if err:
        raise error.RepoLookupError(err)
    if order == followorder:
        return subset & rev
    else:
        return rev & subset

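# Illustrative usage (added comment, not in the original file): expectsize()
# works as a guard inside larger revsets, e.g.
#
#   repo.revs('expectsize(heads(all()), 1)')   # abort unless exactly one head
#   repo.revs('expectsize(0:9, 5:15)')         # passes: 10 is within 5..15
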
@predicate('extdata(source)', safe=False, weight=100)
def extdata(repo, subset, x):
    """Changesets in the specified extdata source. (EXPERIMENTAL)"""
    # i18n: "extdata" is a keyword
    args = getargsdict(x, 'extdata', 'source')
    source = getstring(args.get('source'),
                       # i18n: "extdata" is a keyword
                       _('extdata takes at least 1 string argument'))
    data = scmutil.extdatasource(repo, source)
    return subset & baseset(data)

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = stringutil.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))

@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s

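# Illustrative usage (added comment, not in the original file): the filelog
# walk trades accuracy for speed, so the two spellings can differ, e.g.
#
#   repo.revs('filelog(mercurial/revset.py)')   # fast, may miss deletions
#   repo.revs('file(mercurial/revset.py)')      # slow, scans every changeset
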
@predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
def first(repo, subset, x, order):
    """An alias for limit().
    """
    return limit(repo, subset, x, order)

def _follow(repo, subset, x, name, followfirst=False):
    args = getargsdict(x, name, 'file startrev')
    revs = None
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
    if 'file' in args:
        x = getstring(args['file'], _("%s expected a pattern") % name)
        if revs is None:
            revs = [None]
        fctxs = []
        for r in revs:
            ctx = mctx = repo[r]
            if r is None:
                ctx = repo['.']
            m = matchmod.match(repo.root, repo.getcwd(), [x],
                               ctx=mctx, default='path')
            fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
        s = dagop.filerevancestors(fctxs, followfirst)
    else:
        if revs is None:
            revs = baseset([repo['.'].rev()])
        s = dagop.revancestors(repo, revs, followfirst)

    return subset & s

@predicate('follow([file[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If file pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([file[, startrev]])``
    # Like ``follow([file[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, the working
    directory's parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    # i18n: "followlines" is a keyword
    msg = _("followlines expects exactly one file")
    fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
    fromline, toline = util.processlinerange(
        *getintrange(args['lines'][0],
                     # i18n: "followlines" is a keyword
                     _("followlines expects a line number or a range"),
                     _("line range bounds must be integers")))

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    if descend:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs

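# Illustrative usage (added comment, not in the original file): line-range
# history, as also exposed through 'hg log -L', e.g.
#
#   repo.revs('followlines(mercurial/revset.py, 10:20)')
#   repo.revs('followlines(README, 1:5, startrev=tip, descend=True)')
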
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

@predicate('grep(regex)', weight=10)
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(
            _('invalid match pattern: %s') % stringutil.forcebytestr(e))

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))

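# Illustrative usage (added comment, not in the original file): the raw-string
# form recommended in the docstring keeps regex escapes away from the revset
# parser, e.g.
#
#   repo.revs('grep(r"^fix")')   # messages/users/files starting with "fix"
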
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value == '': # empty means working directory
                rev = node.wdirrev
            else:
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'
    hasset = any(matchmod.patkind(p) == 'set' for p in pats + inc + exc)

    mcache = [None]

    # This directly reads the changelog data, as creating a changectx for
    # every revision is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)

        if not mcache[0] or (hasset and rev is None):
            r = x if rev is None else rev
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), pats,
                                       include=inc, exclude=exc, ctx=repo[r],
                                       default=default)
        m = mcache[0]

        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))

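# Illustrative usage (added comment, not in the original file): callers build
# the prefixed argument list themselves, the way hasfile() below does for a
# single 'p:' pattern; an internal caller combining a pattern with an exclude
# would pass a parse tree along the lines of
#
#   _matchfiles(repo, subset, ('list',
#                              ('string', 'p:glob:**.py'),
#                              ('string', 'x:tests/**')))
#
# (the exact tree shape is an assumption based on how the revset parser
# represents argument lists).
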
@predicate('file(pattern)', safe=True, weight=10)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().iterheads():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

1247 @predicate('heads(set)', safe=True, takeorder=True)
1247 @predicate('heads(set)', safe=True, takeorder=True)
1248 def heads(repo, subset, x, order):
1248 def heads(repo, subset, x, order):
1249 """Members of set with no children in set.
1249 """Members of set with no children in set.
1250 """
1250 """
1251 # argument set should never define order
1251 # argument set should never define order
1252 if order == defineorder:
1252 if order == defineorder:
1253 order = followorder
1253 order = followorder
1254 inputset = getset(repo, fullreposet(repo), x, order=order)
1254 inputset = getset(repo, fullreposet(repo), x, order=order)
1255 wdirparents = None
1255 wdirparents = None
1256 if node.wdirrev in inputset:
1256 if node.wdirrev in inputset:
1257 # a bit slower, but not common so good enough for now
1257 # a bit slower, but not common so good enough for now
1258 wdirparents = [p.rev() for p in repo[None].parents()]
1258 wdirparents = [p.rev() for p in repo[None].parents()]
1259 inputset = set(inputset)
1259 inputset = set(inputset)
1260 inputset.discard(node.wdirrev)
1260 inputset.discard(node.wdirrev)
1261 heads = repo.changelog.headrevs(inputset)
1261 heads = repo.changelog.headrevs(inputset)
1262 if wdirparents is not None:
1262 if wdirparents is not None:
1263 heads.difference_update(wdirparents)
1263 heads.difference_update(wdirparents)
1264 heads.add(node.wdirrev)
1264 heads.add(node.wdirrev)
1265 heads = baseset(heads)
1265 heads = baseset(heads)
1266 return subset & heads
1266 return subset & heads
1267
1267
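# Sketch contrasting the two predicates above, assuming a repo with a
# branched history: head() yields named-branch heads repo-wide, while
# heads(set) keeps only members of its argument with no children in it:
#
#     repo.revs('head() and branch(default)')   # heads of one branch
#     repo.revs('heads(::tip)')                 # just tip itself
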
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True, weight=10)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

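# Hedged example: the match above is substring-based and lowercased on
# both sides, so these two queries select the same changesets:
#
#     repo.revs('keyword("BUG")')
#     repo.revs('keyword("bug")')
#
# grep(regex) remains the documented alternative for case-sensitive or
# anchored searches.
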
@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    ls = os.slice(ofs, ofs + lim)
    if order == followorder and lim > 1:
        return subset & ls
    return ls & subset

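# Worked example of the slice above for a hypothetical repo with
# revisions 0..9: os.slice(ofs, ofs + lim) is half-open, so
# limit(all(), 3, 2) takes os[2:5]:
#
#     repo.revs('limit(all(), 3, 2)')   # -> 2, 3, 4
#     repo.revs('limit(all())')         # n defaults to 1 -> 0
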
@predicate('last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    ls = os.slice(0, lim)
    if order == followorder and lim > 1:
        return subset & ls
    ls.reverse()
    return ls & subset

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

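# Behavior sketch: like maxrev() above, minrev() swallows the ValueError
# from an empty input and degrades to an empty baseset, so callers never
# see an exception:
#
#     repo.revs('max(none())')   # -> empty set, no error
#     repo.revs('min(draft())')  # -> lowest draft rev, if any exists
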
@predicate('modifies(pattern)', safe=True, weight=30)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = stringutil.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= {node.nullrev}
    return subset & names

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except error.WdirUnsupported:
            rn = node.wdirrev
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        try:
            pm = scmutil.resolvehexnodeidprefix(repo, n)
            if pm is not None:
                rn = repo.changelog.rev(pm)
        except LookupError:
            pass
        except error.WdirUnsupported:
            rn = node.wdirrev

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

@predicate('none()', safe=True)
def none(repo, subset, x):
    """No changesets.
    """
    # i18n: "none" is a keyword
    getargs(x, 0, 0, _("none takes no arguments"))
    return baseset()

@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results

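# Equivalence sketch for the two-argument form handled above, with
# hypothetical branch names A and B:
#
#     repo.revs('only(A, B)')    # conceptually the same as '::A - ::B'
#
# e.g. 'only(feature, default)' lists what merging 'feature' into
# 'default' would bring in.
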
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = {_firstsrc(r) for r in dests}
    o -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o

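# Chain-following sketch (assumed history): if X was grafted as Y, and Y
# grafted again as Z, then _firstsrc() walks Z -> Y -> X, so origin(Z)
# selects X rather than the intermediate Y:
#
#     repo.revs('origin(.)')   # source of the working parent, if any
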
@predicate('outgoing([path])', safe=False, weight=10)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    if not dest:
        # ui.paths.getpath() explicitly tests for None, not just a boolean
        dest = None
    path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, []

    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = {cl.rev(r) for r in outgoing.missing}
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[0])
        except error.WdirUnsupported:
            ps.add(repo[r].p1().rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                # parents() yields changectx objects; store the revision
                # number so ps stays a set of ints like the fast path above
                ps.add(parents[1].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

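# Usage sketch: with no argument both predicates above inspect the
# working directory, so during an uncommitted merge:
#
#     repo.revs('p1()')   # first parent of the working directory
#     repo.revs('p2()')   # second parent; empty when not merging
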
def parentpost(repo, subset, x, order):
    return p1(repo, subset, x)

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            try:
                up(parentrevs(r))
            except error.WdirUnsupported:
                up(p.rev() for p in repo[r].parents())
    ps -= {node.nullrev}
    return subset & ps

def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    return repo._phasecache.getrevset(repo, targets, subset)

@predicate('_phase(idx)', safe=True)
def phase(repo, subset, x):
    l = getargs(x, 1, 1, ("_phase requires one argument"))
    target = getinteger(l[0], ("_phase expects a number"))
    return _phase(repo, subset, target)

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

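# Phase sketch: a revision is in exactly one of the public, draft or
# secret phases, and all three predicates resolve through the phase
# cache in _phase() rather than testing revisions one by one:
#
#     repo.revs('draft() and secret()')   # always empty
#     repo.revs('not public()')           # draft plus secret
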
@predicate('stack([revs])', safe=True)
def stack(repo, subset, x):
    """Experimental revset for the stack of changesets or working directory
    parent. (EXPERIMENTAL)
    """
    if x is None:
        stacks = stackmod.getstack(repo, x)
    else:
        stacks = smartset.baseset([])
        for revision in getset(repo, fullreposet(repo), x):
            currentstack = stackmod.getstack(repo, revision)
            stacks = stacks + currentstack

    return subset & stacks

def parentspec(repo, subset, x, n, order):
    """``set^0``
      The set.
    ``set^1`` (or ``set^``), ``set^2``
      First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                ps.add(repo[r].p1().rev())
        else:
            try:
                parents = cl.parentrevs(r)
                if parents[1] != node.nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps

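# Caret sketch: parentspec() backs the '^' suffix of the revset grammar;
# for a hypothetical merge changeset M:
#
#     repo.revs('M^')    # same as 'M^1': first parent
#     repo.revs('M^2')   # second parent
#     repo.revs('M^0')   # M itself
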
@predicate('present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    return _phase(repo, subset, phases.draft, phases.secret)

# for internal use
@predicate('_phaseandancestors(phasename, set)', safe=True)
def _phaseandancestors(repo, subset, x):
    # equivalent to (phasename() & ancestors(set)) but more efficient
    # phasename could be one of 'draft', 'secret', or '_notpublic'
    args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
    phasename = getsymbol(args[0])
    s = getset(repo, fullreposet(repo), args[1])

    draft = phases.draft
    secret = phases.secret
    phasenamemap = {
        '_notpublic': draft,
        'draft': draft, # follow secret's ancestors
        'secret': secret,
    }
    if phasename not in phasenamemap:
        raise error.ParseError('%r is not a valid phasename' % phasename)

    minimalphase = phasenamemap[phasename]
    getphase = repo._phasecache.phase

    def cutfunc(rev):
        return getphase(repo, rev) < minimalphase

    revs = dagop.revancestors(repo, s, cutfunc=cutfunc)

    if phasename == 'draft': # need to remove secret changesets
        revs = revs.filter(lambda r: getphase(repo, r) == draft)
    return subset & revs

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    return _phase(repo, subset, phases.public)

@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True, weight=30)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
        return baseset()
    return subset & baseset([l])

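# Contrast sketch for rev() above and _rev() below: rev() silently maps
# an unknown revision number to the empty set, while _rev() lets the
# changelog lookup raise:
#
#     repo.revs('rev(999999999)')    # -> empty set in a small repo
#     repo.revs('_rev(999999999)')   # -> raises, rev does not exist
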
1851 @predicate('_rev(number)', safe=True)
1851 @predicate('_rev(number)', safe=True)
1852 def _rev(repo, subset, x):
1852 def _rev(repo, subset, x):
1853 # internal version of "rev(x)" that raise error if "x" is invalid
1853 # internal version of "rev(x)" that raise error if "x" is invalid
1854 # i18n: "rev" is a keyword
1854 # i18n: "rev" is a keyword
1855 l = getargs(x, 1, 1, _("rev requires one argument"))
1855 l = getargs(x, 1, 1, _("rev requires one argument"))
1856 try:
1856 try:
1857 # i18n: "rev" is a keyword
1857 # i18n: "rev" is a keyword
1858 l = int(getstring(l[0], _("rev requires a number")))
1858 l = int(getstring(l[0], _("rev requires a number")))
1859 except (TypeError, ValueError):
1859 except (TypeError, ValueError):
1860 # i18n: "rev" is a keyword
1860 # i18n: "rev" is a keyword
1861 raise error.ParseError(_("rev expects a number"))
1861 raise error.ParseError(_("rev expects a number"))
1862 repo.changelog.node(l) # check that the rev exists
1862 repo.changelog.node(l) # check that the rev exists
1863 return subset & baseset([l])
1863 return subset & baseset([l])
1864
1864
1865 @predicate('revset(set)', safe=True, takeorder=True)
1865 @predicate('revset(set)', safe=True, takeorder=True)
1866 def revsetpredicate(repo, subset, x, order):
1866 def revsetpredicate(repo, subset, x, order):
1867 """Strictly interpret the content as a revset.
1867 """Strictly interpret the content as a revset.
1868
1868
1869 The content of this special predicate will be strictly interpreted as a
1869 The content of this special predicate will be strictly interpreted as a
1870 revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
1870 revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
1871 without possible ambiguity with a "id(0)" bookmark or tag.
1871 without possible ambiguity with a "id(0)" bookmark or tag.
1872 """
1872 """
1873 return getset(repo, subset, x, order)
1873 return getset(repo, subset, x, order)
1874
1874
1875 @predicate('matching(revision [, field])', safe=True)
1875 @predicate('matching(revision [, field])', safe=True)
1876 def matching(repo, subset, x):
1876 def matching(repo, subset, x):
1877 """Changesets in which a given set of fields match the set of fields in the
1877 """Changesets in which a given set of fields match the set of fields in the
1878 selected revision or set.
1878 selected revision or set.
1879
1879
1880 To match more than one field pass the list of fields to match separated
1880 To match more than one field pass the list of fields to match separated
1881 by spaces (e.g. ``author description``).
1881 by spaces (e.g. ``author description``).
1882
1882
1883 Valid fields are most regular revision fields and some special fields.
1883 Valid fields are most regular revision fields and some special fields.
1884
1884
1885 Regular revision fields are ``description``, ``author``, ``branch``,
1885 Regular revision fields are ``description``, ``author``, ``branch``,
1886 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1886 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1887 and ``diff``.
1887 and ``diff``.
1888 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1888 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1889 contents of the revision. Two revisions matching their ``diff`` will
1889 contents of the revision. Two revisions matching their ``diff`` will
1890 also match their ``files``.
1890 also match their ``files``.
1891
1891
1892 Special fields are ``summary`` and ``metadata``:
1892 Special fields are ``summary`` and ``metadata``:
1893 ``summary`` matches the first line of the description.
1893 ``summary`` matches the first line of the description.
1894 ``metadata`` is equivalent to matching ``description user date``
1894 ``metadata`` is equivalent to matching ``description user date``
1895 (i.e. it matches the main metadata fields).
1895 (i.e. it matches the main metadata fields).
1896
1896
1897 ``metadata`` is the default field which is used when no fields are
1897 ``metadata`` is the default field which is used when no fields are
1898 specified. You can match more than one field at a time.
1898 specified. You can match more than one field at a time.
1899 """
1899 """
1900 # i18n: "matching" is a keyword
1900 # i18n: "matching" is a keyword
1901 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1901 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1902
1902
1903 revs = getset(repo, fullreposet(repo), l[0])
1903 revs = getset(repo, fullreposet(repo), l[0])
1904
1904
1905 fieldlist = ['metadata']
1905 fieldlist = ['metadata']
1906 if len(l) > 1:
1906 if len(l) > 1:
1907 fieldlist = getstring(l[1],
1907 fieldlist = getstring(l[1],
1908 # i18n: "matching" is a keyword
1908 # i18n: "matching" is a keyword
1909 _("matching requires a string "
1909 _("matching requires a string "
1910 "as its second argument")).split()
1910 "as its second argument")).split()
1911
1911
1912 # Make sure that there are no repeated fields,
1912 # Make sure that there are no repeated fields,
1913 # expand the 'special' 'metadata' field type
1913 # expand the 'special' 'metadata' field type
1914 # and check the 'files' whenever we check the 'diff'
1914 # and check the 'files' whenever we check the 'diff'
1915 fields = []
1915 fields = []
1916 for field in fieldlist:
1916 for field in fieldlist:
1917 if field == 'metadata':
1917 if field == 'metadata':
1918 fields += ['user', 'description', 'date']
1918 fields += ['user', 'description', 'date']
1919 elif field == 'diff':
1919 elif field == 'diff':
1920 # a revision matching the diff must also match the files
1920 # a revision matching the diff must also match the files
1921 # since matching the diff is very costly, make sure to
1921 # since matching the diff is very costly, make sure to
1922 # also match the files first
1922 # also match the files first
1923 fields += ['files', 'diff']
1923 fields += ['files', 'diff']
1924 else:
1924 else:
1925 if field == 'author':
1925 if field == 'author':
1926 field = 'user'
1926 field = 'user'
1927 fields.append(field)
1927 fields.append(field)
1928 fields = set(fields)
1928 fields = set(fields)
1929 if 'summary' in fields and 'description' in fields:
1929 if 'summary' in fields and 'description' in fields:
1930 # If a revision matches its description it also matches its summary
1930 # If a revision matches its description it also matches its summary
1931 fields.discard('summary')
1931 fields.discard('summary')
1932
1932
1933 # We may want to match more than one field
1933 # We may want to match more than one field
1934 # Not all fields take the same amount of time to be matched
1934 # Not all fields take the same amount of time to be matched
1935 # Sort the selected fields in order of increasing matching cost
1935 # Sort the selected fields in order of increasing matching cost
1936 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1936 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1937 'files', 'description', 'substate', 'diff']
1937 'files', 'description', 'substate', 'diff']
1938 def fieldkeyfunc(f):
1938 def fieldkeyfunc(f):
1939 try:
1939 try:
1940 return fieldorder.index(f)
1940 return fieldorder.index(f)
1941 except ValueError:
1941 except ValueError:
1942 # assume an unknown field is very costly
1942 # assume an unknown field is very costly
1943 return len(fieldorder)
1943 return len(fieldorder)
1944 fields = list(fields)
1944 fields = list(fields)
1945 fields.sort(key=fieldkeyfunc)
1945 fields.sort(key=fieldkeyfunc)
1946
1946
1947 # Each field will be matched with its own "getfield" function
1947 # Each field will be matched with its own "getfield" function
1948 # which will be added to the getfieldfuncs array of functions
1948 # which will be added to the getfieldfuncs array of functions
1949 getfieldfuncs = []
1949 getfieldfuncs = []
1950 _funcs = {
1950 _funcs = {
1951 'user': lambda r: repo[r].user(),
1951 'user': lambda r: repo[r].user(),
1952 'branch': lambda r: repo[r].branch(),
1952 'branch': lambda r: repo[r].branch(),
1953 'date': lambda r: repo[r].date(),
1953 'date': lambda r: repo[r].date(),
1954 'description': lambda r: repo[r].description(),
1954 'description': lambda r: repo[r].description(),
1955 'files': lambda r: repo[r].files(),
1955 'files': lambda r: repo[r].files(),
1956 'parents': lambda r: repo[r].parents(),
1956 'parents': lambda r: repo[r].parents(),
1957 'phase': lambda r: repo[r].phase(),
1957 'phase': lambda r: repo[r].phase(),
1958 'substate': lambda r: repo[r].substate,
1958 'substate': lambda r: repo[r].substate,
1959 'summary': lambda r: repo[r].description().splitlines()[0],
1959 'summary': lambda r: repo[r].description().splitlines()[0],
1960 'diff': lambda r: list(repo[r].diff(
1960 'diff': lambda r: list(repo[r].diff(
1961 opts=diffutil.diffallopts(repo.ui, {'git': True}))),
1961 opts=diffutil.diffallopts(repo.ui, {'git': True}))),
1962 }
1962 }
1963 for info in fields:
1963 for info in fields:
1964 getfield = _funcs.get(info, None)
1964 getfield = _funcs.get(info, None)
1965 if getfield is None:
1965 if getfield is None:
1966 raise error.ParseError(
1966 raise error.ParseError(
1967 # i18n: "matching" is a keyword
1967 # i18n: "matching" is a keyword
1968 _("unexpected field name passed to matching: %s") % info)
1968 _("unexpected field name passed to matching: %s") % info)
1969 getfieldfuncs.append(getfield)
1969 getfieldfuncs.append(getfield)
1970 # convert the getfield array of functions into a "getinfo" function
1970 # convert the getfield array of functions into a "getinfo" function
1971 # which returns an array of field values (or a single value if there
1971 # which returns an array of field values (or a single value if there
1972 # is only one field to match)
1972 # is only one field to match)
1973 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1973 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1974
1974
1975 def matches(x):
1975 def matches(x):
1976 for rev in revs:
1976 for rev in revs:
1977 target = getinfo(rev)
1977 target = getinfo(rev)
1978 match = True
1978 match = True
1979 for n, f in enumerate(getfieldfuncs):
1979 for n, f in enumerate(getfieldfuncs):
1980 if target[n] != f(x):
1980 if target[n] != f(x):
1981 match = False
1981 match = False
1982 if match:
1982 if match:
1983 return True
1983 return True
1984 return False
1984 return False
1985
1985
1986 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1986 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1987
1987
1988 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1988 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1989 def reverse(repo, subset, x, order):
1989 def reverse(repo, subset, x, order):
1990 """Reverse order of set.
1990 """Reverse order of set.
1991 """
1991 """
1992 l = getset(repo, subset, x, order)
1992 l = getset(repo, subset, x, order)
1993 if order == defineorder:
1993 if order == defineorder:
1994 l.reverse()
1994 l.reverse()
1995 return l
1995 return l
1996
1996
1997 @predicate('roots(set)', safe=True)
1997 @predicate('roots(set)', safe=True)
1998 def roots(repo, subset, x):
1998 def roots(repo, subset, x):
1999 """Changesets in set with no parent changeset in set.
1999 """Changesets in set with no parent changeset in set.
2000 """
2000 """
2001 s = getset(repo, fullreposet(repo), x)
2001 s = getset(repo, fullreposet(repo), x)
2002 parents = repo.changelog.parentrevs
2002 parents = repo.changelog.parentrevs
2003 def filter(r):
2003 def filter(r):
2004 for p in parents(r):
2004 for p in parents(r):
2005 if 0 <= p and p in s:
2005 if 0 <= p and p in s:
2006 return False
2006 return False
2007 return True
2007 return True
2008 return subset & s.filter(filter, condrepr='<roots>')
2008 return subset & s.filter(filter, condrepr='<roots>')
2009
2009
2010 _sortkeyfuncs = {
2010 _sortkeyfuncs = {
2011 'rev': lambda c: c.rev(),
2011 'rev': lambda c: c.rev(),
2012 'branch': lambda c: c.branch(),
2012 'branch': lambda c: c.branch(),
2013 'desc': lambda c: c.description(),
2013 'desc': lambda c: c.description(),
2014 'user': lambda c: c.user(),
2014 'user': lambda c: c.user(),
2015 'author': lambda c: c.user(),
2015 'author': lambda c: c.user(),
2016 'date': lambda c: c.date()[0],
2016 'date': lambda c: c.date()[0],
2017 }
2017 }
2018
2018
2019 def _getsortargs(x):
2019 def _getsortargs(x):
2020 """Parse sort options into (set, [(key, reverse)], opts)"""
2020 """Parse sort options into (set, [(key, reverse)], opts)"""
2021 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
2021 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
2022 if 'set' not in args:
2022 if 'set' not in args:
2023 # i18n: "sort" is a keyword
2023 # i18n: "sort" is a keyword
2024 raise error.ParseError(_('sort requires one or two arguments'))
2024 raise error.ParseError(_('sort requires one or two arguments'))
2025 keys = "rev"
2025 keys = "rev"
2026 if 'keys' in args:
2026 if 'keys' in args:
2027 # i18n: "sort" is a keyword
2027 # i18n: "sort" is a keyword
2028 keys = getstring(args['keys'], _("sort spec must be a string"))
2028 keys = getstring(args['keys'], _("sort spec must be a string"))
2029
2029
2030 keyflags = []
2030 keyflags = []
2031 for k in keys.split():
2031 for k in keys.split():
2032 fk = k
2032 fk = k
2033 reverse = (k.startswith('-'))
2033 reverse = (k.startswith('-'))
2034 if reverse:
2034 if reverse:
2035 k = k[1:]
2035 k = k[1:]
2036 if k not in _sortkeyfuncs and k != 'topo':
2036 if k not in _sortkeyfuncs and k != 'topo':
2037 raise error.ParseError(
2037 raise error.ParseError(
2038 _("unknown sort key %r") % pycompat.bytestr(fk))
2038 _("unknown sort key %r") % pycompat.bytestr(fk))
2039 keyflags.append((k, reverse))
2039 keyflags.append((k, reverse))
2040
2040
2041 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
2041 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
2042 # i18n: "topo" is a keyword
2042 # i18n: "topo" is a keyword
2043 raise error.ParseError(_('topo sort order cannot be combined '
2043 raise error.ParseError(_('topo sort order cannot be combined '
2044 'with other sort keys'))
2044 'with other sort keys'))
2045
2045
2046 opts = {}
2046 opts = {}
2047 if 'topo.firstbranch' in args:
2047 if 'topo.firstbranch' in args:
2048 if any(k == 'topo' for k, reverse in keyflags):
2048 if any(k == 'topo' for k, reverse in keyflags):
2049 opts['topo.firstbranch'] = args['topo.firstbranch']
2049 opts['topo.firstbranch'] = args['topo.firstbranch']
2050 else:
2050 else:
2051 # i18n: "topo" and "topo.firstbranch" are keywords
2051 # i18n: "topo" and "topo.firstbranch" are keywords
2052 raise error.ParseError(_('topo.firstbranch can only be used '
2052 raise error.ParseError(_('topo.firstbranch can only be used '
2053 'when using the topo sort key'))
2053 'when using the topo sort key'))
2054
2054
2055 return args['set'], keyflags, opts
2055 return args['set'], keyflags, opts
2056
2056
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
           weight=10)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending; specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date,
    - ``topo`` for a reverse topological sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topological branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s, order)

    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
                                      firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])

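# Usage sketch (editor's note): this predicate is typically reached from
# the command line, e.g.
#
#   hg log -r 'sort(branch(default), -date)'    # newest first
#   hg log -r 'sort(all(), topo)'               # reverse topological order
#   hg log -r 'sort(all(), "user -rev")'        # by user, then rev descending
#
# Multi-key sorts are stable: later keys only break ties left by earlier
# ones.
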
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(['.hgsubstate'])

    def submatches(names):
        k, p, m = stringutil.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

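# Usage sketch (editor's note): the pattern accepts the usual matcher
# prefixes, e.g.
#
#   hg log -r 'subrepo()'                 # any change to any subrepo
#   hg log -r 'subrepo("re:^vendor/")'    # subrepo paths starting with vendor/
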
def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    cl = repo.unfiltered().changelog
    torev = cl.rev
    tonode = cl.node
    nodemap = cl.nodemap
    result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
    return smartset.baseset(result - repo.changelog.filteredrevs)

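# Illustrative sketch (editor's note): any [node] -> [node] function can
# be lifted to smartsets this way; an identity mapping, for example,
# simply returns the input set minus filtered revisions:
#
#   same = _mapbynodefunc(repo, s, lambda nodes: list(nodes))
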
@predicate('successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given changesets themselves"""
    s = getset(repo, fullreposet(repo), x)
    f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
    d = _mapbynodefunc(repo, s, f)
    return subset & d

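# Usage sketch (editor's note):
#
#   hg log -r 'successors(obsolete())'    # where rewritten changesets went
#
# Since the input changesets are included in their own successors, the
# result is a superset of the input (intersected with the current subset).
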
def _substringmatcher(pattern, casesensitive=True):
    kind, pattern, matcher = stringutil.stringmatcher(
        pattern, casesensitive=casesensitive)
    if kind == 'literal':
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher

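# Behavior sketch (editor's note): plain strings become substring tests,
# while 're:' patterns keep the matcher built by stringutil.stringmatcher:
#
#   kind, pat, m = _substringmatcher('bob', casesensitive=False)
#   # m('Bob <bob@example.com>') -> True
#   kind, pat, m = _substringmatcher('re:^bob')
#   # m('bob') -> True; m('sponge bob') -> False
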
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = stringutil.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = {repo[tn].rev()}
        else:
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
    return subset & s

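# Usage sketch (editor's note):
#
#   hg log -r 'tag()'             # all tagged revisions (ignoring 'tip')
#   hg log -r 'tag("1.0")'        # the revision carrying tag 1.0
#   hg log -r 'tag("re:^v\d")'    # tags matching a regular expression
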
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
    """
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _("orphan takes no arguments"))
    orphan = obsmod.getrevs(repo, 'orphan')
    return subset & orphan


@predicate('user(string)', safe=True, weight=10)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)

@predicate('wdir()', safe=True, weight=0)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for the caller to deduplicate
    # sets because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if ('%d' % r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t, defineorder)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

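# Protocol sketch (editor's note): the internal predicates below (_list,
# _intlist, _hexlist) are fed a single '\0'-separated string by the
# parser, so a programmatically built expression looks roughly like
#
#   ('func', ('symbol', '_list'), ('string', 'tip\x00default\x002'))
#
# and resolves each item in order, dropping duplicates.
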
# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)

def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_intlist', safe=True, takeorder=True, weight=0)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)

def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)

methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "andsmally": andsmallyset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "relation": relationset,
    "relsubscript": relsubscriptset,
    "subscript": subscriptset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
    "smartset": rawsmartset,
}

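# Dispatch sketch (editor's note): getset() walks a parsed tree and picks
# the handler for each node type from this table, so an expression such
# as 'heads(default)' evaluates roughly as
#
#   tree = ('func', ('symbol', 'heads'), ('symbol', 'default'))
#   methods[tree[0]](repo, subset, *tree[1:])    # -> func() -> heads()
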
subscriptrelations = {
    "g": generationsrel,
    "generations": generationsrel,
}

def lookupfn(repo):
    return lambda symbol: scmutil.isrevsymbol(repo, symbol)

def match(ui, spec, lookup=None):
    """Create a matcher for a single revision spec"""
    return matchany(ui, [spec], lookup=lookup)

def matchany(ui, specs, lookup=None, localalias=None):
    """Create a matcher that will include any revisions matching one of the
    given specs.

    If a lookup function is given, the parser will first attempt to handle
    old-style ranges, which may contain operator characters.

    If localalias is not None, it is a dict {name: definitionstring}. It takes
    precedence over the [revsetalias] config section.
    """
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        tree = ('or',
                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))

    aliases = []
    warn = None
    if ui:
        aliases.extend(ui.configitems('revsetalias'))
        warn = ui.warn
    if localalias:
        aliases.extend(localalias.items())
    if aliases:
        tree = revsetlang.expandaliases(tree, aliases, warn=warn)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    return makematcher(tree)

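# Usage sketch (editor's note): callers typically do
#
#   m = matchany(ui, ['heads(default)', '.^::.'])
#   revs = m(repo)    # smartset of revisions matching either spec
#
# and localalias allows ad-hoc aliases without touching the config, e.g.
#
#   matchany(ui, ['mine'], localalias={'mine': 'user(alice)'})
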
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None, order=None):
        if order is None:
            if subset is None:
                order = defineorder # 'x'
            else:
                order = followorder # 'subset & x'
        if subset is None:
            subset = fullreposet(repo)
        return getset(repo, subset, tree, order)
    return mfunc

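# Order semantics (editor's note): evaluated on its own, the expression
# defines the result order; intersected with an explicit subset, it
# follows that subset's order instead:
#
#   mfunc(repo)            # defineorder: the expression decides ordering
#   mfunc(repo, subset)    # followorder: iterates in subset order
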
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from the specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

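# Extension sketch (editor's note): third-party extensions register new
# predicates through registrar, and loadpredicate() above wires them into
# symbols/safesymbols when the extension is loaded. The predicate name
# below is hypothetical:
#
#   from mercurial import registrar
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('closedheads()', safe=True)
#   def closedheads(repo, subset, x):
#       """Heads of closed branches (hypothetical example)."""
#       getargs(x, 0, 0, _("closedheads takes no arguments"))
#       return subset.filter(lambda r: repo[r].closesbranch())
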
# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()