py3: source-transform only call-sites of iteritems(), not definitions...
Martin von Zweigbergk
r42809:3018749a default
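On Python 3, Mercurial's module loader rewrites iteritems()/itervalues() to
items()/values() at import time. This commit narrows the rewrite to
call-sites only: a name directly preceded by the 'def' keyword is a method
definition and is now left alone, so classes that define iteritems() (such
as lazyremotenamedict below) alias 'items = iteritems' themselves. A minimal
standalone sketch of that call-site/definition distinction (it mirrors the
transformer's logic but omits the rest of the loader; the function name
rewritetokens is ours, not from the source):

    import io
    import token
    import tokenize

    def rewritetokens(source):
        """Sketch: rewrite iteritems()/itervalues() call-sites to
        items()/values(), leaving 'def iteritems(...)' definitions intact."""
        toks = list(tokenize.tokenize(io.BytesIO(source).readline))
        out = []
        for i, t in enumerate(toks):
            # A rewrite candidate is a NAME followed by '(' (a call) ...
            iscall = (t.type == token.NAME and i + 1 < len(toks) and
                      toks[i + 1].type == token.OP and
                      toks[i + 1].string == '(')
            # ... that is not directly preceded by the 'def' keyword.
            isdef = (i > 0 and toks[i - 1].type == token.NAME and
                     toks[i - 1].string == 'def')
            if iscall and not isdef and t.string in ('iteritems',
                                                     'itervalues'):
                t = t._replace(string=t.string[4:])  # iteritems -> items
            out.append(t)
        return tokenize.untokenize(out)

    sample = b"def iteritems(self):\n    return d.iteritems()\n"
    print(rewritetokens(sample).decode('utf-8'))
    # def iteritems(self):      <- definition keeps its name
    #     return d.items()      <- call-site is rewritten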
@@ -1,403 +1,405 @@
# remotenames.py - extension to display remotenames
#
# Copyright 2017 Augie Fackler <raf@durin42.com>
# Copyright 2017 Sean Farley <sean@farley.io>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

""" showing remotebookmarks and remotebranches in UI (EXPERIMENTAL)

By default both remotebookmarks and remotebranches are turned on. Config knobs
to control them individually are as follows.

Config options to tweak the default behaviour:

remotenames.bookmarks
  Boolean value to enable or disable showing of remotebookmarks (default: True)

remotenames.branches
  Boolean value to enable or disable showing of remotebranches (default: True)

remotenames.hoistedpeer
  Name of the peer whose remotebookmarks should be hoisted into the top-level
  namespace (default: 'default')
"""

from __future__ import absolute_import

from mercurial.i18n import _

from mercurial.node import (
    bin,
)
from mercurial import (
    bookmarks,
    error,
    extensions,
    logexchange,
    namespaces,
    pycompat,
    registrar,
    revsetlang,
    smartset,
    templateutil,
    util,
)

from mercurial.utils import (
    stringutil,
)

if pycompat.ispy3:
    import collections.abc
    mutablemapping = collections.abc.MutableMapping
else:
    import collections
    mutablemapping = collections.MutableMapping

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)
templatekeyword = registrar.templatekeyword()
revsetpredicate = registrar.revsetpredicate()

configitem('remotenames', 'bookmarks',
    default=True,
)
configitem('remotenames', 'branches',
    default=True,
)
configitem('remotenames', 'hoistedpeer',
    default='default',
)

class lazyremotenamedict(mutablemapping):
    """
    Read-only dict-like class to lazily resolve remotename entries

    We are doing that because remotenames startup was slow.
    We lazily read the remotenames file once to figure out the potential
    entries and store them in self.potentialentries. Then when asked to
    resolve an entry, if it is not in self.potentialentries then it isn't
    there; if it is, we resolve it and store the result in self.cache. The one
    case where we cannot be lazy is when asked for all the entries (keys).
    """
    def __init__(self, kind, repo):
        self.cache = {}
        self.potentialentries = {}
        self._kind = kind # bookmarks or branches
        self._repo = repo
        self.loaded = False

    def _load(self):
        """ Read the remotenames file, store entries matching selected kind """
        self.loaded = True
        repo = self._repo
        for node, rpath, rname in logexchange.readremotenamefile(repo,
                                                                 self._kind):
            name = rpath + '/' + rname
            self.potentialentries[name] = (node, rpath, name)

    def _resolvedata(self, potentialentry):
        """ Check that the node for potentialentry exists and return it """
        if not potentialentry in self.potentialentries:
            return None
        node, remote, name = self.potentialentries[potentialentry]
        repo = self._repo
        binnode = bin(node)
        # if the node doesn't exist, skip it
        try:
            repo.changelog.rev(binnode)
        except LookupError:
            return None
        # Skip closed branches
        if (self._kind == 'branches' and repo[binnode].closesbranch()):
            return None
        return [binnode]

    def __getitem__(self, key):
        if not self.loaded:
            self._load()
        val = self._fetchandcache(key)
        if val is not None:
            return val
        else:
            raise KeyError()

    def __iter__(self):
        return iter(self.potentialentries)

    def __len__(self):
        return len(self.potentialentries)

    def __setitem__(self):
        raise NotImplementedError

    def __delitem__(self):
        raise NotImplementedError

    def _fetchandcache(self, key):
        if key in self.cache:
            return self.cache[key]
        val = self._resolvedata(key)
        if val is not None:
            self.cache[key] = val
            return val
        else:
            return None

    def keys(self):
        """ Get a list of bookmark or branch names """
        if not self.loaded:
            self._load()
        return self.potentialentries.keys()

    def iteritems(self):
        """ Iterate over (name, node) tuples """

        if not self.loaded:
            self._load()

        for k, vtup in self.potentialentries.iteritems():
            yield (k, [bin(vtup[0])])

+    items = iteritems
+
class remotenames(object):
    """
    This class encapsulates all the remotenames state. It also contains
    methods to access that state in convenient ways. Remotenames are lazy
    loaded. Whenever client code needs to ensure the freshest copy of
    remotenames, use the `clearnames` method to force an eventual load.
    """

    def __init__(self, repo, *args):
        self._repo = repo
        self.clearnames()

    def clearnames(self):
        """ Clear all remote names state """
        self.bookmarks = lazyremotenamedict("bookmarks", self._repo)
        self.branches = lazyremotenamedict("branches", self._repo)
        self._invalidatecache()

    def _invalidatecache(self):
        self._nodetobmarks = None
        self._nodetobranch = None
        self._hoisttonodes = None
        self._nodetohoists = None

    def bmarktonodes(self):
        return self.bookmarks

    def nodetobmarks(self):
        if not self._nodetobmarks:
            bmarktonodes = self.bmarktonodes()
            self._nodetobmarks = {}
            for name, node in bmarktonodes.iteritems():
                self._nodetobmarks.setdefault(node[0], []).append(name)
        return self._nodetobmarks

    def branchtonodes(self):
        return self.branches

    def nodetobranch(self):
        if not self._nodetobranch:
            branchtonodes = self.branchtonodes()
            self._nodetobranch = {}
            for name, nodes in branchtonodes.iteritems():
                for node in nodes:
                    self._nodetobranch.setdefault(node, []).append(name)
        return self._nodetobranch

    def hoisttonodes(self, hoist):
        if not self._hoisttonodes:
            marktonodes = self.bmarktonodes()
            self._hoisttonodes = {}
            hoist += '/'
            for name, node in marktonodes.iteritems():
                if name.startswith(hoist):
                    name = name[len(hoist):]
                    self._hoisttonodes[name] = node
        return self._hoisttonodes

    def nodetohoists(self, hoist):
        if not self._nodetohoists:
            marktonodes = self.bmarktonodes()
            self._nodetohoists = {}
            hoist += '/'
            for name, node in marktonodes.iteritems():
                if name.startswith(hoist):
                    name = name[len(hoist):]
                    self._nodetohoists.setdefault(node[0], []).append(name)
        return self._nodetohoists

def wrapprintbookmarks(orig, ui, repo, fm, bmarks):
    if 'remotebookmarks' not in repo.names:
        return
    ns = repo.names['remotebookmarks']

    for name in ns.listnames(repo):
        nodes = ns.nodes(repo, name)
        if not nodes:
            continue
        node = nodes[0]

        bmarks[name] = (node, ' ', '')

    return orig(ui, repo, fm, bmarks)

def extsetup(ui):
    extensions.wrapfunction(bookmarks, '_printbookmarks', wrapprintbookmarks)

def reposetup(ui, repo):

    # set the config option to store remotenames
    repo.ui.setconfig('experimental', 'remotenames', True, 'remotenames-ext')

    if not repo.local():
        return

    repo._remotenames = remotenames(repo)
    ns = namespaces.namespace

    if ui.configbool('remotenames', 'bookmarks'):
        remotebookmarkns = ns(
            'remotebookmarks',
            templatename='remotebookmarks',
            colorname='remotebookmark',
            logfmt='remote bookmark: %s\n',
            listnames=lambda repo: repo._remotenames.bmarktonodes().keys(),
            namemap=lambda repo, name:
                repo._remotenames.bmarktonodes().get(name, []),
            nodemap=lambda repo, node:
                repo._remotenames.nodetobmarks().get(node, []))
        repo.names.addnamespace(remotebookmarkns)

        # hoisting only works if there are remote bookmarks
        hoist = ui.config('remotenames', 'hoistedpeer')
        if hoist:
            hoistednamens = ns(
                'hoistednames',
                templatename='hoistednames',
                colorname='hoistedname',
                logfmt='hoisted name: %s\n',
                listnames = lambda repo:
                    repo._remotenames.hoisttonodes(hoist).keys(),
                namemap = lambda repo, name:
                    repo._remotenames.hoisttonodes(hoist).get(name, []),
                nodemap = lambda repo, node:
                    repo._remotenames.nodetohoists(hoist).get(node, []))
            repo.names.addnamespace(hoistednamens)

    if ui.configbool('remotenames', 'branches'):
        remotebranchns = ns(
            'remotebranches',
            templatename='remotebranches',
            colorname='remotebranch',
            logfmt='remote branch: %s\n',
            listnames = lambda repo: repo._remotenames.branchtonodes().keys(),
            namemap = lambda repo, name:
                repo._remotenames.branchtonodes().get(name, []),
            nodemap = lambda repo, node:
                repo._remotenames.nodetobranch().get(node, []))
        repo.names.addnamespace(remotebranchns)

@templatekeyword('remotenames', requires={'repo', 'ctx'})
def remotenameskw(context, mapping):
    """List of strings. Remote names associated with the changeset."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')

    remotenames = []
    if 'remotebookmarks' in repo.names:
        remotenames = repo.names['remotebookmarks'].names(repo, ctx.node())

    if 'remotebranches' in repo.names:
        remotenames += repo.names['remotebranches'].names(repo, ctx.node())

    return templateutil.compatlist(context, mapping, 'remotename', remotenames,
                                   plural='remotenames')

@templatekeyword('remotebookmarks', requires={'repo', 'ctx'})
def remotebookmarkskw(context, mapping):
    """List of strings. Remote bookmarks associated with the changeset."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')

    remotebmarks = []
    if 'remotebookmarks' in repo.names:
        remotebmarks = repo.names['remotebookmarks'].names(repo, ctx.node())

    return templateutil.compatlist(context, mapping, 'remotebookmark',
                                   remotebmarks, plural='remotebookmarks')

@templatekeyword('remotebranches', requires={'repo', 'ctx'})
def remotebrancheskw(context, mapping):
    """List of strings. Remote branches associated with the changeset."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')

    remotebranches = []
    if 'remotebranches' in repo.names:
        remotebranches = repo.names['remotebranches'].names(repo, ctx.node())

    return templateutil.compatlist(context, mapping, 'remotebranch',
                                   remotebranches, plural='remotebranches')

def _revsetutil(repo, subset, x, rtypes):
    """utility function to return a set of revs based on the rtypes"""
    args = revsetlang.getargs(x, 0, 1, _('only one argument accepted'))
    if args:
        kind, pattern, matcher = stringutil.stringmatcher(
            revsetlang.getstring(args[0], _('argument must be a string')))
    else:
        kind = pattern = None
        matcher = util.always

    nodes = set()
    cl = repo.changelog
    for rtype in rtypes:
        if rtype in repo.names:
            ns = repo.names[rtype]
            for name in ns.listnames(repo):
                if not matcher(name):
                    continue
                nodes.update(ns.nodes(repo, name))
    if kind == 'literal' and not nodes:
        raise error.RepoLookupError(_("remote name '%s' does not exist")
                                    % pattern)

    revs = (cl.rev(n) for n in nodes if cl.hasnode(n))
    return subset & smartset.baseset(revs)

@revsetpredicate('remotenames([name])')
def remotenamesrevset(repo, subset, x):
    """All changesets which have a remotename on them. If `name` is
    specified, only remotenames of matching remote paths are considered.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    return _revsetutil(repo, subset, x, ('remotebookmarks', 'remotebranches'))

@revsetpredicate('remotebranches([name])')
def remotebranchesrevset(repo, subset, x):
    """All changesets which are branch heads on remotes. If `name` is
    specified, only remotenames of matching remote paths are considered.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    return _revsetutil(repo, subset, x, ('remotebranches',))

@revsetpredicate('remotebookmarks([name])')
def remotebmarksrevset(repo, subset, x):
    """All changesets which have bookmarks on remotes. If `name` is
    specified, only remotenames of matching remote paths are considered.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    return _revsetutil(repo, subset, x, ('remotebookmarks',))
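The 'items = iteritems' alias added above is the other half of the change:
the loader now leaves the 'def iteritems' definition alone, but call-sites
elsewhere are still rewritten to .items(), so the class has to answer to
both names. A hedged illustration of the pattern with a hypothetical toy
class (not from the source):

    class toydict(object):
        def iteritems(self):
            # The definition keeps its Python 2 name; the loader no longer
            # renames it.
            yield ('branch', 'default')

        # Alias so call-sites rewritten from .iteritems() to .items() resolve.
        items = iteritems

    d = toydict()
    assert list(d.items()) == [('branch', 'default')]      # rewritten call
    assert list(d.iteritems()) == [('branch', 'default')]  # untransformed call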
@@ -1,312 +1,314 @@
# __init__.py - Startup and module loading logic for Mercurial.
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import sys

# Allow 'from mercurial import demandimport' to keep working.
import hgdemandimport
demandimport = hgdemandimport

__all__ = []

# Python 3 uses a custom module loader that transforms source code between
# source file reading and compilation. This is done by registering a custom
# finder that changes the spec for Mercurial modules to use a custom loader.
if sys.version_info[0] >= 3:
    import importlib
    import importlib.abc
    import io
    import token
    import tokenize

    class hgpathentryfinder(importlib.abc.MetaPathFinder):
        """A sys.meta_path finder that uses a custom module loader."""
        def find_spec(self, fullname, path, target=None):
            # Only handle Mercurial-related modules.
            if not fullname.startswith(('mercurial.', 'hgext.')):
                return None
            # don't try to parse binary
            if fullname.startswith('mercurial.cext.'):
                return None
            # third-party packages are expected to be dual-version clean
            if fullname.startswith('mercurial.thirdparty'):
                return None
            # zstd is already dual-version clean, don't try and mangle it
            if fullname.startswith('mercurial.zstd'):
                return None
            # rustext is built for the right python version,
            # don't try and mangle it
            if fullname.startswith('mercurial.rustext'):
                return None
            # pywatchman is already dual-version clean, don't try and mangle it
            if fullname.startswith('hgext.fsmonitor.pywatchman'):
                return None

            # Try to find the module using other registered finders.
            spec = None
            for finder in sys.meta_path:
                if finder == self:
                    continue

                # Originally the API was a `find_module` method, but it was
                # renamed to `find_spec` in python 3.4, with a new `target`
                # argument.
                find_spec_method = getattr(finder, 'find_spec', None)
                if find_spec_method:
                    spec = find_spec_method(fullname, path, target=target)
                else:
                    spec = finder.find_module(fullname)
                    if spec is not None:
                        spec = importlib.util.spec_from_loader(fullname, spec)
                if spec:
                    break

            # This is a Mercurial-related module but we couldn't find it
            # using the previously-registered finders. This likely means
            # the module doesn't exist.
            if not spec:
                return None

            # TODO need to support loaders from alternate specs, like zip
            # loaders.
            loader = hgloader(spec.name, spec.origin)
            # Can't use util.safehasattr here because that would require
            # importing util, and we're in import code.
            if hasattr(spec.loader, 'loader'): # hasattr-py3-only
                # This is a nested loader (maybe a lazy loader?)
                spec.loader.loader = loader
            else:
                spec.loader = loader
            return spec

    def replacetokens(tokens, fullname):
        """Transform a stream of tokens from raw to Python 3.

        It is called by the custom module loading machinery to rewrite
        source/tokens between source decoding and compilation.

        Returns a generator of possibly rewritten tokens.

        The input token list may be mutated as part of processing. However,
        its changes do not necessarily match the output token stream.

        REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION
        OR CACHED FILES WON'T GET INVALIDATED PROPERLY.
        """
        futureimpline = False

        # The following utility functions access the tokens list and i index of
        # the for i, t in enumerate(tokens) loop below
        def _isop(j, *o):
            """Assert that tokens[j] is an OP with one of the given values"""
            try:
                return tokens[j].type == token.OP and tokens[j].string in o
            except IndexError:
                return False

        def _findargnofcall(n):
            """Find arg n of a call expression (start at 0)

            Returns index of the first token of that argument, or None if
            there are not that many arguments.

            Assumes that token[i + 1] is '('.

            """
            nested = 0
            for j in range(i + 2, len(tokens)):
                if _isop(j, ')', ']', '}'):
                    # end of call, tuple, subscription or dict / set
                    nested -= 1
                    if nested < 0:
                        return None
                elif n == 0:
                    # this is the starting position of arg
                    return j
                elif _isop(j, '(', '[', '{'):
                    nested += 1
                elif _isop(j, ',') and nested == 0:
                    n -= 1

            return None

        def _ensureunicode(j):
            """Make sure the token at j is a unicode string

            This rewrites a string token to include the unicode literal prefix
            so the string transformer won't add the byte prefix.

            Ignores tokens that are not strings. Assumes bounds checking has
            already been done.

            """
            st = tokens[j]
            if st.type == token.STRING and st.string.startswith(("'", '"')):
                tokens[j] = st._replace(string='u%s' % st.string)

        for i, t in enumerate(tokens):
            # Convert most string literals to byte literals. String literals
            # in Python 2 are bytes. String literals in Python 3 are unicode.
            # Most strings in Mercurial are bytes and unicode strings are rare.
            # Rather than rewrite all string literals to use ``b''`` to
            # indicate byte strings, we apply this token transformer to insert
            # the ``b`` prefix nearly everywhere.
            if t.type == token.STRING:
                s = t.string

                # Preserve docstrings as string literals. This is inconsistent
                # with regular unprefixed strings. However, the
                # "from __future__" parsing (which allows a module docstring to
                # exist before it) doesn't properly handle the docstring if it
                # is b''' prefixed, leading to a SyntaxError. We leave all
                # docstrings as unprefixed to avoid this. This means Mercurial
                # components touching docstrings need to handle unicode,
                # unfortunately.
                if s[0:3] in ("'''", '"""'):
                    yield t
                    continue

                # If the first character isn't a quote, it is likely a string
                # prefixing character (such as 'b', 'u', or 'r'). Ignore.
                if s[0] not in ("'", '"'):
                    yield t
                    continue

                # String literal. Prefix to make a b'' string.
                yield t._replace(string='b%s' % t.string)
                continue

            # Insert compatibility imports at "from __future__ import" line.
            # No '\n' should be added to preserve line numbers.
            if (t.type == token.NAME and t.string == 'import' and
                all(u.type == token.NAME for u in tokens[i - 2:i]) and
                [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
                futureimpline = True
            if t.type == token.NEWLINE and futureimpline:
                futureimpline = False
                if fullname == 'mercurial.pycompat':
                    yield t
                    continue
                r, c = t.start
                l = (b'; from mercurial.pycompat import '
                     b'delattr, getattr, hasattr, setattr, '
                     b'open, unicode\n')
                for u in tokenize.tokenize(io.BytesIO(l).readline):
                    if u.type in (tokenize.ENCODING, token.ENDMARKER):
                        continue
                    yield u._replace(
                        start=(r, c + u.start[1]), end=(r, c + u.end[1]))
                continue

            # This looks like a function call.
            if t.type == token.NAME and _isop(i + 1, '('):
                fn = t.string

                # *attr() builtins don't accept byte strings to 2nd argument.
                if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and
                        not _isop(i - 1, '.')):
                    arg1idx = _findargnofcall(1)
                    if arg1idx is not None:
                        _ensureunicode(arg1idx)

                # .encode() and .decode() on str/bytes/unicode don't accept
                # byte strings on Python 3.
                elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
                    for argn in range(2):
                        argidx = _findargnofcall(argn)
                        if argidx is not None:
                            _ensureunicode(argidx)

                # It changes iteritems/values to items/values as they are not
                # present in the Python 3 world.
-                elif fn in ('iteritems', 'itervalues'):
+                elif (fn in ('iteritems', 'itervalues') and
+                      not (tokens[i - 1].type == token.NAME and
+                           tokens[i - 1].string == 'def')):
                    yield t._replace(string=fn[4:])
                    continue

            # Emit unmodified token.
            yield t

    # Header to add to bytecode files. This MUST be changed when
    # ``replacetokens`` or any mechanism that changes semantics of module
    # loading is changed. Otherwise cached bytecode may get loaded without
    # the new transformation mechanisms applied.
-    BYTECODEHEADER = b'HG\x00\x0b'
+    BYTECODEHEADER = b'HG\x00\x0c'

    class hgloader(importlib.machinery.SourceFileLoader):
        """Custom module loader that transforms source code.

        When the source code is converted to a code object, we transform
        certain patterns to be Python 3 compatible. This allows us to write
        code that is natively Python 2 and compatible with Python 3 without
        making the code excessively ugly.

        We do this by transforming the token stream between parse and compile.

        Implementing transformations invalidates caching assumptions made
        by the built-in importer. The built-in importer stores a header on
        saved bytecode files indicating the Python/bytecode version. If the
        version changes, the cached bytecode is ignored. The Mercurial
        transformations could change at any time. This means we need to check
        that cached bytecode was generated with the current transformation
        code or there could be a mismatch between cached bytecode and what
        would be generated from this class.

        We supplement the bytecode caching layer by wrapping ``get_data``
        and ``set_data``. These functions are called when the
        ``SourceFileLoader`` retrieves and saves bytecode cache files,
        respectively. We simply add an additional header on the file. As
        long as the version in this file is changed when semantics change,
        cached bytecode should be invalidated when transformations change.

        The added header has the form ``HG<VERSION>``. That is a literal
        ``HG`` with 2 binary bytes indicating the transformation version.
        """
        def get_data(self, path):
            data = super(hgloader, self).get_data(path)

            if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
                return data

            # There should be a header indicating the Mercurial transformation
            # version. If it doesn't exist or doesn't match the current
            # version, we raise an OSError because that is what
            # ``SourceFileLoader.get_code()`` expects when loading bytecode
            # paths to indicate the cached file is "bad."
            if data[0:2] != b'HG':
                raise OSError('no hg header')
            if data[0:4] != BYTECODEHEADER:
                raise OSError('hg header version mismatch')

            return data[4:]

        def set_data(self, path, data, *args, **kwargs):
            if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
                data = BYTECODEHEADER + data

            return super(hgloader, self).set_data(path, data, *args, **kwargs)

        def source_to_code(self, data, path):
            """Perform token transformation before compilation."""
            buf = io.BytesIO(data)
            tokens = tokenize.tokenize(buf.readline)
            data = tokenize.untokenize(replacetokens(list(tokens), self.name))
            # Python's built-in importer strips frames from exceptions raised
            # for this code. Unfortunately, that mechanism isn't extensible
            # and our frame will be blamed for the import failure. There
            # are extremely hacky ways to do frame stripping. We haven't
            # implemented them because they are very ugly.
            return super(hgloader, self).source_to_code(data, path)

    # We automagically register our custom importer as a side-effect of
    # loading. This is necessary to ensure that any entry points are able
    # to import mercurial.* modules without having to perform this
    # registration themselves.
    if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path):
        # meta_path is used before any implicit finders and before sys.path.
        sys.meta_path.insert(0, hgpathentryfinder())
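Bumping BYTECODEHEADER from b'HG\x00\x0b' to b'HG\x00\x0c' is what forces
bytecode cached under the old transform to be regenerated. A small sketch of
the header contract that get_data/set_data implement above (the helper name
checkheader is ours):

    BYTECODEHEADER = b'HG\x00\x0c'  # must change whenever replacetokens does

    def checkheader(data):
        # Mirrors get_data(): a missing or stale header raises OSError, which
        # SourceFileLoader.get_code() treats as "cache is bad, recompile".
        if data[0:2] != b'HG':
            raise OSError('no hg header')
        if data[0:4] != BYTECODEHEADER:
            raise OSError('hg header version mismatch')
        return data[4:]

    assert checkheader(BYTECODEHEADER + b'bytecode') == b'bytecode'
    try:
        checkheader(b'HG\x00\x0b' + b'bytecode')  # cached under old transform
    except OSError as err:
        print(err)  # hg header version mismatch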
@@ -1,676 +1,678 @@
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from .node import (
12 from .node import (
13 bin,
13 bin,
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 pycompat,
21 pycompat,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 repoviewutil,
26 repoviewutil,
27 stringutil,
27 stringutil,
28 )
28 )
29
29
30 subsettable = repoviewutil. subsettable
30 subsettable = repoviewutil. subsettable
31
31
32 calcsize = struct.calcsize
32 calcsize = struct.calcsize
33 pack_into = struct.pack_into
33 pack_into = struct.pack_into
34 unpack_from = struct.unpack_from
34 unpack_from = struct.unpack_from
35
35
36
36
37 class BranchMapCache(object):
37 class BranchMapCache(object):
38 """mapping of filtered views of repo with their branchcache"""
38 """mapping of filtered views of repo with their branchcache"""
39 def __init__(self):
39 def __init__(self):
40 self._per_filter = {}
40 self._per_filter = {}
41
41
42 def __getitem__(self, repo):
42 def __getitem__(self, repo):
43 self.updatecache(repo)
43 self.updatecache(repo)
44 return self._per_filter[repo.filtername]
44 return self._per_filter[repo.filtername]
45
45
46 def updatecache(self, repo):
46 def updatecache(self, repo):
47 """Update the cache for the given filtered view on a repository"""
47 """Update the cache for the given filtered view on a repository"""
48 # This can trigger updates for the caches for subsets of the filtered
48 # This can trigger updates for the caches for subsets of the filtered
49 # view, e.g. when there is no cache for this filtered view or the cache
49 # view, e.g. when there is no cache for this filtered view or the cache
50 # is stale.
50 # is stale.
51
51
52 cl = repo.changelog
52 cl = repo.changelog
53 filtername = repo.filtername
53 filtername = repo.filtername
54 bcache = self._per_filter.get(filtername)
54 bcache = self._per_filter.get(filtername)
55 if bcache is None or not bcache.validfor(repo):
55 if bcache is None or not bcache.validfor(repo):
56 # cache object missing or cache object stale? Read from disk
56 # cache object missing or cache object stale? Read from disk
57 bcache = branchcache.fromfile(repo)
57 bcache = branchcache.fromfile(repo)
58
58
59 revs = []
59 revs = []
60 if bcache is None:
60 if bcache is None:
61 # no (fresh) cache available anymore, perhaps we can re-use
61 # no (fresh) cache available anymore, perhaps we can re-use
62 # the cache for a subset, then extend that to add info on missing
62 # the cache for a subset, then extend that to add info on missing
63 # revisions.
63 # revisions.
64 subsetname = subsettable.get(filtername)
64 subsetname = subsettable.get(filtername)
65 if subsetname is not None:
65 if subsetname is not None:
66 subset = repo.filtered(subsetname)
66 subset = repo.filtered(subsetname)
67 bcache = self[subset].copy()
67 bcache = self[subset].copy()
68 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
68 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
69 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
69 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
70 else:
70 else:
71 # nothing to fall back on, start empty.
71 # nothing to fall back on, start empty.
72 bcache = branchcache()
72 bcache = branchcache()
73
73
74 revs.extend(cl.revs(start=bcache.tiprev + 1))
74 revs.extend(cl.revs(start=bcache.tiprev + 1))
75 if revs:
75 if revs:
76 bcache.update(repo, revs)
76 bcache.update(repo, revs)
77
77
78 assert bcache.validfor(repo), filtername
78 assert bcache.validfor(repo), filtername
79 self._per_filter[repo.filtername] = bcache
79 self._per_filter[repo.filtername] = bcache
80
80
81 def replace(self, repo, remotebranchmap):
81 def replace(self, repo, remotebranchmap):
82 """Replace the branchmap cache for a repo with a branch mapping.
82 """Replace the branchmap cache for a repo with a branch mapping.
83
83
84 This is likely only called during clone with a branch map from a
84 This is likely only called during clone with a branch map from a
85 remote.
85 remote.
86
86
87 """
87 """
88 cl = repo.changelog
88 cl = repo.changelog
89 clrev = cl.rev
89 clrev = cl.rev
90 clbranchinfo = cl.branchinfo
90 clbranchinfo = cl.branchinfo
91 rbheads = []
91 rbheads = []
92 closed = []
92 closed = []
93 for bheads in remotebranchmap.itervalues():
93 for bheads in remotebranchmap.itervalues():
94 rbheads += bheads
94 rbheads += bheads
95 for h in bheads:
95 for h in bheads:
96 r = clrev(h)
96 r = clrev(h)
97 b, c = clbranchinfo(r)
97 b, c = clbranchinfo(r)
98 if c:
98 if c:
99 closed.append(h)
99 closed.append(h)
100
100
101 if rbheads:
101 if rbheads:
102 rtiprev = max((int(clrev(node)) for node in rbheads))
102 rtiprev = max((int(clrev(node)) for node in rbheads))
103 cache = branchcache(
103 cache = branchcache(
104 remotebranchmap, repo[rtiprev].node(), rtiprev,
104 remotebranchmap, repo[rtiprev].node(), rtiprev,
105 closednodes=closed)
105 closednodes=closed)
106
106
107 # Try to stick it as low as possible
107 # Try to stick it as low as possible
108 # filter above served are unlikely to be fetch from a clone
108 # filter above served are unlikely to be fetch from a clone
109 for candidate in ('base', 'immutable', 'served'):
109 for candidate in ('base', 'immutable', 'served'):
110 rview = repo.filtered(candidate)
110 rview = repo.filtered(candidate)
111 if cache.validfor(rview):
111 if cache.validfor(rview):
112 self._per_filter[candidate] = cache
112 self._per_filter[candidate] = cache
113 cache.write(rview)
113 cache.write(rview)
114 return
114 return
115
115
116 def clear(self):
116 def clear(self):
117 self._per_filter.clear()
117 self._per_filter.clear()
118
118
119 def _unknownnode(node):
119 def _unknownnode(node):
120 """ raises ValueError when branchcache found a node which does not exists
120 """ raises ValueError when branchcache found a node which does not exists
121 """
121 """
122 raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))
122 raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))

def _branchcachedesc(repo):
    if repo.filtername is not None:
        return 'branch cache (%s)' % repo.filtername
    else:
        return 'branch cache'

class branchcache(object):
131 """A dict like object that hold branches heads cache.
131 """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

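    # A hypothetical example of the serialized format described above
    # (placeholder values, not real 40-character hex hashes):
    #
    #   1f0dee64... 1042 8b3ffd73...
    #   9fceb02d... o default
    #   77c04abf... c stable
    #
    # i.e. one cache key line, then one line per branch head.
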
    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. """
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        needverification = set(self._entries.keys()) - self._verifiedbranches
        for b in needverification:
            self._verifybranch(b)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        for k, v in self._entries.iteritems():
            self._verifybranch(k)
            yield k, v

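    # py3: the source transformer rewrites x.iteritems() call sites to
    # x.items() but leaves definitions alone, so a class defining iteritems()
    # needs this items alias for the transformed call sites to resolve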
    items = iteritems

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid %s: %s\n'
                repo.ui.debug(msg % (_branchcachedesc(repo),
                                     pycompat.bytestr(inst)))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            return False

    def _branchtip(self, heads):
        '''Return a tuple with the last open head in heads and False;
        otherwise return the last closed head and True.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

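    # for illustration (hypothetical nodes): given heads [n1, n2, n3] where
    # n3 is closed and n2 is open, _branchtip returns (n2, False); if every
    # head is closed, it returns (heads[-1], True)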
    def branchtip(self, branch):
        '''Return the tipmost open head on the branch, otherwise return the
        tipmost closed head on the branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return self._entries.itervalues()

    def copy(self):
        """return a deep copy of the branchcache object"""
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self._entries.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache', 'wrote %s with %d labels and %d nodes\n',
                        _branchcachedesc(repo), len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that is strictly a superset of
        the missing heads, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # new tip revision which we found after iterating items from new
        # branches
        ntiprev = self.tiprev

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This has been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > ntiprev:
                ntiprev = tiprev

        if ntiprev > self.tiprev:
            self.tiprev = ntiprev
            self.tipnode = cl.node(ntiprev)

        if not self.validfor(repo):
            # cache key is not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s in %.4f seconds\n',
                    _branchcachedesc(repo), duration)

        self.write(repo)
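
# A minimal usage sketch for branchcache (hypothetical snippet; in-tree
# callers normally go through the per-filter branchmap cache wrapper seen
# above rather than using branchcache directly):
#
#   cache = branchcache.fromfile(repo) or branchcache()
#   cache.update(repo, repo.changelog.revs(start=cache.tiprev + 1))
#   heads = cache.branchheads('default')   # open heads only
#   tip = cache.branchtip('default')       # raises KeyError if unknown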


class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""
    def write(self, repo):
        pass


# Revision branch info cache

_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating closed]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7fffffff
_rbccloseflag = 0x80000000

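# For illustration: each record is calcsize('>4sI') == 8 bytes, so the entry
# for revision r lives at byte offset r * _rbcrecsize. A hypothetical record
# marking a closing commit on the branch at name index 3 would be
# struct.pack('>4sI', node[:4], 3 | _rbccloseflag).
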
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and
    close flag. This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right
    result while converging towards full recovery ... assuming no incorrectly
    matching node hashes.
    The record also contains 4 bytes where 31 bits contain the index of the
    branch and the last bit indicates that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (none exists, bad permissions, etc.),
        # the cache was bypassing itself by setting:
        #
        # self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # write the new names
            if self._rbcnamescount < len(self._names):
                wlock = repo.wlock(wait=False)
                step = ' names'
                self._writenames(repo)

            # write the new revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                self._writerevs(repo, start)

        except (IOError, OSError, error.Abort, error.LockError) as inst:
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()

    def _writenames(self, repo):
        """ write the new branch names to revbranchcache """
        if self._rbcnamescount != 0:
            f = repo.cachevfs.open(_rbcnames, 'ab')
            if f.tell() == self._rbcsnameslen:
                f.write('\0')
            else:
                f.close()
                repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                self._rbcnamescount = 0
                self._rbcrevslen = 0
        if self._rbcnamescount == 0:
            # before rewriting names, make sure references are removed
            repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
            f = repo.cachevfs.open(_rbcnames, 'wb')
        f.write('\0'.join(encoding.fromlocal(b)
                          for b in self._names[self._rbcnamescount:]))
        self._rbcsnameslen = f.tell()
        f.close()
        self._rbcnamescount = len(self._names)

    def _writerevs(self, repo, start):
        """ write the new revs to revbranchcache """
        revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
        with repo.cachevfs.open(_rbcrevs, 'ab') as f:
            if f.tell() != start:
                repo.ui.debug("truncating cache/%s to %d\n" % (_rbcrevs, start))
                f.seek(start)
                if f.tell() != start:
                    start = 0
                    f.seek(start)
                    f.truncate()
            end = revs * _rbcrecsize
            f.write(self._rbcrevs[start:end])
        self._rbcrevslen = revs